Diffstat (limited to 'src/cmd/internal/obj/ppc64')
-rw-r--r--  src/cmd/internal/obj/ppc64/a.out.go         1095
-rw-r--r--  src/cmd/internal/obj/ppc64/anames.go         614
-rw-r--r--  src/cmd/internal/obj/ppc64/anames9.go         55
-rw-r--r--  src/cmd/internal/obj/ppc64/asm9.go          5529
-rw-r--r--  src/cmd/internal/obj/ppc64/asm9_gtables.go  1660
-rw-r--r--  src/cmd/internal/obj/ppc64/asm_test.go       555
-rw-r--r--  src/cmd/internal/obj/ppc64/doc.go            290
-rw-r--r--  src/cmd/internal/obj/ppc64/list9.go          117
-rw-r--r--  src/cmd/internal/obj/ppc64/obj9.go          1412
9 files changed, 11327 insertions, 0 deletions
diff --git a/src/cmd/internal/obj/ppc64/a.out.go b/src/cmd/internal/obj/ppc64/a.out.go
new file mode 100644
index 0000000..efc10ea
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/a.out.go
@@ -0,0 +1,1095 @@
+// cmd/9c/9.out.h from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import "cmd/internal/obj"
+
+//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p ppc64
+
+/*
+ * powerpc 64
+ */
+const (
+ NSNAME = 8
+ NSYM = 50
+ NREG = 32 /* number of general registers */
+ NFREG = 32 /* number of floating point registers */
+)
+
+const (
+ /* RBasePPC64 = 4096 */
+ /* R0=4096 ... R31=4127 */
+ REG_R0 = obj.RBasePPC64 + iota
+ REG_R1
+ REG_R2
+ REG_R3
+ REG_R4
+ REG_R5
+ REG_R6
+ REG_R7
+ REG_R8
+ REG_R9
+ REG_R10
+ REG_R11
+ REG_R12
+ REG_R13
+ REG_R14
+ REG_R15
+ REG_R16
+ REG_R17
+ REG_R18
+ REG_R19
+ REG_R20
+ REG_R21
+ REG_R22
+ REG_R23
+ REG_R24
+ REG_R25
+ REG_R26
+ REG_R27
+ REG_R28
+ REG_R29
+ REG_R30
+ REG_R31
+
+	// CR bits. Use Book 1, chapter 2 naming for bits. Keep this block aligned to a multiple of 32.
+ REG_CR0LT
+ REG_CR0GT
+ REG_CR0EQ
+ REG_CR0SO
+ REG_CR1LT
+ REG_CR1GT
+ REG_CR1EQ
+ REG_CR1SO
+ REG_CR2LT
+ REG_CR2GT
+ REG_CR2EQ
+ REG_CR2SO
+ REG_CR3LT
+ REG_CR3GT
+ REG_CR3EQ
+ REG_CR3SO
+ REG_CR4LT
+ REG_CR4GT
+ REG_CR4EQ
+ REG_CR4SO
+ REG_CR5LT
+ REG_CR5GT
+ REG_CR5EQ
+ REG_CR5SO
+ REG_CR6LT
+ REG_CR6GT
+ REG_CR6EQ
+ REG_CR6SO
+ REG_CR7LT
+ REG_CR7GT
+ REG_CR7EQ
+ REG_CR7SO
+
+	/* Align the FPR and VR blocks such that masking a register number with
+	   0x3F produces the equivalent VSX register number (see the sketch
+	   after this const block). */
+ /* F0=4160 ... F31=4191 */
+ REG_F0
+ REG_F1
+ REG_F2
+ REG_F3
+ REG_F4
+ REG_F5
+ REG_F6
+ REG_F7
+ REG_F8
+ REG_F9
+ REG_F10
+ REG_F11
+ REG_F12
+ REG_F13
+ REG_F14
+ REG_F15
+ REG_F16
+ REG_F17
+ REG_F18
+ REG_F19
+ REG_F20
+ REG_F21
+ REG_F22
+ REG_F23
+ REG_F24
+ REG_F25
+ REG_F26
+ REG_F27
+ REG_F28
+ REG_F29
+ REG_F30
+ REG_F31
+
+ /* V0=4192 ... V31=4223 */
+ REG_V0
+ REG_V1
+ REG_V2
+ REG_V3
+ REG_V4
+ REG_V5
+ REG_V6
+ REG_V7
+ REG_V8
+ REG_V9
+ REG_V10
+ REG_V11
+ REG_V12
+ REG_V13
+ REG_V14
+ REG_V15
+ REG_V16
+ REG_V17
+ REG_V18
+ REG_V19
+ REG_V20
+ REG_V21
+ REG_V22
+ REG_V23
+ REG_V24
+ REG_V25
+ REG_V26
+ REG_V27
+ REG_V28
+ REG_V29
+ REG_V30
+ REG_V31
+
+ /* VS0=4224 ... VS63=4287 */
+ REG_VS0
+ REG_VS1
+ REG_VS2
+ REG_VS3
+ REG_VS4
+ REG_VS5
+ REG_VS6
+ REG_VS7
+ REG_VS8
+ REG_VS9
+ REG_VS10
+ REG_VS11
+ REG_VS12
+ REG_VS13
+ REG_VS14
+ REG_VS15
+ REG_VS16
+ REG_VS17
+ REG_VS18
+ REG_VS19
+ REG_VS20
+ REG_VS21
+ REG_VS22
+ REG_VS23
+ REG_VS24
+ REG_VS25
+ REG_VS26
+ REG_VS27
+ REG_VS28
+ REG_VS29
+ REG_VS30
+ REG_VS31
+ REG_VS32
+ REG_VS33
+ REG_VS34
+ REG_VS35
+ REG_VS36
+ REG_VS37
+ REG_VS38
+ REG_VS39
+ REG_VS40
+ REG_VS41
+ REG_VS42
+ REG_VS43
+ REG_VS44
+ REG_VS45
+ REG_VS46
+ REG_VS47
+ REG_VS48
+ REG_VS49
+ REG_VS50
+ REG_VS51
+ REG_VS52
+ REG_VS53
+ REG_VS54
+ REG_VS55
+ REG_VS56
+ REG_VS57
+ REG_VS58
+ REG_VS59
+ REG_VS60
+ REG_VS61
+ REG_VS62
+ REG_VS63
+
+ REG_CR0
+ REG_CR1
+ REG_CR2
+ REG_CR3
+ REG_CR4
+ REG_CR5
+ REG_CR6
+ REG_CR7
+
+	// MMA accumulator registers. These shadow VSRs 0-31:
+	// MMAx shadows VSR(x*4) through VSR(x*4+3), e.g.
+	// MMA0 shadows VSR0-VSR3.
+ REG_A0
+ REG_A1
+ REG_A2
+ REG_A3
+ REG_A4
+ REG_A5
+ REG_A6
+ REG_A7
+
+ REG_MSR
+ REG_FPSCR
+ REG_CR
+
+ REG_SPECIAL = REG_CR0
+
+	REG_CRBIT0 = REG_CR0LT // An alias for Condition Register bit 0
+
+ REG_SPR0 = obj.RBasePPC64 + 1024 // first of 1024 registers
+
+ REG_XER = REG_SPR0 + 1
+ REG_LR = REG_SPR0 + 8
+ REG_CTR = REG_SPR0 + 9
+
+ REGZERO = REG_R0 /* set to zero */
+ REGSP = REG_R1
+ REGSB = REG_R2
+ REGRET = REG_R3
+ REGARG = -1 /* -1 disables passing the first argument in register */
+ REGRT1 = REG_R20 /* reserved for runtime, duffzero and duffcopy */
+ REGRT2 = REG_R21 /* reserved for runtime, duffcopy */
+ REGMIN = REG_R7 /* register variables allocated from here to REGMAX */
+ REGCTXT = REG_R11 /* context for closures */
+ REGTLS = REG_R13 /* C ABI TLS base pointer */
+ REGMAX = REG_R27
+ REGEXT = REG_R30 /* external registers allocated from here down */
+ REGG = REG_R30 /* G */
+ REGTMP = REG_R31 /* used by the linker */
+ FREGRET = REG_F0
+ FREGMIN = REG_F17 /* first register variable */
+ FREGMAX = REG_F26 /* last register variable for 9g only */
+ FREGEXT = REG_F26 /* first external register */
+)
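+
+// Illustrative sketch (hypothetical helper, not defined in this package):
+// the register layout above guarantees that masking an FPR or VR register
+// number with 0x3F yields the number of the overlapping VSX register
+// (F0-F31 -> VS0-VS31, V0-V31 -> VS32-VS63).
+func vsxReg(r int16) int16 {
+	return REG_VS0 + (r & 0x3F)
+}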
+
+// OpenPOWER ABI for Linux Supplement Power Architecture 64-Bit ELF V2 ABI
+// https://openpowerfoundation.org/?resource_lib=64-bit-elf-v2-abi-specification-power-architecture
+var PPC64DWARFRegisters = map[int16]int16{}
+
+func init() {
+	// f assigns PPC64DWARFRegisters[from..to] = base..(to-from+base)
+ f := func(from, to, base int16) {
+ for r := int16(from); r <= to; r++ {
+ PPC64DWARFRegisters[r] = r - from + base
+ }
+ }
+ f(REG_R0, REG_R31, 0)
+ f(REG_F0, REG_F31, 32)
+ f(REG_V0, REG_V31, 77)
+ f(REG_CR0, REG_CR7, 68)
+
+ f(REG_VS0, REG_VS31, 32) // overlaps F0-F31
+ f(REG_VS32, REG_VS63, 77) // overlaps V0-V31
+ PPC64DWARFRegisters[REG_LR] = 65
+ PPC64DWARFRegisters[REG_CTR] = 66
+ PPC64DWARFRegisters[REG_XER] = 76
+}
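+
+// Illustrative check (hypothetical, not in the upstream file) of the
+// mapping built by init above: R3 -> 3, F0 -> 32, V0 -> 77 per the
+// ELFv2 DWARF register numbering.
+func checkDWARFRegisters() bool {
+	return PPC64DWARFRegisters[REG_R3] == 3 &&
+		PPC64DWARFRegisters[REG_F0] == 32 &&
+		PPC64DWARFRegisters[REG_V0] == 77
+}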
+
+/*
+ * GENERAL:
+ *
+ * compiler allocates R3 up as temps
+ * compiler allocates register variables R7-R27
+ * compiler allocates external registers R30 down
+ *
+ * compiler allocates register variables F17-F26
+ * compiler allocates external registers F26 down
+ */
+const (
+ BIG = 32768 - 8
+)
+
+const (
+ /* mark flags */
+ LABEL = 1 << 0
+ LEAF = 1 << 1
+ FLOAT = 1 << 2
+ BRANCH = 1 << 3
+ LOAD = 1 << 4
+ FCMP = 1 << 5
+ SYNC = 1 << 6
+ LIST = 1 << 7
+ FOLL = 1 << 8
+ NOSCHED = 1 << 9
+ PFX_X64B = 1 << 10 // A prefixed instruction crossing a 64B boundary
+)
+
+// Values for use in branch instruction BC:
+// BC BO,BI,label
+// BO is the type of branch + likely bits, described below.
+// BI is the CR value + branch type.
+// (A sketch composing these operands follows the BO constants below.)
+// ex: BEQ CR2,label is BC 12,10,label
+// 12 = BO_BCR
+// 10 = BI_CR2 + BI_EQ
+
+const (
+ BI_CR0 = 0
+ BI_CR1 = 4
+ BI_CR2 = 8
+ BI_CR3 = 12
+ BI_CR4 = 16
+ BI_CR5 = 20
+ BI_CR6 = 24
+ BI_CR7 = 28
+ BI_LT = 0
+ BI_GT = 1
+ BI_EQ = 2
+ BI_FU = 3
+)
+
+// Common values for the BO field.
+
+const (
+ BO_ALWAYS = 20 // branch unconditionally
+ BO_BCTR = 16 // decrement ctr, branch on ctr != 0
+ BO_NOTBCTR = 18 // decrement ctr, branch on ctr == 0
+ BO_BCR = 12 // branch on cr value
+ BO_BCRBCTR = 8 // decrement ctr, branch on ctr != 0 and cr value
+ BO_NOTBCR = 4 // branch on not cr value
+)
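+
+// Illustrative sketch (hypothetical helper): composing the BO/BI operands
+// for the worked example above, BEQ CR2,label == BC 12,10,label.
+func beqCR2Operands() (bo, bi int) {
+	bo = BO_BCR         // 12: branch on CR value
+	bi = BI_CR2 + BI_EQ // 10: the EQ bit of CR2
+	return bo, bi
+}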
+
+// Bit settings from the CR
+
+const (
+ C_COND_LT = iota // 0 result is negative
+ C_COND_GT // 1 result is positive
+ C_COND_EQ // 2 result is zero
+ C_COND_SO // 3 summary overflow or FP compare w/ NaN
+)
+
+const (
+ C_NONE = iota
+	C_REGP /* An even numbered gpr which can be used as a gpr pair argument */
+ C_REG /* Any gpr register */
+	C_FREGP /* An even numbered fpr which can be used as a fpr pair argument */
+ C_FREG /* Any fpr register */
+ C_VREG /* Any vector register */
+ C_VSREGP /* An even numbered vsx register which can be used as a vsx register pair argument */
+ C_VSREG /* Any vector-scalar register */
+	C_CREG /* The condition register (CR) */
+ C_CRBIT /* A single bit of the CR register (0-31) */
+ C_SPR /* special processor register */
+ C_AREG /* MMA accumulator register */
+ C_ZCON /* The constant zero */
+ C_U1CON /* 1 bit unsigned constant */
+ C_U2CON /* 2 bit unsigned constant */
+ C_U3CON /* 3 bit unsigned constant */
+ C_U4CON /* 4 bit unsigned constant */
+ C_U5CON /* 5 bit unsigned constant */
+ C_U8CON /* 8 bit unsigned constant */
+ C_U15CON /* 15 bit unsigned constant */
+ C_S16CON /* 16 bit signed constant */
+ C_U16CON /* 16 bit unsigned constant */
+ C_32S16CON /* Any 32 bit constant of the form 0x....0000, signed or unsigned */
+ C_32CON /* Any constant which fits into 32 bits. Can be signed or unsigned */
+ C_S34CON /* 34 bit signed constant */
+ C_64CON /* Any constant which fits into 64 bits. Can be signed or unsigned */
+	C_SACON /* $n(REG) where n fits in an int16 */
+	C_LACON /* $n(REG) where n fits in an int32 */
+	C_DACON /* $n(REG) where n fits in an int64 */
+ C_SBRA /* A short offset argument to a branching instruction */
+ C_LBRA /* A long offset argument to a branching instruction */
+ C_LBRAPIC /* Like C_LBRA, but requires an extra NOP for potential TOC restore by the linker. */
+ C_ZOREG /* An $0+reg memory op */
+ C_SOREG /* An $n+reg memory arg where n is a 16 bit signed offset */
+ C_LOREG /* An $n+reg memory arg where n is a 32 bit signed offset */
+	C_XOREG /* A reg+reg memory arg */
+ C_FPSCR /* The fpscr register */
+ C_XER /* The xer, holds the carry bit */
+ C_LR /* The link register */
+ C_CTR /* The count register */
+ C_ANY /* Any argument */
+ C_GOK /* A non-matched argument */
+ C_ADDR /* A symbolic memory location */
+ C_TLS_LE /* A thread local, local-exec, type memory arg */
+ C_TLS_IE /* A thread local, initial-exec, type memory arg */
+ C_TEXTSIZE /* An argument with Type obj.TYPE_TEXTSIZE */
+
+ C_NCLASS /* must be the last */
+
+ /* Aliased names which should be cleaned up, or integrated. */
+ C_SCON = C_U15CON
+ C_UCON = C_32S16CON
+ C_ADDCON = C_S16CON
+ C_ANDCON = C_U16CON
+ C_LCON = C_32CON
+
+ /* Aliased names which may be generated by ppc64map for the optab. */
+ C_S3216CON = C_32S16CON // TODO: these should be treated differently (e.g xoris vs addis)
+ C_U3216CON = C_32S16CON
+ C_S32CON = C_32CON
+ C_U32CON = C_32CON
+)
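+
+// Illustrative sketch (simplified; the real operand classifier lives in
+// asm9.go and handles many more cases): selecting a constant class for a
+// non-negative immediate. The 1-4 bit classes are omitted for brevity.
+func uconClass(v uint64) int {
+	switch {
+	case v == 0:
+		return C_ZCON
+	case v < 1<<5:
+		return C_U5CON
+	case v < 1<<8:
+		return C_U8CON
+	case v < 1<<15:
+		return C_U15CON
+	case v < 1<<16:
+		return C_U16CON
+	case v < 1<<32:
+		return C_32CON
+	}
+	return C_64CON
+}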
+
+const (
+ AADD = obj.ABasePPC64 + obj.A_ARCHSPECIFIC + iota
+ AADDCC
+ AADDIS
+ AADDV
+ AADDVCC
+ AADDC
+ AADDCCC
+ AADDCV
+ AADDCVCC
+ AADDME
+ AADDMECC
+ AADDMEVCC
+ AADDMEV
+ AADDE
+ AADDECC
+ AADDEVCC
+ AADDEV
+ AADDZE
+ AADDZECC
+ AADDZEVCC
+ AADDZEV
+ AADDEX
+ AAND
+ AANDCC
+ AANDN
+ AANDNCC
+ AANDISCC
+ ABC
+ ABCL
+ ABEQ
+ ABGE // not LT = G/E/U
+ ABGT
+ ABLE // not GT = L/E/U
+ ABLT
+ ABNE // not EQ = L/G/U
+ ABVC // Branch if float not unordered (also branch on not summary overflow)
+ ABVS // Branch if float unordered (also branch on summary overflow)
+ ABDNZ // Decrement CTR, and branch if CTR != 0
+ ABDZ // Decrement CTR, and branch if CTR == 0
+ ACMP
+ ACMPU
+ ACMPEQB
+ ACNTLZW
+ ACNTLZWCC
+ ACRAND
+ ACRANDN
+ ACREQV
+ ACRNAND
+ ACRNOR
+ ACROR
+ ACRORN
+ ACRXOR
+ ADIVW
+ ADIVWCC
+ ADIVWVCC
+ ADIVWV
+ ADIVWU
+ ADIVWUCC
+ ADIVWUVCC
+ ADIVWUV
+ AMODUD
+ AMODUW
+ AMODSD
+ AMODSW
+ AEQV
+ AEQVCC
+ AEXTSB
+ AEXTSBCC
+ AEXTSH
+ AEXTSHCC
+ AFABS
+ AFABSCC
+ AFADD
+ AFADDCC
+ AFADDS
+ AFADDSCC
+ AFCMPO
+ AFCMPU
+ AFCTIW
+ AFCTIWCC
+ AFCTIWZ
+ AFCTIWZCC
+ AFDIV
+ AFDIVCC
+ AFDIVS
+ AFDIVSCC
+ AFMADD
+ AFMADDCC
+ AFMADDS
+ AFMADDSCC
+ AFMOVD
+ AFMOVDCC
+ AFMOVDU
+ AFMOVS
+ AFMOVSU
+ AFMOVSX
+ AFMOVSZ
+ AFMSUB
+ AFMSUBCC
+ AFMSUBS
+ AFMSUBSCC
+ AFMUL
+ AFMULCC
+ AFMULS
+ AFMULSCC
+ AFNABS
+ AFNABSCC
+ AFNEG
+ AFNEGCC
+ AFNMADD
+ AFNMADDCC
+ AFNMADDS
+ AFNMADDSCC
+ AFNMSUB
+ AFNMSUBCC
+ AFNMSUBS
+ AFNMSUBSCC
+ AFRSP
+ AFRSPCC
+ AFSUB
+ AFSUBCC
+ AFSUBS
+ AFSUBSCC
+ AISEL
+ AMOVMW
+ ALBAR
+ ALHAR
+ ALSW
+ ALWAR
+ ALWSYNC
+ AMOVDBR
+ AMOVWBR
+ AMOVB
+ AMOVBU
+ AMOVBZ
+ AMOVBZU
+ AMOVH
+ AMOVHBR
+ AMOVHU
+ AMOVHZ
+ AMOVHZU
+ AMOVW
+ AMOVWU
+ AMOVFL
+ AMOVCRFS
+ AMTFSB0
+ AMTFSB0CC
+ AMTFSB1
+ AMTFSB1CC
+ AMULHW
+ AMULHWCC
+ AMULHWU
+ AMULHWUCC
+ AMULLW
+ AMULLWCC
+ AMULLWVCC
+ AMULLWV
+ ANAND
+ ANANDCC
+ ANEG
+ ANEGCC
+ ANEGVCC
+ ANEGV
+ ANOR
+ ANORCC
+ AOR
+ AORCC
+ AORN
+ AORNCC
+ AORIS
+ AREM
+ AREMU
+ ARFI
+ ARLWMI
+ ARLWMICC
+ ARLWNM
+ ARLWNMCC
+ ACLRLSLWI
+ ASLW
+ ASLWCC
+ ASRW
+ ASRAW
+ ASRAWCC
+ ASRWCC
+ ASTBCCC
+ ASTHCCC
+ ASTSW
+ ASTWCCC
+ ASUB
+ ASUBCC
+ ASUBVCC
+ ASUBC
+ ASUBCCC
+ ASUBCV
+ ASUBCVCC
+ ASUBME
+ ASUBMECC
+ ASUBMEVCC
+ ASUBMEV
+ ASUBV
+ ASUBE
+ ASUBECC
+ ASUBEV
+ ASUBEVCC
+ ASUBZE
+ ASUBZECC
+ ASUBZEVCC
+ ASUBZEV
+ ASYNC
+ AXOR
+ AXORCC
+ AXORIS
+
+ ADCBF
+ ADCBI
+ ADCBST
+ ADCBT
+ ADCBTST
+ ADCBZ
+ AEIEIO
+ AICBI
+ AISYNC
+ APTESYNC
+ ATLBIE
+ ATLBIEL
+ ATLBSYNC
+ ATW
+
+ ASYSCALL
+ AWORD
+
+ ARFCI
+
+ AFCPSGN
+ AFCPSGNCC
+ /* optional on 32-bit */
+ AFRES
+ AFRESCC
+ AFRIM
+ AFRIMCC
+ AFRIP
+ AFRIPCC
+ AFRIZ
+ AFRIZCC
+ AFRIN
+ AFRINCC
+ AFRSQRTE
+ AFRSQRTECC
+ AFSEL
+ AFSELCC
+ AFSQRT
+ AFSQRTCC
+ AFSQRTS
+ AFSQRTSCC
+
+ /* 64-bit */
+
+ ACNTLZD
+ ACNTLZDCC
+ ACMPW /* CMP with L=0 */
+ ACMPWU
+ ACMPB
+ AFTDIV
+ AFTSQRT
+ ADIVD
+ ADIVDCC
+ ADIVDE
+ ADIVDECC
+ ADIVDEU
+ ADIVDEUCC
+ ADIVDVCC
+ ADIVDV
+ ADIVDU
+ ADIVDUCC
+ ADIVDUVCC
+ ADIVDUV
+ AEXTSW
+ AEXTSWCC
+ /* AFCFIW; AFCFIWCC */
+ AFCFID
+ AFCFIDCC
+ AFCFIDU
+ AFCFIDUCC
+ AFCFIDS
+ AFCFIDSCC
+ AFCTID
+ AFCTIDCC
+ AFCTIDZ
+ AFCTIDZCC
+ ALDAR
+ AMOVD
+ AMOVDU
+ AMOVWZ
+ AMOVWZU
+ AMULHD
+ AMULHDCC
+ AMULHDU
+ AMULHDUCC
+ AMULLD
+ AMULLDCC
+ AMULLDVCC
+ AMULLDV
+ ARFID
+ ARLDMI
+ ARLDMICC
+ ARLDIMI
+ ARLDIMICC
+ ARLDC
+ ARLDCCC
+ ARLDCR
+ ARLDCRCC
+ ARLDICR
+ ARLDICRCC
+ ARLDCL
+ ARLDCLCC
+ ARLDICL
+ ARLDICLCC
+ ARLDIC
+ ARLDICCC
+ ACLRLSLDI
+ AROTL
+ AROTLW
+ ASLBIA
+ ASLBIE
+ ASLBMFEE
+ ASLBMFEV
+ ASLBMTE
+ ASLD
+ ASLDCC
+ ASRD
+ ASRAD
+ ASRADCC
+ ASRDCC
+ AEXTSWSLI
+ AEXTSWSLICC
+ ASTDCCC
+ ATD
+ ASETB
+
+ /* 64-bit pseudo operation */
+ ADWORD
+ AREMD
+ AREMDU
+
+ /* more 64-bit operations */
+ AHRFID
+ APOPCNTD
+ APOPCNTW
+ APOPCNTB
+ ACNTTZW
+ ACNTTZWCC
+ ACNTTZD
+ ACNTTZDCC
+ ACOPY
+ APASTECC
+ ADARN
+ AMADDHD
+ AMADDHDU
+ AMADDLD
+
+ /* Vector */
+ ALVEBX
+ ALVEHX
+ ALVEWX
+ ALVX
+ ALVXL
+ ALVSL
+ ALVSR
+ ASTVEBX
+ ASTVEHX
+ ASTVEWX
+ ASTVX
+ ASTVXL
+ AVAND
+ AVANDC
+ AVNAND
+ AVOR
+ AVORC
+ AVNOR
+ AVXOR
+ AVEQV
+ AVADDUM
+ AVADDUBM
+ AVADDUHM
+ AVADDUWM
+ AVADDUDM
+ AVADDUQM
+ AVADDCU
+ AVADDCUQ
+ AVADDCUW
+ AVADDUS
+ AVADDUBS
+ AVADDUHS
+ AVADDUWS
+ AVADDSS
+ AVADDSBS
+ AVADDSHS
+ AVADDSWS
+ AVADDE
+ AVADDEUQM
+ AVADDECUQ
+ AVSUBUM
+ AVSUBUBM
+ AVSUBUHM
+ AVSUBUWM
+ AVSUBUDM
+ AVSUBUQM
+ AVSUBCU
+ AVSUBCUQ
+ AVSUBCUW
+ AVSUBUS
+ AVSUBUBS
+ AVSUBUHS
+ AVSUBUWS
+ AVSUBSS
+ AVSUBSBS
+ AVSUBSHS
+ AVSUBSWS
+ AVSUBE
+ AVSUBEUQM
+ AVSUBECUQ
+ AVMULESB
+ AVMULOSB
+ AVMULEUB
+ AVMULOUB
+ AVMULESH
+ AVMULOSH
+ AVMULEUH
+ AVMULOUH
+ AVMULESW
+ AVMULOSW
+ AVMULEUW
+ AVMULOUW
+ AVMULUWM
+ AVPMSUM
+ AVPMSUMB
+ AVPMSUMH
+ AVPMSUMW
+ AVPMSUMD
+ AVMSUMUDM
+ AVR
+ AVRLB
+ AVRLH
+ AVRLW
+ AVRLD
+ AVS
+ AVSLB
+ AVSLH
+ AVSLW
+ AVSL
+ AVSLO
+ AVSRB
+ AVSRH
+ AVSRW
+ AVSR
+ AVSRO
+ AVSLD
+ AVSRD
+ AVSA
+ AVSRAB
+ AVSRAH
+ AVSRAW
+ AVSRAD
+ AVSOI
+ AVSLDOI
+ AVCLZ
+ AVCLZB
+ AVCLZH
+ AVCLZW
+ AVCLZD
+ AVPOPCNT
+ AVPOPCNTB
+ AVPOPCNTH
+ AVPOPCNTW
+ AVPOPCNTD
+ AVCMPEQ
+ AVCMPEQUB
+ AVCMPEQUBCC
+ AVCMPEQUH
+ AVCMPEQUHCC
+ AVCMPEQUW
+ AVCMPEQUWCC
+ AVCMPEQUD
+ AVCMPEQUDCC
+ AVCMPGT
+ AVCMPGTUB
+ AVCMPGTUBCC
+ AVCMPGTUH
+ AVCMPGTUHCC
+ AVCMPGTUW
+ AVCMPGTUWCC
+ AVCMPGTUD
+ AVCMPGTUDCC
+ AVCMPGTSB
+ AVCMPGTSBCC
+ AVCMPGTSH
+ AVCMPGTSHCC
+ AVCMPGTSW
+ AVCMPGTSWCC
+ AVCMPGTSD
+ AVCMPGTSDCC
+ AVCMPNEZB
+ AVCMPNEZBCC
+ AVCMPNEB
+ AVCMPNEBCC
+ AVCMPNEH
+ AVCMPNEHCC
+ AVCMPNEW
+ AVCMPNEWCC
+ AVPERM
+ AVPERMXOR
+ AVPERMR
+ AVBPERMQ
+ AVBPERMD
+ AVSEL
+ AVSPLTB
+ AVSPLTH
+ AVSPLTW
+ AVSPLTISB
+ AVSPLTISH
+ AVSPLTISW
+ AVCIPH
+ AVCIPHER
+ AVCIPHERLAST
+ AVNCIPH
+ AVNCIPHER
+ AVNCIPHERLAST
+ AVSBOX
+ AVSHASIGMA
+ AVSHASIGMAW
+ AVSHASIGMAD
+ AVMRGEW
+ AVMRGOW
+ AVCLZLSBB
+ AVCTZLSBB
+
+ /* VSX */
+ ALXV
+ ALXVL
+ ALXVLL
+ ALXVD2X
+ ALXVW4X
+ ALXVH8X
+ ALXVB16X
+ ALXVX
+ ALXVDSX
+ ASTXV
+ ASTXVL
+ ASTXVLL
+ ASTXVD2X
+ ASTXVW4X
+ ASTXVH8X
+ ASTXVB16X
+ ASTXVX
+ ALXSDX
+ ASTXSDX
+ ALXSIWAX
+ ALXSIWZX
+ ASTXSIWX
+ AMFVSRD
+ AMFFPRD
+ AMFVRD
+ AMFVSRWZ
+ AMFVSRLD
+ AMTVSRD
+ AMTFPRD
+ AMTVRD
+ AMTVSRWA
+ AMTVSRWZ
+ AMTVSRDD
+ AMTVSRWS
+ AXXLAND
+ AXXLANDC
+ AXXLEQV
+ AXXLNAND
+ AXXLOR
+ AXXLORC
+ AXXLNOR
+ AXXLORQ
+ AXXLXOR
+ AXXSEL
+ AXXMRGHW
+ AXXMRGLW
+ AXXSPLTW
+ AXXSPLTIB
+ AXXPERM
+ AXXPERMDI
+ AXXSLDWI
+ AXXBRQ
+ AXXBRD
+ AXXBRW
+ AXXBRH
+ AXSCVDPSP
+ AXSCVSPDP
+ AXSCVDPSPN
+ AXSCVSPDPN
+ AXVCVDPSP
+ AXVCVSPDP
+ AXSCVDPSXDS
+ AXSCVDPSXWS
+ AXSCVDPUXDS
+ AXSCVDPUXWS
+ AXSCVSXDDP
+ AXSCVUXDDP
+ AXSCVSXDSP
+ AXSCVUXDSP
+ AXVCVDPSXDS
+ AXVCVDPSXWS
+ AXVCVDPUXDS
+ AXVCVDPUXWS
+ AXVCVSPSXDS
+ AXVCVSPSXWS
+ AXVCVSPUXDS
+ AXVCVSPUXWS
+ AXVCVSXDDP
+ AXVCVSXWDP
+ AXVCVUXDDP
+ AXVCVUXWDP
+ AXVCVSXDSP
+ AXVCVSXWSP
+ AXVCVUXDSP
+ AXVCVUXWSP
+ ALASTAOUT // The last instruction in this list. Also the first opcode generated by ppc64map.
+
+ // aliases
+ ABR = obj.AJMP
+ ABL = obj.ACALL
+ ALAST = ALASTGEN // The final enumerated instruction value + 1. This is used to size the oprange table.
+)
diff --git a/src/cmd/internal/obj/ppc64/anames.go b/src/cmd/internal/obj/ppc64/anames.go
new file mode 100644
index 0000000..f4680cc
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/anames.go
@@ -0,0 +1,614 @@
+// Code generated by stringer -i a.out.go -o anames.go -p ppc64; DO NOT EDIT.
+
+package ppc64
+
+import "cmd/internal/obj"
+
+var Anames = []string{
+ obj.A_ARCHSPECIFIC: "ADD",
+ "ADDCC",
+ "ADDIS",
+ "ADDV",
+ "ADDVCC",
+ "ADDC",
+ "ADDCCC",
+ "ADDCV",
+ "ADDCVCC",
+ "ADDME",
+ "ADDMECC",
+ "ADDMEVCC",
+ "ADDMEV",
+ "ADDE",
+ "ADDECC",
+ "ADDEVCC",
+ "ADDEV",
+ "ADDZE",
+ "ADDZECC",
+ "ADDZEVCC",
+ "ADDZEV",
+ "ADDEX",
+ "AND",
+ "ANDCC",
+ "ANDN",
+ "ANDNCC",
+ "ANDISCC",
+ "BC",
+ "BCL",
+ "BEQ",
+ "BGE",
+ "BGT",
+ "BLE",
+ "BLT",
+ "BNE",
+ "BVC",
+ "BVS",
+ "BDNZ",
+ "BDZ",
+ "CMP",
+ "CMPU",
+ "CMPEQB",
+ "CNTLZW",
+ "CNTLZWCC",
+ "CRAND",
+ "CRANDN",
+ "CREQV",
+ "CRNAND",
+ "CRNOR",
+ "CROR",
+ "CRORN",
+ "CRXOR",
+ "DIVW",
+ "DIVWCC",
+ "DIVWVCC",
+ "DIVWV",
+ "DIVWU",
+ "DIVWUCC",
+ "DIVWUVCC",
+ "DIVWUV",
+ "MODUD",
+ "MODUW",
+ "MODSD",
+ "MODSW",
+ "EQV",
+ "EQVCC",
+ "EXTSB",
+ "EXTSBCC",
+ "EXTSH",
+ "EXTSHCC",
+ "FABS",
+ "FABSCC",
+ "FADD",
+ "FADDCC",
+ "FADDS",
+ "FADDSCC",
+ "FCMPO",
+ "FCMPU",
+ "FCTIW",
+ "FCTIWCC",
+ "FCTIWZ",
+ "FCTIWZCC",
+ "FDIV",
+ "FDIVCC",
+ "FDIVS",
+ "FDIVSCC",
+ "FMADD",
+ "FMADDCC",
+ "FMADDS",
+ "FMADDSCC",
+ "FMOVD",
+ "FMOVDCC",
+ "FMOVDU",
+ "FMOVS",
+ "FMOVSU",
+ "FMOVSX",
+ "FMOVSZ",
+ "FMSUB",
+ "FMSUBCC",
+ "FMSUBS",
+ "FMSUBSCC",
+ "FMUL",
+ "FMULCC",
+ "FMULS",
+ "FMULSCC",
+ "FNABS",
+ "FNABSCC",
+ "FNEG",
+ "FNEGCC",
+ "FNMADD",
+ "FNMADDCC",
+ "FNMADDS",
+ "FNMADDSCC",
+ "FNMSUB",
+ "FNMSUBCC",
+ "FNMSUBS",
+ "FNMSUBSCC",
+ "FRSP",
+ "FRSPCC",
+ "FSUB",
+ "FSUBCC",
+ "FSUBS",
+ "FSUBSCC",
+ "ISEL",
+ "MOVMW",
+ "LBAR",
+ "LHAR",
+ "LSW",
+ "LWAR",
+ "LWSYNC",
+ "MOVDBR",
+ "MOVWBR",
+ "MOVB",
+ "MOVBU",
+ "MOVBZ",
+ "MOVBZU",
+ "MOVH",
+ "MOVHBR",
+ "MOVHU",
+ "MOVHZ",
+ "MOVHZU",
+ "MOVW",
+ "MOVWU",
+ "MOVFL",
+ "MOVCRFS",
+ "MTFSB0",
+ "MTFSB0CC",
+ "MTFSB1",
+ "MTFSB1CC",
+ "MULHW",
+ "MULHWCC",
+ "MULHWU",
+ "MULHWUCC",
+ "MULLW",
+ "MULLWCC",
+ "MULLWVCC",
+ "MULLWV",
+ "NAND",
+ "NANDCC",
+ "NEG",
+ "NEGCC",
+ "NEGVCC",
+ "NEGV",
+ "NOR",
+ "NORCC",
+ "OR",
+ "ORCC",
+ "ORN",
+ "ORNCC",
+ "ORIS",
+ "REM",
+ "REMU",
+ "RFI",
+ "RLWMI",
+ "RLWMICC",
+ "RLWNM",
+ "RLWNMCC",
+ "CLRLSLWI",
+ "SLW",
+ "SLWCC",
+ "SRW",
+ "SRAW",
+ "SRAWCC",
+ "SRWCC",
+ "STBCCC",
+ "STHCCC",
+ "STSW",
+ "STWCCC",
+ "SUB",
+ "SUBCC",
+ "SUBVCC",
+ "SUBC",
+ "SUBCCC",
+ "SUBCV",
+ "SUBCVCC",
+ "SUBME",
+ "SUBMECC",
+ "SUBMEVCC",
+ "SUBMEV",
+ "SUBV",
+ "SUBE",
+ "SUBECC",
+ "SUBEV",
+ "SUBEVCC",
+ "SUBZE",
+ "SUBZECC",
+ "SUBZEVCC",
+ "SUBZEV",
+ "SYNC",
+ "XOR",
+ "XORCC",
+ "XORIS",
+ "DCBF",
+ "DCBI",
+ "DCBST",
+ "DCBT",
+ "DCBTST",
+ "DCBZ",
+ "EIEIO",
+ "ICBI",
+ "ISYNC",
+ "PTESYNC",
+ "TLBIE",
+ "TLBIEL",
+ "TLBSYNC",
+ "TW",
+ "SYSCALL",
+ "WORD",
+ "RFCI",
+ "FCPSGN",
+ "FCPSGNCC",
+ "FRES",
+ "FRESCC",
+ "FRIM",
+ "FRIMCC",
+ "FRIP",
+ "FRIPCC",
+ "FRIZ",
+ "FRIZCC",
+ "FRIN",
+ "FRINCC",
+ "FRSQRTE",
+ "FRSQRTECC",
+ "FSEL",
+ "FSELCC",
+ "FSQRT",
+ "FSQRTCC",
+ "FSQRTS",
+ "FSQRTSCC",
+ "CNTLZD",
+ "CNTLZDCC",
+ "CMPW",
+ "CMPWU",
+ "CMPB",
+ "FTDIV",
+ "FTSQRT",
+ "DIVD",
+ "DIVDCC",
+ "DIVDE",
+ "DIVDECC",
+ "DIVDEU",
+ "DIVDEUCC",
+ "DIVDVCC",
+ "DIVDV",
+ "DIVDU",
+ "DIVDUCC",
+ "DIVDUVCC",
+ "DIVDUV",
+ "EXTSW",
+ "EXTSWCC",
+ "FCFID",
+ "FCFIDCC",
+ "FCFIDU",
+ "FCFIDUCC",
+ "FCFIDS",
+ "FCFIDSCC",
+ "FCTID",
+ "FCTIDCC",
+ "FCTIDZ",
+ "FCTIDZCC",
+ "LDAR",
+ "MOVD",
+ "MOVDU",
+ "MOVWZ",
+ "MOVWZU",
+ "MULHD",
+ "MULHDCC",
+ "MULHDU",
+ "MULHDUCC",
+ "MULLD",
+ "MULLDCC",
+ "MULLDVCC",
+ "MULLDV",
+ "RFID",
+ "RLDMI",
+ "RLDMICC",
+ "RLDIMI",
+ "RLDIMICC",
+ "RLDC",
+ "RLDCCC",
+ "RLDCR",
+ "RLDCRCC",
+ "RLDICR",
+ "RLDICRCC",
+ "RLDCL",
+ "RLDCLCC",
+ "RLDICL",
+ "RLDICLCC",
+ "RLDIC",
+ "RLDICCC",
+ "CLRLSLDI",
+ "ROTL",
+ "ROTLW",
+ "SLBIA",
+ "SLBIE",
+ "SLBMFEE",
+ "SLBMFEV",
+ "SLBMTE",
+ "SLD",
+ "SLDCC",
+ "SRD",
+ "SRAD",
+ "SRADCC",
+ "SRDCC",
+ "EXTSWSLI",
+ "EXTSWSLICC",
+ "STDCCC",
+ "TD",
+ "SETB",
+ "DWORD",
+ "REMD",
+ "REMDU",
+ "HRFID",
+ "POPCNTD",
+ "POPCNTW",
+ "POPCNTB",
+ "CNTTZW",
+ "CNTTZWCC",
+ "CNTTZD",
+ "CNTTZDCC",
+ "COPY",
+ "PASTECC",
+ "DARN",
+ "MADDHD",
+ "MADDHDU",
+ "MADDLD",
+ "LVEBX",
+ "LVEHX",
+ "LVEWX",
+ "LVX",
+ "LVXL",
+ "LVSL",
+ "LVSR",
+ "STVEBX",
+ "STVEHX",
+ "STVEWX",
+ "STVX",
+ "STVXL",
+ "VAND",
+ "VANDC",
+ "VNAND",
+ "VOR",
+ "VORC",
+ "VNOR",
+ "VXOR",
+ "VEQV",
+ "VADDUM",
+ "VADDUBM",
+ "VADDUHM",
+ "VADDUWM",
+ "VADDUDM",
+ "VADDUQM",
+ "VADDCU",
+ "VADDCUQ",
+ "VADDCUW",
+ "VADDUS",
+ "VADDUBS",
+ "VADDUHS",
+ "VADDUWS",
+ "VADDSS",
+ "VADDSBS",
+ "VADDSHS",
+ "VADDSWS",
+ "VADDE",
+ "VADDEUQM",
+ "VADDECUQ",
+ "VSUBUM",
+ "VSUBUBM",
+ "VSUBUHM",
+ "VSUBUWM",
+ "VSUBUDM",
+ "VSUBUQM",
+ "VSUBCU",
+ "VSUBCUQ",
+ "VSUBCUW",
+ "VSUBUS",
+ "VSUBUBS",
+ "VSUBUHS",
+ "VSUBUWS",
+ "VSUBSS",
+ "VSUBSBS",
+ "VSUBSHS",
+ "VSUBSWS",
+ "VSUBE",
+ "VSUBEUQM",
+ "VSUBECUQ",
+ "VMULESB",
+ "VMULOSB",
+ "VMULEUB",
+ "VMULOUB",
+ "VMULESH",
+ "VMULOSH",
+ "VMULEUH",
+ "VMULOUH",
+ "VMULESW",
+ "VMULOSW",
+ "VMULEUW",
+ "VMULOUW",
+ "VMULUWM",
+ "VPMSUM",
+ "VPMSUMB",
+ "VPMSUMH",
+ "VPMSUMW",
+ "VPMSUMD",
+ "VMSUMUDM",
+ "VR",
+ "VRLB",
+ "VRLH",
+ "VRLW",
+ "VRLD",
+ "VS",
+ "VSLB",
+ "VSLH",
+ "VSLW",
+ "VSL",
+ "VSLO",
+ "VSRB",
+ "VSRH",
+ "VSRW",
+ "VSR",
+ "VSRO",
+ "VSLD",
+ "VSRD",
+ "VSA",
+ "VSRAB",
+ "VSRAH",
+ "VSRAW",
+ "VSRAD",
+ "VSOI",
+ "VSLDOI",
+ "VCLZ",
+ "VCLZB",
+ "VCLZH",
+ "VCLZW",
+ "VCLZD",
+ "VPOPCNT",
+ "VPOPCNTB",
+ "VPOPCNTH",
+ "VPOPCNTW",
+ "VPOPCNTD",
+ "VCMPEQ",
+ "VCMPEQUB",
+ "VCMPEQUBCC",
+ "VCMPEQUH",
+ "VCMPEQUHCC",
+ "VCMPEQUW",
+ "VCMPEQUWCC",
+ "VCMPEQUD",
+ "VCMPEQUDCC",
+ "VCMPGT",
+ "VCMPGTUB",
+ "VCMPGTUBCC",
+ "VCMPGTUH",
+ "VCMPGTUHCC",
+ "VCMPGTUW",
+ "VCMPGTUWCC",
+ "VCMPGTUD",
+ "VCMPGTUDCC",
+ "VCMPGTSB",
+ "VCMPGTSBCC",
+ "VCMPGTSH",
+ "VCMPGTSHCC",
+ "VCMPGTSW",
+ "VCMPGTSWCC",
+ "VCMPGTSD",
+ "VCMPGTSDCC",
+ "VCMPNEZB",
+ "VCMPNEZBCC",
+ "VCMPNEB",
+ "VCMPNEBCC",
+ "VCMPNEH",
+ "VCMPNEHCC",
+ "VCMPNEW",
+ "VCMPNEWCC",
+ "VPERM",
+ "VPERMXOR",
+ "VPERMR",
+ "VBPERMQ",
+ "VBPERMD",
+ "VSEL",
+ "VSPLTB",
+ "VSPLTH",
+ "VSPLTW",
+ "VSPLTISB",
+ "VSPLTISH",
+ "VSPLTISW",
+ "VCIPH",
+ "VCIPHER",
+ "VCIPHERLAST",
+ "VNCIPH",
+ "VNCIPHER",
+ "VNCIPHERLAST",
+ "VSBOX",
+ "VSHASIGMA",
+ "VSHASIGMAW",
+ "VSHASIGMAD",
+ "VMRGEW",
+ "VMRGOW",
+ "VCLZLSBB",
+ "VCTZLSBB",
+ "LXV",
+ "LXVL",
+ "LXVLL",
+ "LXVD2X",
+ "LXVW4X",
+ "LXVH8X",
+ "LXVB16X",
+ "LXVX",
+ "LXVDSX",
+ "STXV",
+ "STXVL",
+ "STXVLL",
+ "STXVD2X",
+ "STXVW4X",
+ "STXVH8X",
+ "STXVB16X",
+ "STXVX",
+ "LXSDX",
+ "STXSDX",
+ "LXSIWAX",
+ "LXSIWZX",
+ "STXSIWX",
+ "MFVSRD",
+ "MFFPRD",
+ "MFVRD",
+ "MFVSRWZ",
+ "MFVSRLD",
+ "MTVSRD",
+ "MTFPRD",
+ "MTVRD",
+ "MTVSRWA",
+ "MTVSRWZ",
+ "MTVSRDD",
+ "MTVSRWS",
+ "XXLAND",
+ "XXLANDC",
+ "XXLEQV",
+ "XXLNAND",
+ "XXLOR",
+ "XXLORC",
+ "XXLNOR",
+ "XXLORQ",
+ "XXLXOR",
+ "XXSEL",
+ "XXMRGHW",
+ "XXMRGLW",
+ "XXSPLTW",
+ "XXSPLTIB",
+ "XXPERM",
+ "XXPERMDI",
+ "XXSLDWI",
+ "XXBRQ",
+ "XXBRD",
+ "XXBRW",
+ "XXBRH",
+ "XSCVDPSP",
+ "XSCVSPDP",
+ "XSCVDPSPN",
+ "XSCVSPDPN",
+ "XVCVDPSP",
+ "XVCVSPDP",
+ "XSCVDPSXDS",
+ "XSCVDPSXWS",
+ "XSCVDPUXDS",
+ "XSCVDPUXWS",
+ "XSCVSXDDP",
+ "XSCVUXDDP",
+ "XSCVSXDSP",
+ "XSCVUXDSP",
+ "XVCVDPSXDS",
+ "XVCVDPSXWS",
+ "XVCVDPUXDS",
+ "XVCVDPUXWS",
+ "XVCVSPSXDS",
+ "XVCVSPSXWS",
+ "XVCVSPUXDS",
+ "XVCVSPUXWS",
+ "XVCVSXDDP",
+ "XVCVSXWDP",
+ "XVCVUXDDP",
+ "XVCVUXWDP",
+ "XVCVSXDSP",
+ "XVCVSXWSP",
+ "XVCVUXDSP",
+ "XVCVUXWSP",
+ "LASTAOUT",
+}
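+
+// Illustrative usage (hypothetical direct lookup; the printer normally goes
+// through the obj package): Anames is indexed by as - obj.ABasePPC64, so
+// Anames[AADD-obj.ABasePPC64] == "ADD".
+func opName(as obj.As) string {
+	return Anames[as-obj.ABasePPC64]
+}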
diff --git a/src/cmd/internal/obj/ppc64/anames9.go b/src/cmd/internal/obj/ppc64/anames9.go
new file mode 100644
index 0000000..ad6776a
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/anames9.go
@@ -0,0 +1,55 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+var cnames9 = []string{
+ "NONE",
+ "REGP",
+ "REG",
+ "FREGP",
+ "FREG",
+ "VREG",
+ "VSREGP",
+ "VSREG",
+ "CREG",
+ "CRBIT",
+ "SPR",
+	"AREG",
+ "ZCON",
+ "U1CON",
+ "U2CON",
+ "U3CON",
+ "U4CON",
+ "U5CON",
+ "U8CON",
+ "U15CON",
+ "S16CON",
+ "U16CON",
+ "32S16CON",
+ "32CON",
+ "S34CON",
+ "64CON",
+ "SACON",
+ "LACON",
+ "DACON",
+ "SBRA",
+ "LBRA",
+ "LBRAPIC",
+ "ZOREG",
+ "SOREG",
+ "LOREG",
+ "XOREG",
+ "FPSCR",
+ "XER",
+ "LR",
+ "CTR",
+ "ANY",
+ "GOK",
+ "ADDR",
+ "TLS_LE",
+ "TLS_IE",
+ "TEXTSIZE",
+ "NCLASS",
+}
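+
+// Illustrative sketch (the real helper, DRconv in asm9.go, is not part of
+// this file): mapping an operand class constant back to its name for
+// diagnostics.
+func className(c int) string {
+	if c >= 0 && c < len(cnames9) {
+		return "C_" + cnames9[c]
+	}
+	return "C_??"
+}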
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
new file mode 100644
index 0000000..4559eed
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -0,0 +1,5529 @@
+// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "encoding/binary"
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "math"
+ "math/bits"
+ "sort"
+)
+
+// ctxt9 holds state while assembling a single function.
+// Each function gets a fresh ctxt9.
+// This allows for multiple functions to be safely concurrently assembled.
+type ctxt9 struct {
+ ctxt *obj.Link
+ newprog obj.ProgAlloc
+ cursym *obj.LSym
+ autosize int32
+ instoffset int64
+ pc int64
+}
+
+// Instruction layout.
+
+const (
+ r0iszero = 1
+)
+
+const (
+ // R bit option in prefixed load/store/add D-form operations
+ PFX_R_ABS = 0 // Offset is absolute
+ PFX_R_PCREL = 1 // Offset is relative to PC, RA should be 0
+)
+
+type Optab struct {
+ as obj.As // Opcode
+ a1 uint8 // p.From argument (obj.Addr). p is of type obj.Prog.
+ a2 uint8 // p.Reg argument (int16 Register)
+ a3 uint8 // p.RestArgs[0] (obj.AddrPos)
+ a4 uint8 // p.RestArgs[1]
+	a5 uint8 // p.RestArgs[2]
+ a6 uint8 // p.To (obj.Addr)
+ type_ int8 // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
+ size int8 // Text space in bytes to lay operation
+
+	// A prefixed instruction is generated by this opcode. It cannot be
+	// placed across a 64B PC boundary. Opcodes should not translate to more
+	// than one prefixed instruction. The prefixed instruction should be
+	// written first (e.g. when Optab.size > 8).
+ ispfx bool
+
+ asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
+}
+
+// optab contains an array, to be sliced per opcode, of the accepted operand
+// combinations for each instruction. For clarity, unused arguments and fields
+// are not explicitly enumerated and should not be listed; they always assume
+// the default value for their type.
+//
+// optab does not list every valid ppc64 opcode; it enumerates representative
+// operand combinations for a class of instruction. The variable oprange indexes
+// all valid ppc64 opcodes.
+//
+// oprange is initialized to point to a slice within optab which contains the
+// valid operand combinations for a given instruction. This is done in buildop.
+//
+// Likewise, each slice of optab is dynamically sorted using the ocmp Sort
+// interface so as to minimize the text size of each opcode.
+var optab = []Optab{
+ {as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
+ {as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
+ {as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
+ {as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
+ /* move register */
+ {as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+ {as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
+ {as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+ {as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+ {as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
+ {as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
+ {as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
+ {as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
+ {as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
+ {as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
+ {as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+ {as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+ {as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
+ {as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
+ {as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
+ {as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
+ {as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
+ {as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
+ {as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+ {as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
+ {as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
+ {as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
+ {as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
+ {as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
+ {as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+ {as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+ {as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+ {as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+ {as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
+ {as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
+ {as: AMULLW, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
+ {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4},
+ {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4},
+ {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4},
+ {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12},
+ {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */
+ {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
+ {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
+ {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
+ {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+ {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
+ {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
+ {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
+ {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
+ {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
+ {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+ {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */
+ {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+ {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */
+ {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4},
+ {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
+ {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
+ {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4},
+ {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4},
+ {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4},
+ {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4},
+ {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
+ {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
+ {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+ {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4},
+ {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4},
+ {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
+ {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4},
+ {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4},
+ {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4},
+ {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4},
+ {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4},
+ {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
+ {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4},
+ {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
+ {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
+ {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
+ {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4},
+ {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4},
+ {as: AFADD, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4},
+ {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
+ {as: AFABS, a6: C_FREG, type_: 33, size: 4},
+ {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4},
+ {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4},
+ {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4},
+
+ {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
+ {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
+
+ {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
+ {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
+
+ {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
+ {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
+
+ {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8},
+ {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8},
+ {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4},
+
+ {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
+ {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
+ {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4},
+
+ {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
+ {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
+ {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8},
+ {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
+ {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8},
+ {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
+ {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4},
+
+ {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVW, a1: C_SACON, a6: C_REG, type_: 3, size: 4},
+ {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4},
+ {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4},
+ {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4},
+ {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4},
+ {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
+ {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4},
+ {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4},
+ {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4},
+
+ {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8},
+ {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4},
+ {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4},
+ {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4},
+ {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4},
+ {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4},
+ {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4},
+
+ {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
+ {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4},
+
+ {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4},
+ {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4},
+
+ {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4},
+ {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4},
+ {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4},
+ {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4},
+ {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4},
+ {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4},
+ {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4},
+ {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4},
+
+ {as: ASYSCALL, type_: 5, size: 4},
+ {as: ASYSCALL, a1: C_REG, type_: 77, size: 12},
+ {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12},
+ {as: ABEQ, a6: C_SBRA, type_: 16, size: 4},
+ {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4},
+ {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label
+ {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop
+ {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr
+ {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr
+ {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label
+ {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label
+ {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi
+ {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh
+ {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi
+ {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4},
+ {as: ASYNC, type_: 46, size: 4},
+ {as: AWORD, a1: C_LCON, type_: 40, size: 4},
+ {as: ADWORD, a1: C_64CON, type_: 31, size: 8},
+ {as: ADWORD, a1: C_LACON, type_: 31, size: 8},
+ {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4},
+ {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4},
+ {as: AEXTSB, a6: C_REG, type_: 48, size: 4},
+ {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
+ {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4},
+ {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4},
+ {as: ANEG, a6: C_REG, type_: 47, size: 4},
+ {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12},
+ {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12},
+ {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16},
+ {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16},
+ {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12},
+ {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12},
+ {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4},
+ /* Other ISA 2.05+ instructions */
+ {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */
+ {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */
+ {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */
+ {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4},
+ {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */
+ {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */
+ {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */
+ {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */
+ {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */
+ {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */
+ {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */
+
+ /* Misc ISA 3.0 instructions */
+ {as: ASETB, a1: C_CREG, a6: C_REG, type_: 110, size: 4},
+ {as: AVCLZLSBB, a1: C_VREG, a6: C_REG, type_: 85, size: 4},
+
+ /* Vector instructions */
+
+ /* Vector load */
+ {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */
+
+ /* Vector store */
+ {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */
+
+ /* Vector logical */
+ {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */
+ {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */
+
+ /* Vector add */
+ {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */
+ {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */
+ {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */
+ {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */
+ {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */
+
+ /* Vector subtract */
+ {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */
+ {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */
+ {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */
+ {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */
+ {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector subtract extended, va-form */
+
+ /* Vector multiply */
+ {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */
+ {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */
+ {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */
+
+ /* Vector rotate */
+ {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */
+
+ /* Vector shift */
+ {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */
+ {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */
+ {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */
+
+ /* Vector count */
+ {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */
+ {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */
+
+ /* Vector compare */
+ {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */
+ {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */
+ {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */
+
+ /* Vector merge */
+ {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */
+
+ /* Vector permute */
+ {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */
+
+ /* Vector bit permute */
+ {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */
+
+ /* Vector select */
+ {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */
+
+ /* Vector splat */
+ {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */
+ {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4},
+ {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */
+ {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4},
+
+ /* Vector AES */
+ {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */
+ {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */
+ {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */
+
+ /* Vector SHA */
+ {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */
+
+ /* VSX vector load */
+ {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */
+ {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */
+ {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */
+
+ /* VSX vector store */
+ {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */
+ {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */
+ {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */
+
+ /* VSX scalar load */
+ {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */
+
+ /* VSX scalar store */
+ {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */
+
+ /* VSX scalar as integer load */
+ {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */
+
+ /* VSX scalar store as integer */
+ {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */
+
+ /* VSX move from VSR */
+ {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4},
+ {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4},
+
+ /* VSX move to VSR */
+ {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4},
+ {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4},
+ {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4},
+
+ /* VSX logical */
+ {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */
+ {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */
+
+ /* VSX select */
+ {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */
+
+ /* VSX merge */
+ {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */
+
+ /* VSX splat */
+ {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */
+ {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */
+
+ /* VSX permute */
+ {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */
+
+ /* VSX shift */
+ {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */
+
+ /* VSX reverse bytes */
+ {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */
+
+ /* VSX scalar FP-FP conversion */
+ {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */
+
+ /* VSX vector FP-FP conversion */
+ {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */
+
+ /* VSX scalar FP-integer conversion */
+ {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */
+
+ /* VSX scalar integer-FP conversion */
+ {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */
+
+ /* VSX vector FP-integer conversion */
+ {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */
+
+ /* VSX vector integer-FP conversion */
+ {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */
+
+ {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4},
+ {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
+ {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4},
+ {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4},
+ {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4},
+ {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4},
+ {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4},
+ {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4},
+ {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4},
+ {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4},
+ {as: ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4},
+ {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4},
+ {as: ADCBF, a1: C_SOREG, type_: 43, size: 4},
+ {as: ADCBF, a1: C_XOREG, type_: 43, size: 4},
+ {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4},
+ {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4},
+ {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4},
+ {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4},
+ {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
+ {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
+ {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4},
+ {as: AEIEIO, type_: 46, size: 4},
+ {as: ATLBIE, a1: C_REG, type_: 49, size: 4},
+ {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4},
+ {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
+ {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4},
+ {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4},
+ {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4},
+ {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4},
+ {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4},
+
+ {as: obj.AUNDEF, type_: 78, size: 4},
+ {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0},
+ {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0},
+ {as: obj.ANOP, type_: 0, size: 0},
+ {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689
+ {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior
+ {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0},
+ {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
+ {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL
+ {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code
+}
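+
+// Illustrative sketch (simplified; the real matcher is oplook, not shown in
+// this hunk): an instruction is matched by classifying its operands and
+// scanning the oprange slice for its opcode until an entry with compatible
+// argument classes is found. The ok predicate stands in for the xcmp
+// compatibility table defined below.
+func matchSketch(as obj.As, a1, a2, a3, a4, a5, a6 uint8,
+	ok func(want, have uint8) bool) *Optab {
+	ops := oprange[as&obj.AMask]
+	for i := range ops {
+		o := &ops[i]
+		if ok(o.a1, a1) && ok(o.a2, a2) && ok(o.a3, a3) &&
+			ok(o.a4, a4) && ok(o.a5, a5) && ok(o.a6, a6) {
+			return o
+		}
+	}
+	return nil
+}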
+
+// PrefixableOptab entries are opcodes which may generate different instruction
+// sequences depending on whether prefixed opcode support is available.
+type PrefixableOptab struct {
+ Optab
+ minGOPPC64 int // Minimum GOPPC64 required to support this.
+ pfxsize int8 // Instruction sequence size when prefixed opcodes are used
+}
+
+// The prefixable optab entries contain the pseudo-opcodes which generate relocations, or which may generate
+// a more efficient sequence of instructions if a prefixed version exists (e.g. paddi instead of oris/ori/add).
+//
+// This table is meant to transform all sequences which might be TOC-relative into an equivalent PC-relative
+// sequence. It also encompasses several transformations which do not involve relocations; those could be
+// separated out and applied to AIX and other non-ELF targets. Likewise, the prefixed forms do not have encoding
+// restrictions on the offset, so they are also used for static binaries to allow better code generation, e.g.
+//
+//	MOVD something-byte-aligned(Rx), Ry
+//	MOVD 3(Rx), Ry
+//
+// is allowed when the prefixed forms are used.
+//
+// This requires an ISA 3.1 compatible cpu (e.g. Power10) and, when linking externally, an ELFv2 1.5 compliant linker.
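+//
+// For example, with prefixed opcodes "MOVD foo(SB), R5" can assemble to a
+// single prefixed pld carrying a PC-relative relocation, rather than an
+// addis/ld pair addressed relative to the TOC.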
+var prefixableOptab = []PrefixableOptab{
+ {Optab: Optab{as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, minGOPPC64: 10, pfxsize: 12},
+ {Optab: Optab{as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
+
+ {Optab: Optab{as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
+
+ {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, minGOPPC64: 10, pfxsize: 12},
+ {Optab: Optab{as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, minGOPPC64: 10, pfxsize: 12},
+ {Optab: Optab{as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
+
+ {Optab: Optab{as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
+
+ {Optab: Optab{as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, minGOPPC64: 10, pfxsize: 8},
+
+ {Optab: Optab{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
+ {Optab: Optab{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12}, minGOPPC64: 10, pfxsize: 8},
+}
+
+var oprange [ALAST & obj.AMask][]Optab
+
+var xcmp [C_NCLASS][C_NCLASS]bool
+
+var pfxEnabled = false // ISA 3.1 prefixed instructions are supported.
+var buildOpCfg = "" // Save the os/cpu/arch tuple used to configure the assembler in buildop
+
+// addpad returns the number of padding bytes to add to align code as requested.
+func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int {
+ switch a {
+ case 8, 16, 32, 64:
+ // By default function alignment is 16. If an alignment > 16 is
+ // requested then the function alignment must also be promoted.
+ // The function alignment is not promoted on AIX at this time.
+ // TODO: Investigate AIX function alignment.
+ if ctxt.Headtype != objabi.Haix && cursym.Func().Align < int32(a) {
+ cursym.Func().Align = int32(a)
+ }
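+ // e.g. pc == 60 with a == 32: 60&31 == 28, so 32-28 == 4 bytes of padding are returned.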
+ if pc&(a-1) != 0 {
+ return int(a - (pc & (a - 1)))
+ }
+ default:
+ ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a)
+ }
+ return 0
+}
+
+// getimpliedreg returns the implied register of an operand which doesn't specify one. These show up
+// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied,
+// or "MOVD R5, foo+10(SP)" where a pseudo-register is used. The other common case is when
+// generating constants in a register like "MOVD $constant, Rx".
+func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int {
+ class := oclass(a)
+ if class >= C_ZCON && class <= C_64CON {
+ return REGZERO
+ }
+ switch class {
+ case C_SACON, C_LACON:
+ return REGSP
+ case C_LOREG, C_SOREG, C_ZOREG, C_XOREG:
+ switch a.Name {
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
+ return REGSB
+ case obj.NAME_AUTO, obj.NAME_PARAM:
+ return REGSP
+ case obj.NAME_NONE:
+ return REGZERO
+ }
+ }
+ c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p)
+ return 0
+}
+
+func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
+ p := cursym.Func().Text
+ if p == nil || p.Link == nil { // handle external functions and ELF section symbols
+ return
+ }
+
+ if oprange[AANDN&obj.AMask] == nil {
+ ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first")
+ }
+
+ c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)}
+
+ pc := int64(0)
+ p.Pc = pc
+
+ var m int
+ var o *Optab
+ for p = p.Link; p != nil; p = p.Link {
+ p.Pc = pc
+ o = c.oplook(p)
+ m = int(o.size)
+ if m == 0 {
+ if p.As == obj.APCALIGN {
+ a := c.vregoff(&p.From)
+ m = addpad(pc, a, ctxt, cursym)
+ } else {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+ }
+ pc += int64(m)
+ }
+
+ c.cursym.Size = pc
+
+ /*
+ * if any procedure is large enough to
+ * generate a large SBRA branch, then
+ * generate extra passes putting branches
+ * around jmps to fix. this is rare.
+ */
+ bflag := 1
+
+ var otxt int64
+ var q *obj.Prog
+ var out [5]uint32
+ var falign int32 // Track increased alignment requirements for prefix.
+ for bflag != 0 {
+ bflag = 0
+ pc = 0
+ falign = 0 // Note, linker bumps function symbols to funcAlign.
+ for p = c.cursym.Func().Text.Link; p != nil; p = p.Link {
+ p.Pc = pc
+ o = c.oplook(p)
+
+ // very large conditional branches
+ if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil {
+ otxt = p.To.Target().Pc - pc
+ if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 {
+ // Assemble the instruction with a target not too far to figure out BI and BO fields.
+ // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted,
+ // and only one extra branch is needed to reach the target.
+ tgt := p.To.Target()
+ p.To.SetTarget(p.Link)
+ o.asmout(&c, p, o, &out)
+ p.To.SetTarget(tgt)
+
+ bo := int64(out[0]>>21) & 31
+ bi := int16((out[0] >> 16) & 31)
+ invertible := false
+
+ if bo&0x14 == 0x14 {
+ // A conditional branch that is unconditionally taken. This cannot be inverted.
+ } else if bo&0x10 == 0x10 {
+ // A branch based on the value of CTR. Invert the CTR comparison against zero bit.
+ bo ^= 0x2
+ invertible = true
+ } else if bo&0x04 == 0x04 {
+ // A branch based on CR bit. Invert the BI comparison bit.
+ bo ^= 0x8
+ invertible = true
+ }
+
+ if invertible {
+ // Rewrite
+ // BC bo,...,far_away_target
+ // NEXT_INSN
+ // to:
+ // BC invert(bo),next_insn
+ // JMP far_away_target
+ // next_insn:
+ // NEXT_INSN
+ p.As = ABC
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo}
+ q = c.newprog()
+ q.As = ABR
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.SetTarget(p.To.Target())
+ q.Link = p.Link
+ p.To.SetTarget(p.Link)
+ p.Link = q
+ p.Reg = REG_CRBIT0 + bi
+ } else {
+ // Rewrite
+ // BC ...,far_away_target
+ // NEXT_INSN
+ // to
+ // BC ...,tmp
+ // JMP next_insn
+ // tmp:
+ // JMP far_away_target
+ // next_insn:
+ // NEXT_INSN
+ q = c.newprog()
+ q.Link = p.Link
+ p.Link = q
+ q.As = ABR
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.SetTarget(p.To.Target())
+ p.To.SetTarget(q)
+ q = c.newprog()
+ q.Link = p.Link
+ p.Link = q
+ q.As = ABR
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.SetTarget(q.Link.Link)
+ }
+ bflag = 1
+ }
+ }
+
+ m = int(o.size)
+ if m == 0 {
+ if p.As == obj.APCALIGN {
+ a := c.vregoff(&p.From)
+ m = addpad(pc, a, ctxt, cursym)
+ } else {
+ if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA {
+ ctxt.Diag("zero-width instruction\n%v", p)
+ }
+ continue
+ }
+ }
+
+ // Prefixed instructions cannot be placed across a 64B boundary.
+ // Mark and adjust the PC of those which would be, so that a nop
+ // can be inserted during final assembly.
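+ // For example, an 8-byte prefixed insn at pc&63 == 60 would span
+ // bytes 60-67 of a 64B block; 4 bytes of padding realign it to the
+ // start of the next block.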
+ if o.ispfx {
+ mark := p.Mark &^ PFX_X64B
+ if pc&63 == 60 {
+ p.Pc += 4
+ m += 4
+ mark |= PFX_X64B
+ }
+
+ // Marks may be adjusted if a too-far conditional branch is
+ // fixed up above. Likewise, inserting a NOP may cause a
+ // branch target to become too far away. We need to run
+ // another iteration and verify no additional changes
+ // are needed.
+ if mark != p.Mark {
+ bflag = 1
+ p.Mark = mark
+ }
+
+ // Check for 16 or 32B crossing of this prefixed insn.
+ // These do not require padding, but do require increasing
+ // the function alignment to prevent them from potentially
+ // crossing a 64B boundary when the linker assigns the final
+ // PC.
+ switch p.Pc & 31 {
+ case 28: // 32B crossing
+ falign = 64
+ case 12: // 16B crossing
+ if falign < 64 {
+ falign = 32
+ }
+ }
+ }
+
+ pc += int64(m)
+ }
+
+ c.cursym.Size = pc
+ }
+
+ c.cursym.Size = pc
+ c.cursym.Func().Align = falign
+ c.cursym.Grow(c.cursym.Size)
+
+ // lay out the code, emitting code and data relocations.
+
+ bp := c.cursym.P
+ nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0)
+ var i int32
+ for p := c.cursym.Func().Text.Link; p != nil; p = p.Link {
+ c.pc = p.Pc
+ o = c.oplook(p)
+ if int(o.size) > 4*len(out) {
+ log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p)
+ }
+ // asmout is not set up to add large amounts of padding
+ if o.type_ == 0 && p.As == obj.APCALIGN {
+ aln := c.vregoff(&p.From)
+ v := addpad(p.Pc, aln, c.ctxt, c.cursym)
+ if v > 0 {
+ // Same padding instruction for all
+ for i = 0; i < int32(v/4); i++ {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
+ bp = bp[4:]
+ }
+ }
+ } else {
+ if p.Mark&PFX_X64B != 0 {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, nop)
+ bp = bp[4:]
+ }
+ o.asmout(&c, p, o, &out)
+ for i = 0; i < int32(o.size/4); i++ {
+ c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i])
+ bp = bp[4:]
+ }
+ }
+ }
+}
+
+func isint32(v int64) bool {
+ return int64(int32(v)) == v
+}
+
+func isuint32(v uint64) bool {
+ return uint64(uint32(v)) == v
+}
+
+func (c *ctxt9) aclassreg(reg int16) int {
+ if REG_R0 <= reg && reg <= REG_R31 {
+ return C_REGP + int(reg&1)
+ }
+ if REG_F0 <= reg && reg <= REG_F31 {
+ return C_FREGP + int(reg&1)
+ }
+ if REG_V0 <= reg && reg <= REG_V31 {
+ return C_VREG
+ }
+ if REG_VS0 <= reg && reg <= REG_VS63 {
+ return C_VSREGP + int(reg&1)
+ }
+ if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR {
+ return C_CREG
+ }
+ if REG_CR0LT <= reg && reg <= REG_CR7SO {
+ return C_CRBIT
+ }
+ if REG_SPR0 <= reg && reg <= REG_SPR0+1023 {
+ switch reg {
+ case REG_LR:
+ return C_LR
+
+ case REG_XER:
+ return C_XER
+
+ case REG_CTR:
+ return C_CTR
+ }
+
+ return C_SPR
+ }
+ if REG_A0 <= reg && reg <= REG_A7 {
+ return C_AREG
+ }
+ if reg == REG_FPSCR {
+ return C_FPSCR
+ }
+ return C_GOK
+}
+
+func (c *ctxt9) aclass(a *obj.Addr) int {
+ switch a.Type {
+ case obj.TYPE_NONE:
+ return C_NONE
+
+ case obj.TYPE_REG:
+ return c.aclassreg(a.Reg)
+
+ case obj.TYPE_MEM:
+ if a.Index != 0 {
+ if a.Name != obj.NAME_NONE || a.Offset != 0 {
+ c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class)
+
+ }
+ return C_XOREG
+ }
+ switch a.Name {
+ case obj.NAME_GOTREF, obj.NAME_TOCREF:
+ return C_ADDR
+
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ c.instoffset = a.Offset
+ if a.Sym == nil {
+ break
+ } else if a.Sym.Type == objabi.STLSBSS {
+ // For PIC builds, use 12 byte got initial-exec TLS accesses.
+ if c.ctxt.Flag_shared {
+ return C_TLS_IE
+ }
+ // Otherwise, use 8 byte local-exec TLS accesses.
+ return C_TLS_LE
+ } else {
+ return C_ADDR
+ }
+
+ case obj.NAME_AUTO:
+ c.instoffset = int64(c.autosize) + a.Offset
+
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SOREG
+ }
+ return C_LOREG
+
+ case obj.NAME_PARAM:
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SOREG
+ }
+ return C_LOREG
+
+ case obj.NAME_NONE:
+ c.instoffset = a.Offset
+ if a.Offset == 0 && a.Index == 0 {
+ return C_ZOREG
+ } else if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SOREG
+ } else {
+ return C_LOREG
+ }
+ }
+
+ return C_GOK
+
+ case obj.TYPE_TEXTSIZE:
+ return C_TEXTSIZE
+
+ case obj.TYPE_FCONST:
+ // The only cases where FCONST will occur are with float64 +/- 0.
+ // All other float constants are generated in memory.
+ f64 := a.Val.(float64)
+ if f64 == 0 {
+ if math.Signbit(f64) {
+ return C_ADDCON
+ }
+ return C_ZCON
+ }
+ log.Fatalf("Unexpected nonzero FCONST operand %v", a)
+
+ case obj.TYPE_CONST,
+ obj.TYPE_ADDR:
+ switch a.Name {
+ case obj.NAME_NONE:
+ c.instoffset = a.Offset
+ if a.Reg != 0 {
+ if -BIG <= c.instoffset && c.instoffset < BIG {
+ return C_SACON
+ }
+ if isint32(c.instoffset) {
+ return C_LACON
+ }
+ return C_DACON
+ }
+
+ case obj.NAME_EXTERN,
+ obj.NAME_STATIC:
+ s := a.Sym
+ if s == nil {
+ return C_GOK
+ }
+ c.instoffset = a.Offset
+ return C_LACON
+
+ case obj.NAME_AUTO:
+ c.instoffset = int64(c.autosize) + a.Offset
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ case obj.NAME_PARAM:
+ c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize
+ if c.instoffset >= -BIG && c.instoffset < BIG {
+ return C_SACON
+ }
+ return C_LACON
+
+ default:
+ return C_GOK
+ }
+
+ if c.instoffset >= 0 {
+ sbits := bits.Len64(uint64(c.instoffset))
+ switch {
+ case sbits <= 5:
+ return C_ZCON + sbits
+ case sbits <= 8:
+ return C_U8CON
+ case sbits <= 15:
+ return C_U15CON
+ case sbits <= 16:
+ return C_U16CON
+ case sbits <= 31:
+ // Special case, a positive int32 value which is a multiple of 2^16
+ if c.instoffset&0xFFFF == 0 {
+ return C_U3216CON
+ }
+ return C_U32CON
+ case sbits <= 32:
+ return C_U32CON
+ case sbits <= 33:
+ return C_S34CON
+ default:
+ return C_64CON
+ }
+ } else {
+ sbits := bits.Len64(uint64(^c.instoffset))
+ switch {
+ case sbits <= 15:
+ return C_S16CON
+ case sbits <= 31:
+ // Special case, a negative int32 value which is a multiple of 2^16
+ if c.instoffset&0xFFFF == 0 {
+ return C_S3216CON
+ }
+ return C_S32CON
+ case sbits <= 33:
+ return C_S34CON
+ default:
+ return C_64CON
+ }
+ }
+
+ case obj.TYPE_BRANCH:
+ if a.Sym != nil && c.ctxt.Flag_dynlink && !pfxEnabled {
+ return C_LBRAPIC
+ }
+ return C_SBRA
+ }
+
+ return C_GOK
+}
+
+func prasm(p *obj.Prog) {
+ fmt.Printf("%v\n", p)
+}
+
+func (c *ctxt9) oplook(p *obj.Prog) *Optab {
+ a1 := int(p.Optab)
+ if a1 != 0 {
+ return &optab[a1-1]
+ }
+ a1 = int(p.From.Class)
+ if a1 == 0 {
+ a1 = c.aclass(&p.From) + 1
+ p.From.Class = int8(a1)
+ }
+ a1--
+
+ argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1}
+ for i, ap := range p.RestArgs {
+ argsv[i] = int(ap.Addr.Class)
+ if argsv[i] == 0 {
+ argsv[i] = c.aclass(&ap.Addr) + 1
+ ap.Addr.Class = int8(argsv[i])
+ }
+
+ }
+ a3 := argsv[0] - 1
+ a4 := argsv[1] - 1
+ a5 := argsv[2] - 1
+
+ a6 := int(p.To.Class)
+ if a6 == 0 {
+ a6 = c.aclass(&p.To) + 1
+ p.To.Class = int8(a6)
+ }
+ a6--
+
+ a2 := C_NONE
+ if p.Reg != 0 {
+ a2 = c.aclassreg(p.Reg)
+ }
+
+ // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6)
+ ops := oprange[p.As&obj.AMask]
+ c1 := &xcmp[a1]
+ c2 := &xcmp[a2]
+ c3 := &xcmp[a3]
+ c4 := &xcmp[a4]
+ c5 := &xcmp[a5]
+ c6 := &xcmp[a6]
+ for i := range ops {
+ op := &ops[i]
+ if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] {
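+ // ops is a sub-slice of optab, so cap(optab)-cap(ops) is the index
+ // of ops[0] within optab; +1 reserves 0 to mean "not yet cached".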
+ p.Optab = uint16(cap(optab) - cap(ops) + i + 1)
+ return op
+ }
+ }
+
+ c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6))
+ prasm(p)
+ if ops == nil {
+ ops = optab
+ }
+ return &ops[0]
+}
+
+// Compare two operand types (e.g. C_REG, or C_SCON)
+// and return true if b is compatible with a.
+//
+// Argument comparison isn't symmetric, so care must be taken.
+// a is the argument type as found in optab, b is the argument as
+// classified by aclass.
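+//
+// For example, cmp(C_LOREG, C_SOREG) is true (an entry accepting a large
+// offset also accepts a small one), but cmp(C_SOREG, C_LOREG) is not.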
+func cmp(a int, b int) bool {
+ if a == b {
+ return true
+ }
+ switch a {
+
+ case C_SPR:
+ if b == C_LR || b == C_XER || b == C_CTR {
+ return true
+ }
+
+ case C_U1CON:
+ return cmp(C_ZCON, b)
+ case C_U2CON:
+ return cmp(C_U1CON, b)
+ case C_U3CON:
+ return cmp(C_U2CON, b)
+ case C_U4CON:
+ return cmp(C_U3CON, b)
+ case C_U5CON:
+ return cmp(C_U4CON, b)
+ case C_U8CON:
+ return cmp(C_U5CON, b)
+ case C_U15CON:
+ return cmp(C_U8CON, b)
+ case C_U16CON:
+ return cmp(C_U15CON, b)
+
+ case C_S16CON:
+ return cmp(C_U15CON, b)
+ case C_32CON:
+ return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b)
+ case C_S34CON:
+ return cmp(C_32CON, b)
+ case C_64CON:
+ return cmp(C_S34CON, b)
+
+ case C_32S16CON:
+ return cmp(C_ZCON, b)
+
+ case C_LACON:
+ return cmp(C_SACON, b)
+
+ case C_LBRA:
+ return cmp(C_SBRA, b)
+
+ case C_SOREG:
+ return cmp(C_ZOREG, b)
+
+ case C_LOREG:
+ return cmp(C_SOREG, b)
+
+ case C_XOREG:
+ return cmp(C_REG, b) || cmp(C_ZOREG, b)
+
+ // An even/odd register input always matches the regular register types.
+ case C_REG:
+ return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0)
+ case C_FREG:
+ return cmp(C_FREGP, b)
+ case C_VSREG:
+ /* Allow any VR argument as a VSR operand. */
+ return cmp(C_VSREGP, b) || cmp(C_VREG, b)
+
+ case C_ANY:
+ return true
+ }
+
+ return false
+}
+
+// optabLess is used when sorting the optab. Sorting is
+// done so that the best choice of opcode/operand
+// combination is considered first.
+func optabLess(i, j int) bool {
+ p1 := &optab[i]
+ p2 := &optab[j]
+ n := int(p1.as) - int(p2.as)
+ // same opcode
+ if n != 0 {
+ return n < 0
+ }
+ // Consider those that generate fewer
+ // instructions first.
+ n = int(p1.size) - int(p2.size)
+ if n != 0 {
+ return n < 0
+ }
+ // operand order should match
+ // better choices first
+ n = int(p1.a1) - int(p2.a1)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a2) - int(p2.a2)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a3) - int(p2.a3)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a4) - int(p2.a4)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a5) - int(p2.a5)
+ if n != 0 {
+ return n < 0
+ }
+ n = int(p1.a6) - int(p2.a6)
+ if n != 0 {
+ return n < 0
+ }
+ return false
+}
+
+// opset adds an entry to the opcode table for
+// the new opcode a, giving it the same operand combinations
+// as the existing opcode b0.
+func opset(a, b0 obj.As) {
+ oprange[a&obj.AMask] = oprange[b0]
+}
+
+// NeedTOCpointer reports whether the build configuration requires a TOC pointer.
+// It is assumed this is always called after buildop.
+func NeedTOCpointer(ctxt *obj.Link) bool {
+ return !pfxEnabled && ctxt.Flag_shared
+}
+
+// Build the opcode table
+func buildop(ctxt *obj.Link) {
+ // Limit PC-relative prefix instruction usage to supported and tested targets.
+ pfxEnabled = buildcfg.GOPPC64 >= 10 && buildcfg.GOOS == "linux"
+ cfg := fmt.Sprintf("power%d/%s/%s", buildcfg.GOPPC64, buildcfg.GOARCH, buildcfg.GOOS)
+ if cfg == buildOpCfg {
+ // Already initialized to correct OS/cpu; stop now.
+ // This happens in the cmd/asm tests,
+ // each of which re-initializes the arch.
+ return
+ }
+ buildOpCfg = cfg
+
+ // Configure the optab entries which may generate prefix opcodes.
+ prefixOptab := make([]Optab, 0, len(prefixableOptab))
+ for _, entry := range prefixableOptab {
+ entry := entry
+ if pfxEnabled && buildcfg.GOPPC64 >= entry.minGOPPC64 {
+ // Enable prefix opcode generation and resize.
+ entry.ispfx = true
+ entry.size = entry.pfxsize
+ }
+ // Use the legacy assembler function if none provided.
+ if entry.asmout == nil {
+ entry.asmout = asmout
+ }
+ prefixOptab = append(prefixOptab, entry.Optab)
+
+ }
+
+ for i := 0; i < C_NCLASS; i++ {
+ for n := 0; n < C_NCLASS; n++ {
+ if cmp(n, i) {
+ xcmp[i][n] = true
+ }
+ }
+ }
+ for i := range optab {
+ // Use the legacy assembler function if none provided.
+ if optab[i].asmout == nil {
+ optab[i].asmout = asmout
+ }
+ }
+ // Append the generated entries, sort, and fill out oprange.
+ optab = append(optab, optabGen...)
+ optab = append(optab, prefixOptab...)
+ sort.Slice(optab, optabLess)
+
+ for i := 0; i < len(optab); {
+ r := optab[i].as
+ r0 := r & obj.AMask
+ start := i
+ for i < len(optab) && optab[i].as == r {
+ i++
+ }
+ oprange[r0] = optab[start:i]
+
+ switch r {
+ default:
+ if !opsetGen(r) {
+ ctxt.Diag("unknown op in build: %v", r)
+ log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r)
+ }
+
+ case ADCBF: /* unary indexed: op (b+a); op (b) */
+ opset(ADCBI, r0)
+
+ opset(ADCBST, r0)
+ opset(ADCBT, r0)
+ opset(ADCBTST, r0)
+ opset(ADCBZ, r0)
+ opset(AICBI, r0)
+
+ case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */
+ opset(ASTWCCC, r0)
+ opset(ASTHCCC, r0)
+ opset(ASTBCCC, r0)
+
+ case AREM: /* macro */
+ opset(AREM, r0)
+
+ case AREMU:
+ opset(AREMU, r0)
+
+ case AREMD:
+ opset(AREMDU, r0)
+
+ case AMULLW:
+ opset(AMULLD, r0)
+
+ case ADIVW: /* op Rb[,Ra],Rd */
+ opset(AMULHW, r0)
+
+ opset(AMULHWCC, r0)
+ opset(AMULHWU, r0)
+ opset(AMULHWUCC, r0)
+ opset(AMULLWCC, r0)
+ opset(AMULLWVCC, r0)
+ opset(AMULLWV, r0)
+ opset(ADIVWCC, r0)
+ opset(ADIVWV, r0)
+ opset(ADIVWVCC, r0)
+ opset(ADIVWU, r0)
+ opset(ADIVWUCC, r0)
+ opset(ADIVWUV, r0)
+ opset(ADIVWUVCC, r0)
+ opset(AMODUD, r0)
+ opset(AMODUW, r0)
+ opset(AMODSD, r0)
+ opset(AMODSW, r0)
+ opset(AADDCC, r0)
+ opset(AADDCV, r0)
+ opset(AADDCVCC, r0)
+ opset(AADDV, r0)
+ opset(AADDVCC, r0)
+ opset(AADDE, r0)
+ opset(AADDECC, r0)
+ opset(AADDEV, r0)
+ opset(AADDEVCC, r0)
+ opset(AMULHD, r0)
+ opset(AMULHDCC, r0)
+ opset(AMULHDU, r0)
+ opset(AMULHDUCC, r0)
+ opset(AMULLDCC, r0)
+ opset(AMULLDVCC, r0)
+ opset(AMULLDV, r0)
+ opset(ADIVD, r0)
+ opset(ADIVDCC, r0)
+ opset(ADIVDE, r0)
+ opset(ADIVDEU, r0)
+ opset(ADIVDECC, r0)
+ opset(ADIVDEUCC, r0)
+ opset(ADIVDVCC, r0)
+ opset(ADIVDV, r0)
+ opset(ADIVDU, r0)
+ opset(ADIVDUV, r0)
+ opset(ADIVDUVCC, r0)
+ opset(ADIVDUCC, r0)
+
+ case ACRAND:
+ opset(ACRANDN, r0)
+ opset(ACREQV, r0)
+ opset(ACRNAND, r0)
+ opset(ACRNOR, r0)
+ opset(ACROR, r0)
+ opset(ACRORN, r0)
+ opset(ACRXOR, r0)
+
+ case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */
+ opset(APOPCNTW, r0)
+ opset(APOPCNTB, r0)
+ opset(ACNTTZW, r0)
+ opset(ACNTTZWCC, r0)
+ opset(ACNTTZD, r0)
+ opset(ACNTTZDCC, r0)
+
+ case ACOPY: /* copy, paste. */
+ opset(APASTECC, r0)
+
+ case AMADDHD: /* maddhd, maddhdu, maddld */
+ opset(AMADDHDU, r0)
+ opset(AMADDLD, r0)
+
+ case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */
+ opset(AMOVH, r0)
+ opset(AMOVHZ, r0)
+
+ case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */
+ opset(AMOVHU, r0)
+
+ opset(AMOVHZU, r0)
+ opset(AMOVWU, r0)
+ opset(AMOVWZU, r0)
+ opset(AMOVDU, r0)
+ opset(AMOVMW, r0)
+
+ case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */
+ opset(ALVEHX, r0)
+ opset(ALVEWX, r0)
+ opset(ALVX, r0)
+ opset(ALVXL, r0)
+ opset(ALVSL, r0)
+ opset(ALVSR, r0)
+
+ case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */
+ opset(ASTVEHX, r0)
+ opset(ASTVEWX, r0)
+ opset(ASTVX, r0)
+ opset(ASTVXL, r0)
+
+ case AVAND: /* vand, vandc, vnand */
+ opset(AVAND, r0)
+ opset(AVANDC, r0)
+ opset(AVNAND, r0)
+
+ case AVMRGOW: /* vmrgew, vmrgow */
+ opset(AVMRGEW, r0)
+
+ case AVOR: /* vor, vorc, vxor, vnor, veqv */
+ opset(AVOR, r0)
+ opset(AVORC, r0)
+ opset(AVXOR, r0)
+ opset(AVNOR, r0)
+ opset(AVEQV, r0)
+
+ case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */
+ opset(AVADDUBM, r0)
+ opset(AVADDUHM, r0)
+ opset(AVADDUWM, r0)
+ opset(AVADDUDM, r0)
+ opset(AVADDUQM, r0)
+
+ case AVADDCU: /* vaddcuq, vaddcuw */
+ opset(AVADDCUQ, r0)
+ opset(AVADDCUW, r0)
+
+ case AVADDUS: /* vaddubs, vadduhs, vadduws */
+ opset(AVADDUBS, r0)
+ opset(AVADDUHS, r0)
+ opset(AVADDUWS, r0)
+
+ case AVADDSS: /* vaddsbs, vaddshs, vaddsws */
+ opset(AVADDSBS, r0)
+ opset(AVADDSHS, r0)
+ opset(AVADDSWS, r0)
+
+ case AVADDE: /* vaddeuqm, vaddecuq */
+ opset(AVADDEUQM, r0)
+ opset(AVADDECUQ, r0)
+
+ case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */
+ opset(AVSUBUBM, r0)
+ opset(AVSUBUHM, r0)
+ opset(AVSUBUWM, r0)
+ opset(AVSUBUDM, r0)
+ opset(AVSUBUQM, r0)
+
+ case AVSUBCU: /* vsubcuq, vsubcuw */
+ opset(AVSUBCUQ, r0)
+ opset(AVSUBCUW, r0)
+
+ case AVSUBUS: /* vsububs, vsubuhs, vsubuws */
+ opset(AVSUBUBS, r0)
+ opset(AVSUBUHS, r0)
+ opset(AVSUBUWS, r0)
+
+ case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */
+ opset(AVSUBSBS, r0)
+ opset(AVSUBSHS, r0)
+ opset(AVSUBSWS, r0)
+
+ case AVSUBE: /* vsubeuqm, vsubecuq */
+ opset(AVSUBEUQM, r0)
+ opset(AVSUBECUQ, r0)
+
+ case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */
+ opset(AVMULOSB, r0)
+ opset(AVMULEUB, r0)
+ opset(AVMULOUB, r0)
+ opset(AVMULESH, r0)
+ opset(AVMULOSH, r0)
+ opset(AVMULEUH, r0)
+ opset(AVMULOUH, r0)
+ opset(AVMULESW, r0)
+ opset(AVMULOSW, r0)
+ opset(AVMULEUW, r0)
+ opset(AVMULOUW, r0)
+ opset(AVMULUWM, r0)
+ case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */
+ opset(AVPMSUMB, r0)
+ opset(AVPMSUMH, r0)
+ opset(AVPMSUMW, r0)
+ opset(AVPMSUMD, r0)
+
+ case AVR: /* vrlb, vrlh, vrlw, vrld */
+ opset(AVRLB, r0)
+ opset(AVRLH, r0)
+ opset(AVRLW, r0)
+ opset(AVRLD, r0)
+
+ case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */
+ opset(AVSLB, r0)
+ opset(AVSLH, r0)
+ opset(AVSLW, r0)
+ opset(AVSL, r0)
+ opset(AVSLO, r0)
+ opset(AVSRB, r0)
+ opset(AVSRH, r0)
+ opset(AVSRW, r0)
+ opset(AVSR, r0)
+ opset(AVSRO, r0)
+ opset(AVSLD, r0)
+ opset(AVSRD, r0)
+
+ case AVSA: /* vsrab, vsrah, vsraw, vsrad */
+ opset(AVSRAB, r0)
+ opset(AVSRAH, r0)
+ opset(AVSRAW, r0)
+ opset(AVSRAD, r0)
+
+ case AVSOI: /* vsldoi */
+ opset(AVSLDOI, r0)
+
+ case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */
+ opset(AVCLZB, r0)
+ opset(AVCLZH, r0)
+ opset(AVCLZW, r0)
+ opset(AVCLZD, r0)
+
+ case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */
+ opset(AVPOPCNTB, r0)
+ opset(AVPOPCNTH, r0)
+ opset(AVPOPCNTW, r0)
+ opset(AVPOPCNTD, r0)
+
+ case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */
+ opset(AVCMPEQUB, r0)
+ opset(AVCMPEQUBCC, r0)
+ opset(AVCMPEQUH, r0)
+ opset(AVCMPEQUHCC, r0)
+ opset(AVCMPEQUW, r0)
+ opset(AVCMPEQUWCC, r0)
+ opset(AVCMPEQUD, r0)
+ opset(AVCMPEQUDCC, r0)
+
+ case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */
+ opset(AVCMPGTUB, r0)
+ opset(AVCMPGTUBCC, r0)
+ opset(AVCMPGTUH, r0)
+ opset(AVCMPGTUHCC, r0)
+ opset(AVCMPGTUW, r0)
+ opset(AVCMPGTUWCC, r0)
+ opset(AVCMPGTUD, r0)
+ opset(AVCMPGTUDCC, r0)
+ opset(AVCMPGTSB, r0)
+ opset(AVCMPGTSBCC, r0)
+ opset(AVCMPGTSH, r0)
+ opset(AVCMPGTSHCC, r0)
+ opset(AVCMPGTSW, r0)
+ opset(AVCMPGTSWCC, r0)
+ opset(AVCMPGTSD, r0)
+ opset(AVCMPGTSDCC, r0)
+
+ case AVCMPNEZB: /* vcmpnezb[.] */
+ opset(AVCMPNEZBCC, r0)
+ opset(AVCMPNEB, r0)
+ opset(AVCMPNEBCC, r0)
+ opset(AVCMPNEH, r0)
+ opset(AVCMPNEHCC, r0)
+ opset(AVCMPNEW, r0)
+ opset(AVCMPNEWCC, r0)
+
+ case AVPERM: /* vperm */
+ opset(AVPERMXOR, r0)
+ opset(AVPERMR, r0)
+
+ case AVBPERMQ: /* vbpermq, vbpermd */
+ opset(AVBPERMD, r0)
+
+ case AVSEL: /* vsel */
+ opset(AVSEL, r0)
+
+ case AVSPLTB: /* vspltb, vsplth, vspltw */
+ opset(AVSPLTH, r0)
+ opset(AVSPLTW, r0)
+
+ case AVSPLTISB: /* vspltisb, vspltish, vspltisw */
+ opset(AVSPLTISH, r0)
+ opset(AVSPLTISW, r0)
+
+ case AVCIPH: /* vcipher, vcipherlast */
+ opset(AVCIPHER, r0)
+ opset(AVCIPHERLAST, r0)
+
+ case AVNCIPH: /* vncipher, vncipherlast */
+ opset(AVNCIPHER, r0)
+ opset(AVNCIPHERLAST, r0)
+
+ case AVSBOX: /* vsbox */
+ opset(AVSBOX, r0)
+
+ case AVSHASIGMA: /* vshasigmaw, vshasigmad */
+ opset(AVSHASIGMAW, r0)
+ opset(AVSHASIGMAD, r0)
+
+ case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */
+ opset(ALXVDSX, r0)
+ opset(ALXVW4X, r0)
+ opset(ALXVH8X, r0)
+ opset(ALXVB16X, r0)
+
+ case ALXV: /* lxv */
+ opset(ALXV, r0)
+
+ case ALXVL: /* lxvl, lxvll, lxvx */
+ opset(ALXVLL, r0)
+ opset(ALXVX, r0)
+
+ case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */
+ opset(ASTXVW4X, r0)
+ opset(ASTXVH8X, r0)
+ opset(ASTXVB16X, r0)
+
+ case ASTXV: /* stxv */
+ opset(ASTXV, r0)
+
+ case ASTXVL: /* stxvl, stxvll, stvx */
+ opset(ASTXVLL, r0)
+ opset(ASTXVX, r0)
+
+ case ALXSDX: /* lxsdx */
+ opset(ALXSDX, r0)
+
+ case ASTXSDX: /* stxsdx */
+ opset(ASTXSDX, r0)
+
+ case ALXSIWAX: /* lxsiwax, lxsiwzx */
+ opset(ALXSIWZX, r0)
+
+ case ASTXSIWX: /* stxsiwx */
+ opset(ASTXSIWX, r0)
+
+ case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */
+ opset(AMFFPRD, r0)
+ opset(AMFVRD, r0)
+ opset(AMFVSRWZ, r0)
+ opset(AMFVSRLD, r0)
+
+ case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */
+ opset(AMTFPRD, r0)
+ opset(AMTVRD, r0)
+ opset(AMTVSRWA, r0)
+ opset(AMTVSRWZ, r0)
+ opset(AMTVSRWS, r0)
+
+ case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */
+ opset(AXXLANDC, r0)
+ opset(AXXLEQV, r0)
+ opset(AXXLNAND, r0)
+
+ case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */
+ opset(AXXLORC, r0)
+ opset(AXXLNOR, r0)
+ opset(AXXLORQ, r0)
+ opset(AXXLXOR, r0)
+
+ case AXXSEL: /* xxsel */
+ opset(AXXSEL, r0)
+
+ case AXXMRGHW: /* xxmrghw, xxmrglw */
+ opset(AXXMRGLW, r0)
+
+ case AXXSPLTW: /* xxspltw */
+ opset(AXXSPLTW, r0)
+
+ case AXXSPLTIB: /* xxspltib */
+ opset(AXXSPLTIB, r0)
+
+ case AXXPERM: /* xxpermdi */
+ opset(AXXPERM, r0)
+
+ case AXXSLDWI: /* xxsldwi */
+ opset(AXXPERMDI, r0)
+ opset(AXXSLDWI, r0)
+
+ case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */
+ opset(AXXBRD, r0)
+ opset(AXXBRW, r0)
+ opset(AXXBRH, r0)
+
+ case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */
+ opset(AXSCVSPDP, r0)
+ opset(AXSCVDPSPN, r0)
+ opset(AXSCVSPDPN, r0)
+
+ case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */
+ opset(AXVCVSPDP, r0)
+
+ case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */
+ opset(AXSCVDPSXWS, r0)
+ opset(AXSCVDPUXDS, r0)
+ opset(AXSCVDPUXWS, r0)
+
+ case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */
+ opset(AXSCVUXDDP, r0)
+ opset(AXSCVSXDSP, r0)
+ opset(AXSCVUXDSP, r0)
+
+ case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */
+ opset(AXVCVDPSXDS, r0)
+ opset(AXVCVDPSXWS, r0)
+ opset(AXVCVDPUXDS, r0)
+ opset(AXVCVDPUXWS, r0)
+ opset(AXVCVSPSXDS, r0)
+ opset(AXVCVSPSXWS, r0)
+ opset(AXVCVSPUXDS, r0)
+ opset(AXVCVSPUXWS, r0)
+
+ case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */
+ opset(AXVCVSXWDP, r0)
+ opset(AXVCVUXDDP, r0)
+ opset(AXVCVUXWDP, r0)
+ opset(AXVCVSXDSP, r0)
+ opset(AXVCVSXWSP, r0)
+ opset(AXVCVUXDSP, r0)
+ opset(AXVCVUXWSP, r0)
+
+ case AAND: /* logical op Rb,Rs,Ra; no literal */
+ opset(AANDN, r0)
+ opset(AANDNCC, r0)
+ opset(AEQV, r0)
+ opset(AEQVCC, r0)
+ opset(ANAND, r0)
+ opset(ANANDCC, r0)
+ opset(ANOR, r0)
+ opset(ANORCC, r0)
+ opset(AORCC, r0)
+ opset(AORN, r0)
+ opset(AORNCC, r0)
+ opset(AXORCC, r0)
+
+ case AADDME: /* op Ra, Rd */
+ opset(AADDMECC, r0)
+
+ opset(AADDMEV, r0)
+ opset(AADDMEVCC, r0)
+ opset(AADDZE, r0)
+ opset(AADDZECC, r0)
+ opset(AADDZEV, r0)
+ opset(AADDZEVCC, r0)
+ opset(ASUBME, r0)
+ opset(ASUBMECC, r0)
+ opset(ASUBMEV, r0)
+ opset(ASUBMEVCC, r0)
+ opset(ASUBZE, r0)
+ opset(ASUBZECC, r0)
+ opset(ASUBZEV, r0)
+ opset(ASUBZEVCC, r0)
+
+ case AADDC:
+ opset(AADDCCC, r0)
+
+ case ABEQ:
+ opset(ABGE, r0)
+ opset(ABGT, r0)
+ opset(ABLE, r0)
+ opset(ABLT, r0)
+ opset(ABNE, r0)
+ opset(ABVC, r0)
+ opset(ABVS, r0)
+
+ case ABR:
+ opset(ABL, r0)
+
+ case ABC:
+ opset(ABCL, r0)
+
+ case ABDNZ:
+ opset(ABDZ, r0)
+
+ case AEXTSB: /* op Rs, Ra */
+ opset(AEXTSBCC, r0)
+
+ opset(AEXTSH, r0)
+ opset(AEXTSHCC, r0)
+ opset(ACNTLZW, r0)
+ opset(ACNTLZWCC, r0)
+ opset(ACNTLZD, r0)
+ opset(AEXTSW, r0)
+ opset(AEXTSWCC, r0)
+ opset(ACNTLZDCC, r0)
+
+ case AFABS: /* fop [s,]d */
+ opset(AFABSCC, r0)
+
+ opset(AFNABS, r0)
+ opset(AFNABSCC, r0)
+ opset(AFNEG, r0)
+ opset(AFNEGCC, r0)
+ opset(AFRSP, r0)
+ opset(AFRSPCC, r0)
+ opset(AFCTIW, r0)
+ opset(AFCTIWCC, r0)
+ opset(AFCTIWZ, r0)
+ opset(AFCTIWZCC, r0)
+ opset(AFCTID, r0)
+ opset(AFCTIDCC, r0)
+ opset(AFCTIDZ, r0)
+ opset(AFCTIDZCC, r0)
+ opset(AFCFID, r0)
+ opset(AFCFIDCC, r0)
+ opset(AFCFIDU, r0)
+ opset(AFCFIDUCC, r0)
+ opset(AFCFIDS, r0)
+ opset(AFCFIDSCC, r0)
+ opset(AFRES, r0)
+ opset(AFRESCC, r0)
+ opset(AFRIM, r0)
+ opset(AFRIMCC, r0)
+ opset(AFRIP, r0)
+ opset(AFRIPCC, r0)
+ opset(AFRIZ, r0)
+ opset(AFRIZCC, r0)
+ opset(AFRIN, r0)
+ opset(AFRINCC, r0)
+ opset(AFRSQRTE, r0)
+ opset(AFRSQRTECC, r0)
+ opset(AFSQRT, r0)
+ opset(AFSQRTCC, r0)
+ opset(AFSQRTS, r0)
+ opset(AFSQRTSCC, r0)
+
+ case AFADD:
+ opset(AFADDS, r0)
+ opset(AFADDCC, r0)
+ opset(AFADDSCC, r0)
+ opset(AFCPSGN, r0)
+ opset(AFCPSGNCC, r0)
+ opset(AFDIV, r0)
+ opset(AFDIVS, r0)
+ opset(AFDIVCC, r0)
+ opset(AFDIVSCC, r0)
+ opset(AFSUB, r0)
+ opset(AFSUBS, r0)
+ opset(AFSUBCC, r0)
+ opset(AFSUBSCC, r0)
+
+ case AFMADD:
+ opset(AFMADDCC, r0)
+ opset(AFMADDS, r0)
+ opset(AFMADDSCC, r0)
+ opset(AFMSUB, r0)
+ opset(AFMSUBCC, r0)
+ opset(AFMSUBS, r0)
+ opset(AFMSUBSCC, r0)
+ opset(AFNMADD, r0)
+ opset(AFNMADDCC, r0)
+ opset(AFNMADDS, r0)
+ opset(AFNMADDSCC, r0)
+ opset(AFNMSUB, r0)
+ opset(AFNMSUBCC, r0)
+ opset(AFNMSUBS, r0)
+ opset(AFNMSUBSCC, r0)
+ opset(AFSEL, r0)
+ opset(AFSELCC, r0)
+
+ case AFMUL:
+ opset(AFMULS, r0)
+ opset(AFMULCC, r0)
+ opset(AFMULSCC, r0)
+
+ case AFCMPO:
+ opset(AFCMPU, r0)
+
+ case AMTFSB0:
+ opset(AMTFSB0CC, r0)
+ opset(AMTFSB1, r0)
+ opset(AMTFSB1CC, r0)
+
+ case ANEG: /* op [Ra,] Rd */
+ opset(ANEGCC, r0)
+
+ opset(ANEGV, r0)
+ opset(ANEGVCC, r0)
+
+ case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */
+ opset(AXOR, r0)
+
+ case AORIS: /* oris/xoris $uimm,Rs,Ra */
+ opset(AXORIS, r0)
+
+ case ASLW:
+ opset(ASLWCC, r0)
+ opset(ASRW, r0)
+ opset(ASRWCC, r0)
+ opset(AROTLW, r0)
+
+ case ASLD:
+ opset(ASLDCC, r0)
+ opset(ASRD, r0)
+ opset(ASRDCC, r0)
+ opset(AROTL, r0)
+
+ case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */
+ opset(ASRAWCC, r0)
+
+ case AEXTSWSLI:
+ opset(AEXTSWSLICC, r0)
+
+ case ASRAD: /* srad Rb,Rs,Ra; sradi sh,Rs,Ra */
+ opset(ASRADCC, r0)
+
+ case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */
+ opset(ASUB, r0)
+
+ opset(ASUBCC, r0)
+ opset(ASUBV, r0)
+ opset(ASUBVCC, r0)
+ opset(ASUBCCC, r0)
+ opset(ASUBCV, r0)
+ opset(ASUBCVCC, r0)
+ opset(ASUBE, r0)
+ opset(ASUBECC, r0)
+ opset(ASUBEV, r0)
+ opset(ASUBEVCC, r0)
+
+ case ASYNC:
+ opset(AISYNC, r0)
+ opset(ALWSYNC, r0)
+ opset(APTESYNC, r0)
+ opset(ATLBSYNC, r0)
+
+ case ARLWMI:
+ opset(ARLWMICC, r0)
+ opset(ARLWNM, r0)
+ opset(ARLWNMCC, r0)
+
+ case ARLDMI:
+ opset(ARLDMICC, r0)
+ opset(ARLDIMI, r0)
+ opset(ARLDIMICC, r0)
+
+ case ARLDC:
+ opset(ARLDCCC, r0)
+
+ case ARLDCL:
+ opset(ARLDCR, r0)
+ opset(ARLDCLCC, r0)
+ opset(ARLDCRCC, r0)
+
+ case ARLDICL:
+ opset(ARLDICLCC, r0)
+ opset(ARLDICR, r0)
+ opset(ARLDICRCC, r0)
+ opset(ARLDIC, r0)
+ opset(ARLDICCC, r0)
+ opset(ACLRLSLDI, r0)
+
+ case AFMOVD:
+ opset(AFMOVDCC, r0)
+ opset(AFMOVDU, r0)
+ opset(AFMOVS, r0)
+ opset(AFMOVSU, r0)
+
+ case ALDAR:
+ opset(ALBAR, r0)
+ opset(ALHAR, r0)
+ opset(ALWAR, r0)
+
+ case ASYSCALL: /* just the op; flow of control */
+ opset(ARFI, r0)
+
+ opset(ARFCI, r0)
+ opset(ARFID, r0)
+ opset(AHRFID, r0)
+
+ case AMOVHBR:
+ opset(AMOVWBR, r0)
+ opset(AMOVDBR, r0)
+
+ case ASLBMFEE:
+ opset(ASLBMFEV, r0)
+
+ case ATW:
+ opset(ATD, r0)
+
+ case ATLBIE:
+ opset(ASLBIE, r0)
+ opset(ATLBIEL, r0)
+
+ case AEIEIO:
+ opset(ASLBIA, r0)
+
+ case ACMP:
+ opset(ACMPW, r0)
+
+ case ACMPU:
+ opset(ACMPWU, r0)
+
+ case ACMPB:
+ opset(ACMPB, r0)
+
+ case AFTDIV:
+ opset(AFTDIV, r0)
+
+ case AFTSQRT:
+ opset(AFTSQRT, r0)
+
+ case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */
+ opset(AMOVWZ, r0) /* Same as above, but zero extended */
+
+ case AVCLZLSBB:
+ opset(AVCTZLSBB, r0)
+
+ case AADD,
+ AADDIS,
+ AANDCC, /* and. Rb,Rs,Ra; andi. $uimm,Rs,Ra */
+ AANDISCC,
+ AFMOVSX,
+ AFMOVSZ,
+ ALSW,
+ AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */
+ AMOVB, /* macro: move byte with sign extension */
+ AMOVBU, /* macro: move byte with sign extension & update */
+ AMOVFL,
+ /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */
+ ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */
+ ASTSW,
+ ASLBMTE,
+ AWORD,
+ ADWORD,
+ ADARN,
+ AVMSUMUDM,
+ AADDEX,
+ ACMPEQB,
+ ACLRLSLWI,
+ AMTVSRDD,
+ APNOP,
+ AISEL,
+ ASETB,
+ obj.ANOP,
+ obj.ATEXT,
+ obj.AUNDEF,
+ obj.AFUNCDATA,
+ obj.APCALIGN,
+ obj.APCDATA,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ break
+ }
+ }
+}
+
+func OPVXX1(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo<<1 | oe<<11
+}
+
+func OPVXX2(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo<<2 | oe<<11
+}
+
+func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo<<2 | oe<<16
+}
+
+func OPVXX3(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo<<3 | oe<<11
+}
+
+func OPVXX4(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo<<4 | oe<<11
+}
+
+func OPDQ(o uint32, xo uint32, oe uint32) uint32 {
+ return o<<26 | xo | oe<<4
+}
+
+func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
+ return o<<26 | xo | oe<<11 | rc&1
+}
+
+func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
+ return o<<26 | xo | oe<<11 | (rc&1)<<10
+}
+
+func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 {
+ return o<<26 | xo<<1 | oe<<10 | rc&1
+}
+
+func OPCC(o uint32, xo uint32, rc uint32) uint32 {
+ return OPVCC(o, xo, 0, rc)
+}
+
+/* Generate MD-form opcode */
+func OPMD(o, xo, rc uint32) uint32 {
+ return o<<26 | xo<<2 | rc&1
+}
+
+/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */
+func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+/* VX-form 2-register operands, r/none/r */
+func AOP_RR(op uint32, d uint32, a uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<11
+}
+
+/* VA-form 4-register operands */
+func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6
+}
+
+func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF
+}
+
+/* VX-form 2-register + UIM operands */
+func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11
+}
+
+/* VX-form 2-register + ST + SIX operands */
+func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11
+}
+
+/* VA-form 3-register + SHB operands */
+func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6
+}
+
+/* VX-form 1-register + SIM operands */
+func AOP_IR(op uint32, d uint32, simm uint32) uint32 {
+ return op | (d&31)<<21 | (simm&31)<<16
+}
+
+/* XX1-form 3-register operands, 1 VSR operand */
+func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 {
+ return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5
+}
+
+/* XX2-form 3-register operands, 2 VSR operands */
+func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 {
+ return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5
+}
+
+/* XX3-form 3 VSR operands */
+func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 {
+ return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
+}
+
+/* XX3-form 3 VSR operands + immediate */
+func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 {
+ return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
+}
+
+/* XX4-form, 4 VSR operands */
+func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 {
+ return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5
+}
+
+/* DQ-form, VSR register, register + offset operands */
+func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 {
+ /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */
+ /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */
+ /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */
+ /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */
+ /* bits 0 to 3 of 'b' need to be zero, otherwise this will generate an illegal instruction. */
+ /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */
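+ /* e.g. a byte offset of 32 encodes as dq == 2. */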
+ dq := b >> 4
+ return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2
+}
+
+/* Z23-form, 3-register operands + CY field */
+func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9
+}
+
+/* X-form, 3-register operands + EH field */
+func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 {
+ return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1)
+}
+
+func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11
+}
+
+func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF
+}
+
+func OP_BR(op uint32, li uint32, aa uint32) uint32 {
+ return op | li&0x03FFFFFC | aa<<1
+}
+
+func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 {
+ return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1
+}
+
+func OP_BCR(op uint32, bo uint32, bi uint32) uint32 {
+ return op | (bo&0x1F)<<21 | (bi&0x1F)<<16
+}
+
+func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1
+}
+
+func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 {
+ return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5
+}
+
+func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 {
+ return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1
+}
+
+func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 {
+ return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6
+}
+
+func AOP_PFX_00_8LS(r, ie uint32) uint32 {
+ return 1<<26 | 0<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
+}
+func AOP_PFX_10_MLS(r, ie uint32) uint32 {
+ return 1<<26 | 2<<24 | 0<<23 | (r&1)<<20 | (ie & 0x3FFFF)
+}
+
+const (
+ /* each rhs is OPVCC(_, _, _, _) */
+ OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0
+ OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0
+ OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0
+ OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0
+ OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0
+ OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0
+ OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0
+ OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0
+ OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0
+ OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0
+ OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0
+ OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0
+ OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0
+ OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0
+ OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0
+ OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0
+ OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0
+ OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0
+ OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0
+ OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0
+ OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0
+ OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0
+ OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0
+ OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0
+ OP_OR = 31<<26 | 444<<1 | 0<<10 | 0
+ OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0
+ OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0
+ OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0
+ OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0
+ OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0
+ OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0
+ OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0
+ OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0
+ OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0
+ OP_EXTSWSLI = 31<<26 | 445<<2
+ OP_SETB = 31<<26 | 128<<1
+)
+
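+// pfxadd returns the two words of a prefixed add immediate (paddi) computing
+// rt = ra + imm32. The R bit r selects PC-relative addressing, and the 34-bit
+// immediate is split across the prefix (upper 18 bits) and suffix (low 16).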
+func pfxadd(rt, ra int16, r uint32, imm32 int64) (uint32, uint32) {
+ return AOP_PFX_10_MLS(r, uint32(imm32>>16)), AOP_IRR(14<<26, uint32(rt), uint32(ra), uint32(imm32))
+}
+
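+// pfxload returns the prefix and suffix words of a prefixed load (e.g. pld
+// for AMOVD) from base into reg. The displacement fields are left zero;
+// callers fill them in directly or via a PC-relative relocation.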
+func pfxload(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
+ switch a {
+ case AMOVH:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(42<<26, uint32(reg), uint32(base), 0)
+ case AMOVW:
+ return AOP_PFX_00_8LS(r, 0), AOP_IRR(41<<26, uint32(reg), uint32(base), 0)
+ case AMOVD:
+ return AOP_PFX_00_8LS(r, 0), AOP_IRR(57<<26, uint32(reg), uint32(base), 0)
+ case AMOVBZ, AMOVB:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(34<<26, uint32(reg), uint32(base), 0)
+ case AMOVHZ:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(40<<26, uint32(reg), uint32(base), 0)
+ case AMOVWZ:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(32<<26, uint32(reg), uint32(base), 0)
+ case AFMOVS:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(48<<26, uint32(reg), uint32(base), 0)
+ case AFMOVD:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(50<<26, uint32(reg), uint32(base), 0)
+ }
+ log.Fatalf("Error no pfxload for %v\n", a)
+ return 0, 0
+}
+
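+// pfxstore is the store analogue of pfxload, returning the prefix and suffix
+// words of a prefixed store (e.g. pstd for AMOVD) of reg to base, again with
+// zero displacement fields for the caller to fill in.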
+func pfxstore(a obj.As, reg int16, base int16, r uint32) (uint32, uint32) {
+ switch a {
+ case AMOVD:
+ return AOP_PFX_00_8LS(r, 0), AOP_IRR(61<<26, uint32(reg), uint32(base), 0)
+ case AMOVBZ, AMOVB:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(38<<26, uint32(reg), uint32(base), 0)
+ case AMOVHZ, AMOVH:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(44<<26, uint32(reg), uint32(base), 0)
+ case AMOVWZ, AMOVW:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(36<<26, uint32(reg), uint32(base), 0)
+ case AFMOVS:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(52<<26, uint32(reg), uint32(base), 0)
+ case AFMOVD:
+ return AOP_PFX_10_MLS(r, 0), AOP_IRR(54<<26, uint32(reg), uint32(base), 0)
+ }
+ log.Fatalf("Error no pfxstore for %v\n", a)
+ return 0, 0
+}
+
+func oclass(a *obj.Addr) int {
+ return int(a.Class) - 1
+}
+
+const (
+ D_FORM = iota
+ DS_FORM
+)
+
+// This function determines when a non-indexed load or store is D or
+// DS form for use in finding the size of the offset field in the instruction.
+// The size is needed when setting the offset value in the instruction
+// and when generating relocation for that field.
+// DS form instructions include: ld, ldu, lwa, std, stdu. All other
+// loads and stores with an offset field are D form. This function should
+// only be called with the same opcodes as are handled by opstore and opload.
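+//
+// For example, "MOVD 8(R3), R4" assembles to ld, a DS form instruction whose
+// offset field drops the low two bits and so must be a multiple of 4, while
+// "MOVWZ 8(R3), R4" assembles to lwz, a D form instruction with no such
+// restriction.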
+func (c *ctxt9) opform(insn uint32) int {
+ switch insn {
+ default:
+ c.ctxt.Diag("bad insn in opform: %x", insn)
+ case OPVCC(58, 0, 0, 0), // ld
+ OPVCC(58, 0, 0, 1), // ldu
+ OPVCC(58, 0, 0, 0) | 1<<1, // lwa
+ OPVCC(62, 0, 0, 0), // std
+ OPVCC(62, 0, 0, 1): // stdu
+ return DS_FORM
+ case OP_ADDI, // add
+ OPVCC(32, 0, 0, 0), // lwz
+ OPVCC(33, 0, 0, 0), // lwzu
+ OPVCC(34, 0, 0, 0), // lbz
+ OPVCC(35, 0, 0, 0), // lbzu
+ OPVCC(40, 0, 0, 0), // lhz
+ OPVCC(41, 0, 0, 0), // lhzu
+ OPVCC(42, 0, 0, 0), // lha
+ OPVCC(43, 0, 0, 0), // lhau
+ OPVCC(46, 0, 0, 0), // lmw
+ OPVCC(48, 0, 0, 0), // lfs
+ OPVCC(49, 0, 0, 0), // lfsu
+ OPVCC(50, 0, 0, 0), // lfd
+ OPVCC(51, 0, 0, 0), // lfdu
+ OPVCC(36, 0, 0, 0), // stw
+ OPVCC(37, 0, 0, 0), // stwu
+ OPVCC(38, 0, 0, 0), // stb
+ OPVCC(39, 0, 0, 0), // stbu
+ OPVCC(44, 0, 0, 0), // sth
+ OPVCC(45, 0, 0, 0), // sthu
+ OPVCC(47, 0, 0, 0), // stmw
+ OPVCC(52, 0, 0, 0), // stfs
+ OPVCC(53, 0, 0, 0), // stfsu
+ OPVCC(54, 0, 0, 0), // stfd
+ OPVCC(55, 0, 0, 0): // stfdu
+ return D_FORM
+ }
+ return 0
+}
+
+// Encode instructions and create relocation for accessing s+d according to the
+// instruction op with source or destination (as appropriate) register reg.
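+// The result is an addis+op pair covered by a single 8-byte relocation whose
+// type depends on Flag_shared (TOC-relative or not) and on whether op is D
+// or DS form.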
+func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32, rel *obj.Reloc) {
+ if c.ctxt.Headtype == objabi.Haix {
+ // Every symbol access must be made via a TOC anchor.
+ c.ctxt.Diag("symbolAccess called for %s", s.Name)
+ }
+ var base uint32
+ form := c.opform(op)
+ if c.ctxt.Flag_shared {
+ base = REG_R2
+ } else {
+ base = REG_R0
+ }
+ // If reg can be reused when computing the symbol address,
+ // use it instead of REGTMP.
+ if !reuse {
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0)
+ o2 = AOP_IRR(op, uint32(reg), REGTMP, 0)
+ } else {
+ o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0)
+ o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0)
+ }
+ rel = obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 8
+ rel.Sym = s
+ rel.Add = d
+ if c.ctxt.Flag_shared {
+ switch form {
+ case D_FORM:
+ rel.Type = objabi.R_ADDRPOWER_TOCREL
+ case DS_FORM:
+ rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
+ }
+
+ } else {
+ switch form {
+ case D_FORM:
+ rel.Type = objabi.R_ADDRPOWER
+ case DS_FORM:
+ rel.Type = objabi.R_ADDRPOWER_DS
+ }
+ }
+ return
+}
+
+/*
+ * 32-bit masks
+ */
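+// getmask computes into m the MB (mask begin) and ME (mask end) bit indexes,
+// in IBM bit order (bit 0 is the most significant), of the run of 1 bits in
+// v, e.g. v == 0x00FFFF00 gives m[0] == 8 and m[1] == 23. A wraparound mask
+// (MB > ME) is handled by recursing on ^v. It returns false if v is not a
+// valid mask.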
+func getmask(m []byte, v uint32) bool {
+ m[1] = 0
+ m[0] = m[1]
+ if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */
+ if getmask(m, ^v) {
+ i := int(m[0])
+ m[0] = m[1] + 1
+ m[1] = byte(i - 1)
+ return true
+ }
+
+ return false
+ }
+
+ for i := 0; i < 32; i++ {
+ if v&(1<<uint(31-i)) != 0 {
+ m[0] = byte(i)
+ for {
+ m[1] = byte(i)
+ i++
+ if i >= 32 || v&(1<<uint(31-i)) == 0 {
+ break
+ }
+ }
+
+ for ; i < 32; i++ {
+ if v&(1<<uint(31-i)) != 0 {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) {
+ if !getmask(m, v) {
+ c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+ }
+}
+
+/*
+ * 64-bit masks (rldic etc)
+ */
+func getmask64(m []byte, v uint64) bool {
+ m[1] = 0
+ m[0] = m[1]
+ for i := 0; i < 64; i++ {
+ if v&(uint64(1)<<uint(63-i)) != 0 {
+ m[0] = byte(i)
+ for {
+ m[1] = byte(i)
+ i++
+ if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 {
+ break
+ }
+ }
+
+ for ; i < 64; i++ {
+ if v&(uint64(1)<<uint(63-i)) != 0 {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ return false
+}
+
+func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) {
+ if !getmask64(m, v) {
+ c.ctxt.Diag("cannot generate mask #%x\n%v", v, p)
+ }
+}
+
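+// loadu32 returns one instruction loading the upper halfword of the low 32
+// bits of d into r: oris (zero extending) when d fits in a uint32, otherwise
+// addis, which sign extends the 32-bit result to 64 bits.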
+func loadu32(r int, d int64) uint32 {
+ v := int32(d >> 16)
+ if isuint32(uint64(d)) {
+ return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v))
+ }
+ return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v))
+}
+
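+// high16adjusted returns the high 16 bits of d, plus 1 when the low 16 bits
+// are negative as a signed value, so that (result << 16) + int64(int16(d)) == d.
+// For example, d == 0x18000 returns 2, since 2<<16 + int16(0x8000) == 0x18000.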
+func high16adjusted(d int32) uint16 {
+ if d&0x8000 != 0 {
+ return uint16((d >> 16) + 1)
+ }
+ return uint16(d >> 16)
+}
+
+func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) {
+ o1 := uint32(0)
+ o2 := uint32(0)
+ o3 := uint32(0)
+ o4 := uint32(0)
+ o5 := uint32(0)
+
+ //print("%v => case %d\n", p, o->type);
+ switch o.type_ {
+ default:
+ c.ctxt.Diag("unknown type %d", o.type_)
+ prasm(p)
+
+ case 0: /* pseudo ops */
+ break
+
+ case 2: /* int/cr/fp op Rb,[Ra],Rd */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+
+ case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */
+ d := c.vregoff(&p.From)
+
+ v := int32(d)
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = c.getimpliedreg(&p.From, p)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) {
+ c.ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ a := OP_ADDI
+ if o.a1 == C_UCON {
+ if d&0xffff != 0 {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ // For UCON operands the value is right shifted 16, using ADDIS if the
+ // value should be signed, ORIS if unsigned.
+ v >>= 16
+ if r == REGZERO && isuint32(uint64(d)) {
+ o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v))
+ break
+ }
+
+ a = OP_ADDIS
+ } else if int64(int16(d)) != d {
+ // Operand is 16 bit value with sign bit set
+ if o.a1 == C_ANDCON {
+ // Needs unsigned 16 bit so use ORI
+ if r == 0 || r == REGZERO {
+ o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v))
+ break
+ }
+ // With ADDCON, needs signed 16 bit value, fall through to use ADDI
+ } else if o.a1 != C_ADDCON {
+ log.Fatalf("invalid handling of %v", p)
+ }
+ }
+
+ o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 4: /* add/mul $scon,[r1],r2 */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 {
+ c.ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 5: /* syscall */
+ o1 = c.oprrr(p.As)
+
+ case 6: /* logical op Rb,[Rs,]Ra; no literal */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM.
+ switch p.As {
+ case AROTL:
+ o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0))
+ case AROTLW:
+ o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31)
+ default:
+ if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 {
+ // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred
+ // hardware no-op. This happens because $0 matches C_REG before C_ZCON.
+ o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0)
+ } else {
+ o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ }
+ }
+
+ case 7: /* mov r, soreg ==> stw o(r) */
+ r := int(p.To.Reg)
+
+ if r == 0 {
+ r = c.getimpliedreg(&p.To, p)
+ }
+ v := c.regoff(&p.To)
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ // Offsets in DS form stores must be a multiple of 4
+ inst := c.opstore(p.As)
+ if c.opform(inst) == DS_FORM && v&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+ o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v))
+
+ case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */
+ r := int(p.From.Reg)
+
+ if r == 0 {
+ r = c.getimpliedreg(&p.From, p)
+ }
+ v := c.regoff(&p.From)
+ if int32(int16(v)) != v {
+ log.Fatalf("mishandled instruction %v", p)
+ }
+ // Offsets in DS form loads must be a multiple of 4
+ inst := c.opload(p.As)
+ if c.opform(inst) == DS_FORM && v&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+ o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v))
+
+ // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
+ o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r))
+
+ case 11: /* br/bl lbra */
+ v := int32(0)
+
+ if p.To.Target() != nil {
+ v = int32(p.To.Target().Pc - p.Pc)
+ if v&03 != 0 {
+ c.ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<25) || v >= 1<<24 {
+ c.ctxt.Diag("branch too far\n%v", p)
+ }
+ }
+
+ o1 = OP_BR(c.opirr(p.As), uint32(v), 0)
+ if p.To.Sym != nil {
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 4
+ rel.Sym = p.To.Sym
+ v += int32(p.To.Offset)
+ if v&03 != 0 {
+ c.ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ rel.Add = int64(v)
+ rel.Type = objabi.R_CALLPOWER
+ }
+ o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking
+
+ case 13: /* mov[bhwd]{z,} r,r */
+ // This needs to handle "MOV* $0, Rx". This shows up because $0 also
+ // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON
+ // TODO: fix the above behavior and cleanup this exception.
+ if p.From.Type == obj.TYPE_CONST {
+ o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0)
+ break
+ }
+ if p.To.Type == obj.TYPE_CONST {
+ c.ctxt.Diag("cannot move into constant 0\n%v", p)
+ }
+
+ switch p.As {
+ case AMOVB:
+ o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ case AMOVBZ:
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31)
+ case AMOVH:
+ o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ case AMOVHZ:
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31)
+ case AMOVW:
+ o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0)
+ case AMOVWZ:
+ o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */
+ case AMOVD:
+ o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg))
+ default:
+ c.ctxt.Diag("internal: bad register move/truncation\n%v", p)
+ }
+
+ case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ d := c.vregoff(p.GetFrom3())
+ var a int
+ switch p.As {
+
+ // These opcodes expect a mask operand that has to be converted into the
+ // MB or ME field of the underlying rotate instruction. As defined, not every
+ // valid mask can be expressed. Left here for compatibility in case they were
+ // used or generated.
+ case ARLDCL, ARLDCLCC:
+ var mask [2]uint8
+ c.maskgen64(p, mask[:], uint64(d))
+
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p)
+ }
+ o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 |= (uint32(a) & 31) << 6
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case ARLDCR, ARLDCRCC:
+ var mask [2]uint8
+ c.maskgen64(p, mask[:], uint64(d))
+
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p)
+ }
+ o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg))
+ o1 |= (uint32(a) & 31) << 6
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ // These opcodes use a shift count like the ppc64 asm, no mask conversion done
+ case ARLDICR, ARLDICRCC:
+ me := int(d)
+ sh := c.regoff(&p.From)
+ if me < 0 || me > 63 || sh > 63 {
+ c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p)
+ }
+ o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me))
+
+ case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC:
+ mb := int(d)
+ sh := c.regoff(&p.From)
+ if mb < 0 || mb > 63 || sh > 63 {
+ c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p)
+ }
+ o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb))
+
+ case ACLRLSLDI:
+ // This is an extended mnemonic defined in the ISA section C.8.1
+ // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n
+ // It maps onto RLDIC so is directly generated here based on the operands from
+ // the clrlsldi.
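+ // Illustrative example: with b=16 and n=4 this emits rldic ra,rs,4,12.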
+ n := int32(d)
+ b := c.regoff(&p.From)
+ if n > b || b > 63 {
+ c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p)
+ }
+ o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n))
+
+ default:
+ c.ctxt.Diag("unexpected op in rldc case\n%v", p)
+ a = 0
+ }
+
+ case 17, /* bc bo,bi,lbra (same for now) */
+ 16: /* bc bo,bi,sbra */
+ a := 0
+
+ r := int(p.Reg)
+
+ if p.From.Type == obj.TYPE_CONST {
+ a = int(c.regoff(&p.From))
+ } else if p.From.Type == obj.TYPE_REG {
+ if r != 0 {
+ c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r)
+ }
+ // BI values for the CR
+ switch p.From.Reg {
+ case REG_CR0:
+ r = BI_CR0
+ case REG_CR1:
+ r = BI_CR1
+ case REG_CR2:
+ r = BI_CR2
+ case REG_CR3:
+ r = BI_CR3
+ case REG_CR4:
+ r = BI_CR4
+ case REG_CR5:
+ r = BI_CR5
+ case REG_CR6:
+ r = BI_CR6
+ case REG_CR7:
+ r = BI_CR7
+ default:
+ c.ctxt.Diag("unrecognized register: expecting CR\n")
+ }
+ }
+ v := int32(0)
+ if p.To.Target() != nil {
+ v = int32(p.To.Target().Pc - p.Pc)
+ }
+ if v&03 != 0 {
+ c.ctxt.Diag("odd branch target address\n%v", p)
+ v &^= 03
+ }
+
+ if v < -(1<<16) || v >= 1<<15 {
+ c.ctxt.Diag("branch too far\n%v", p)
+ }
+ o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0)
+
+ case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */
+ var v int32
+ var bh uint32 = 0
+ if p.As == ABC || p.As == ABCL {
+ v = c.regoff(&p.From) & 31
+ } else {
+ v = 20 /* unconditional */
+ }
+ r := int(p.Reg)
+ switch oclass(&p.To) {
+ case C_CTR:
+ o1 = OPVCC(19, 528, 0, 0)
+
+ case C_LR:
+ o1 = OPVCC(19, 16, 0, 0)
+
+ default:
+ c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p)
+ v = 0
+ }
+
+ // Insert optional branch hint for bclr[l]/bcctr[l]
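+ // BH is a 2-bit branch-usage hint (ISA Book I); the value 2 is a reserved
+ // encoding, hence only 0, 1, and 3 are accepted below.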
+ if p.From3Type() != obj.TYPE_NONE {
+ bh = uint32(p.GetFrom3().Offset)
+ if bh == 2 || bh > 3 {
+ log.Fatalf("BH must be 0,1,3 for %v", p)
+ }
+ o1 |= bh << 11
+ }
+
+ if p.As == ABL || p.As == ABCL {
+ o1 |= 1
+ }
+ o1 = OP_BCR(o1, uint32(v), uint32(r))
+
+ case 19: /* mov $lcon,r ==> cau+or */
+ d := c.vregoff(&p.From)
+ if o.ispfx {
+ o1, o2 = pfxadd(p.To.Reg, REG_R0, PFX_R_ABS, d)
+ } else {
+ o1 = loadu32(int(p.To.Reg), d)
+ o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d)))
+ }
+
+ case 20: /* add $ucon,,r | addis $addcon,r,r */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) {
+ c.ctxt.Diag("literal operation on R0\n%v", p)
+ }
+ if p.As == AADDIS {
+ o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
+ } else {
+ o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+ }
+
+ case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+ c.ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ d := c.vregoff(&p.From)
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ if p.From.Sym != nil {
+ c.ctxt.Diag("%v is not supported", p)
+ }
+ // If operand is ANDCON, generate 2 instructions using
+ // ORI for unsigned value; with LCON 3 instructions.
+ if o.size == 8 {
+ o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d)))
+ o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
+ } else {
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
+ }
+
+ if o.ispfx {
+ o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, d)
+ }
+
+ case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */
+ if p.To.Reg == REGTMP || p.Reg == REGTMP {
+ c.ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ d := c.vregoff(&p.From)
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ // With ADDCON operand, generate 2 instructions using ADDI for signed value,
+ // with LCON operand generate 3 instructions.
+ if o.size == 8 {
+ o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d)))
+ o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
+ } else {
+ o1 = loadu32(REGTMP, d)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d)))
+ o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r))
+ }
+ if p.From.Sym != nil {
+ c.ctxt.Diag("%v is not supported", p)
+ }
+
+ case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */
+ o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0))
+ // This is needed for -0.
+ if o.size == 8 {
+ o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg))
+ }
+
+ case 25:
+ /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] -> rldicl */
+ v := c.regoff(&p.From)
+
+ if v < 0 {
+ v = 0
+ } else if v > 63 {
+ v = 63
+ }
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ var a int
+ op := uint32(0)
+ switch p.As {
+ case ASLD, ASLDCC:
+ a = int(63 - v)
+ op = OP_RLDICR
+
+ case ASRD, ASRDCC:
+ a = int(v)
+ v = 64 - v
+ op = OP_RLDICL
+ case AROTL:
+ a = int(0)
+ op = OP_RLDICL
+ case AEXTSWSLI, AEXTSWSLICC:
+ a = int(v)
+ default:
+ c.ctxt.Diag("unexpected op in sldi case\n%v", p)
+ a = 0
+ o1 = 0
+ }
+
+ if p.As == AEXTSWSLI || p.As == AEXTSWSLICC {
+ o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v))
+
+ } else {
+ o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a))
+ }
+ if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC {
+ o1 |= 1 // Set the condition code bit
+ }
+
+ case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */
+ v := c.vregoff(&p.From)
+ r := int(p.From.Reg)
+ var rel *obj.Reloc
+
+ switch p.From.Name {
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
+ // Load a 32-bit constant, or a relocation, depending on whether a symbol is attached
+ o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true)
+ default:
+ if r == 0 {
+ r = c.getimpliedreg(&p.From, p)
+ }
+ // Add a 32 bit offset to a register.
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v))))
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
+ }
+
+ if o.ispfx {
+ if rel == nil {
+ o1, o2 = pfxadd(int16(p.To.Reg), int16(r), PFX_R_ABS, v)
+ } else {
+ o1, o2 = pfxadd(int16(p.To.Reg), REG_R0, PFX_R_PCREL, 0)
+ rel.Type = objabi.R_ADDRPOWER_PCREL34
+ }
+ }
+
+ case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */
+ v := c.regoff(p.GetFrom3())
+
+ r := int(p.From.Reg)
+ o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */
+ if p.To.Reg == REGTMP || p.From.Reg == REGTMP {
+ c.ctxt.Diag("can't synthesize large constant\n%v", p)
+ }
+ v := c.regoff(p.GetFrom3())
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16)
+ o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v))
+ o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP)
+ if p.From.Sym != nil {
+ c.ctxt.Diag("%v is not supported", p)
+ }
+
+ case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */
+ v := c.regoff(&p.From)
+
+ d := c.vregoff(p.GetFrom3())
+ var mask [2]uint8
+ c.maskgen64(p, mask[:], uint64(d))
+ var a int
+ switch p.As {
+ case ARLDC, ARLDCCC:
+ a = int(mask[0]) /* MB */
+ if int32(mask[1]) != (63 - v) {
+ c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
+ }
+
+ case ARLDCL, ARLDCLCC:
+ a = int(mask[0]) /* MB */
+ if mask[1] != 63 {
+ c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p)
+ }
+
+ case ARLDCR, ARLDCRCC:
+ a = int(mask[1]) /* ME */
+ if mask[0] != 0 {
+ c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p)
+ }
+
+ default:
+ c.ctxt.Diag("unexpected op in rldic case\n%v", p)
+ a = 0
+ }
+
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(a) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if a&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ case 30: /* rldimi $sh,s,$mask,a */
+ v := c.regoff(&p.From)
+
+ d := c.vregoff(p.GetFrom3())
+
+ // Original opcodes had mask operands which had to be converted to a shift count as expected by
+ // the ppc64 asm.
+ switch p.As {
+ case ARLDMI, ARLDMICC:
+ var mask [2]uint8
+ c.maskgen64(p, mask[:], uint64(d))
+ if int32(mask[1]) != (63 - v) {
+ c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p)
+ }
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(mask[0]) & 31) << 6
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ if mask[0]&0x20 != 0 {
+ o1 |= 1 << 5 /* mb[5] is top bit */
+ }
+
+ // Opcodes with shift count operands.
+ case ARLDIMI, ARLDIMICC:
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F))
+ o1 |= (uint32(d) & 31) << 6
+ if d&0x20 != 0 {
+ o1 |= 1 << 5
+ }
+ if v&0x20 != 0 {
+ o1 |= 1 << 1
+ }
+ }
+
+ case 31: /* dword */
+ d := c.vregoff(&p.From)
+
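+ // Emit the doubleword as two 32-bit words in target memory order.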
+ if c.ctxt.Arch.ByteOrder == binary.BigEndian {
+ o1 = uint32(d >> 32)
+ o2 = uint32(d)
+ } else {
+ o1 = uint32(d)
+ o2 = uint32(d >> 32)
+ }
+
+ if p.From.Sym != nil {
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Add = p.From.Offset
+ rel.Type = objabi.R_ADDR
+ o1, o2 = 0, 0
+ }
+
+ case 32: /* fmul frc,fra,frd */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6
+
+ case 33: /* fabs [frb,]frd; fmr. frb,frd */
+ r := int(p.From.Reg)
+
+ if oclass(&p.From) == C_NONE {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r))
+
+ case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6
+
+ case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */
+ v := c.regoff(&p.To)
+
+ r := int(p.To.Reg)
+ if r == 0 {
+ r = c.getimpliedreg(&p.To, p)
+ }
+ // Offsets in DS form stores must be a multiple of 4
+ if o.ispfx {
+ o1, o2 = pfxstore(p.As, p.From.Reg, int16(r), PFX_R_ABS)
+ o1 |= uint32((v >> 16) & 0x3FFFF)
+ o2 |= uint32(v & 0xFFFF)
+ } else {
+ inst := c.opstore(p.As)
+ if c.opform(inst) == DS_FORM && v&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+ o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v))
+ }
+
+ case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */
+ v := c.regoff(&p.From)
+
+ r := int(p.From.Reg)
+ if r == 0 {
+ r = c.getimpliedreg(&p.From, p)
+ }
+
+ if o.ispfx {
+ o1, o2 = pfxload(p.As, p.To.Reg, int16(r), PFX_R_ABS)
+ o1 |= uint32((v >> 16) & 0x3FFFF)
+ o2 |= uint32(v & 0xFFFF)
+ } else {
+ if o.a6 == C_REG {
+ // Reuse the base register when loading a GPR (C_REG) to avoid
+ // using REGTMP (R31) when possible.
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v))
+ } else {
+ o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v)))
+ o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v))
+ }
+ }
+
+ // Sign extend MOVB if needed
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 40: /* word */
+ o1 = uint32(c.regoff(&p.From))
+
+ case 41: /* stswi */
+ if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 {
+ c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
+ }
+
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
+
+ case 42: /* lswi */
+ if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 {
+ c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As)
+ }
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11
+
+ case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */
+ /* TH field for dcbt/dcbtst: */
+ /* 0 = Block access - program will soon access EA. */
+ /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */
+ /* 16 = Block access - program will soon make a transient access to EA. */
+ /* 17 = Block access - program will not access EA for a long time. */
+
+ /* L field for dcbf: */
+ /* 0 = invalidates the block containing EA in all processors. */
+ /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */
+ /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */
+ if p.To.Type == obj.TYPE_NONE {
+ o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg))
+ } else {
+ th := c.regoff(&p.To)
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg))
+ }
+
+ case 44: /* indexed store */
+ o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
+
+ case 45: /* indexed load */
+ switch p.As {
+ /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */
+ /* The EH field can be used as a lock acquire/release hint as follows: */
+ /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */
+ /* 1 = Exclusive Access (lock acquire and release) */
+ case ALBAR, ALHAR, ALWAR, ALDAR:
+ if p.From3Type() != obj.TYPE_NONE {
+ eh := int(c.regoff(p.GetFrom3()))
+ if eh > 1 {
+ c.ctxt.Diag("illegal EH field\n%v", p)
+ }
+ o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh))
+ } else {
+ o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
+ }
+ default:
+ o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
+ }
+ case 46: /* plain op */
+ o1 = c.oprrr(p.As)
+
+ case 47: /* op Ra, Rd; also op [Ra,] Rd */
+ r := int(p.From.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
+
+ case 48: /* op Rs, Ra */
+ r := int(p.From.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0)
+
+ case 49: /* op Rb; op $n, Rb */
+ if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */
+ v := c.regoff(&p.From) & 1
+ o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21
+ } else {
+ o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg))
+ }
+
+ case 50: /* rem[u] r1[,r2],r3 */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
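+ // The remainder is synthesized in three steps:
+ // REGTMP = r / p.From.Reg, then REGTMP *= p.From.Reg,
+ // and finally p.To.Reg = r - REGTMP.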
+ v := c.oprrr(p.As)
+ t := v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
+ if p.As == AREMU {
+ o4 = o3
+
+ /* Clear top 32 bits */
+ o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5
+ }
+
+ case 51: /* remd[u] r1[,r2],r3 */
+ r := int(p.Reg)
+
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ v := c.oprrr(p.As)
+ t := v & (1<<10 | 1) /* OE|Rc */
+ o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg))
+ o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg))
+ o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r))
+
+ case 52: /* mtfsbNx cr(n) */
+ v := c.regoff(&p.From) & 31
+
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0)
+
+ case 53: /* mffsX ,fr1 */
+ o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0)
+
+ case 55: /* op Rb, Rd */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg))
+
+ case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31)
+ if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) {
+ o1 |= 1 << 1 /* mb[5] */
+ }
+
+ case 57: /* slw $sh,[s,]a -> rlwinm ... */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+
+ /*
+ * Let user (gs) shoot himself in the foot.
+ * qc has already complained.
+ *
+ if(v < 0 || v > 31)
+ ctxt->diag("illegal shift %ld\n%v", v, p);
+ */
+ if v < 0 {
+ v = 0
+ } else if v > 32 {
+ v = 32
+ }
+ var mask [2]uint8
+ switch p.As {
+ case AROTLW:
+ mask[0], mask[1] = 0, 31
+ case ASRW, ASRWCC:
+ mask[0], mask[1] = uint8(v), 31
+ v = 32 - v
+ default:
+ mask[0], mask[1] = 0, uint8(31-v)
+ }
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1]))
+ if p.As == ASLWCC || p.As == ASRWCC {
+ o1 |= 1 // set the condition code
+ }
+
+ case 58: /* logical $andcon,[s],a */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
+
+ case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */
+ v := c.regoff(&p.From)
+
+ r := int(p.Reg)
+ if r == 0 {
+ r = int(p.To.Reg)
+ }
+ switch p.As {
+ case AOR:
+ o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */
+ case AXOR:
+ o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+ case AANDCC:
+ o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16)
+ default:
+ o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v))
+ }
+
+ case 60: /* tw to,a,b */
+ r := int(c.regoff(&p.From) & 31)
+
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg))
+
+ case 61: /* tw to,a,$simm */
+ r := int(c.regoff(&p.From) & 31)
+
+ v := c.regoff(&p.To)
+ o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v))
+
+ case 62: /* rlwmi $sh,s,$mask,a */
+ v := c.regoff(&p.From)
+ switch p.As {
+ case ACLRLSLWI:
+ n := c.regoff(p.GetFrom3())
+ // This is an extended mnemonic described in the ISA C.8.2
+ // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n
+ // It maps onto rlwinm which is directly generated here.
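+ // Illustrative example: with b=16 and n=8 this emits rlwinm ra,rs,8,8,23.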
+ if n > v || v >= 32 {
+ c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p)
+ }
+
+ o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n))
+ default:
+ var mask [2]uint8
+ c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+ }
+
+ case 63: /* rlwmi b,s,$mask,a */
+ var mask [2]uint8
+ c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3())))
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg))
+ o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1
+
+ case 64: /* mtfsf fr[, $m] {,fpcsr} */
+ var v int32
+ if p.From3Type() != obj.TYPE_NONE {
+ v = c.regoff(p.GetFrom3()) & 255
+ } else {
+ v = 255
+ }
+ o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11
+
+ case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */
+ if p.To.Reg == 0 {
+ c.ctxt.Diag("must specify FPSCR(n)\n%v", p)
+ }
+ o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12
+
+ case 66: /* mov spr,r1; mov r1,spr */
+ var r int
+ var v int32
+ if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 {
+ r = int(p.From.Reg)
+ v = int32(p.To.Reg)
+ o1 = OPVCC(31, 467, 0, 0) /* mtspr */
+ } else {
+ r = int(p.To.Reg)
+ v = int32(p.From.Reg)
+ o1 = OPVCC(31, 339, 0, 0) /* mfspr */
+ }
+
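+ // The 10-bit SPR number is encoded with its halves swapped: the low
+ // 5 bits land in bits 16-20, the high 5 bits in bits 11-15.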
+ o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
+
+ case 67: /* mcrf crfD,crfS */
+ if p.From.Reg == REG_CR || p.To.Reg == REG_CR {
+ c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0)
+
+ case 68: /* mfcr rD; mfocrf CRM,rD */
+ o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */
+ if p.From.Reg != REG_CR {
+ v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */
+ o1 |= 1<<20 | v<<12 /* new form, mfocrf */
+ }
+
+ case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */
+ var v uint32
+ if p.To.Reg == REG_CR {
+ v = 0xff
+ } else if p.To.Offset != 0 { // MOVFL gpr, constant
+ v = uint32(p.To.Offset)
+ } else { // p.To.Reg == REG_CRx
+ v = 1 << uint(7-(p.To.Reg&7))
+ }
+ // Use mtocrf form if only one CR field moved.
+ if bits.OnesCount32(v) == 1 {
+ v |= 1 << 8
+ }
+
+ o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12
+
+ case 70: /* [f]cmp r,r,cr*/
+ var r int
+ if p.Reg == 0 {
+ r = 0
+ } else {
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg))
+
+ case 71: /* cmp[l] r,i,cr*/
+ var r int
+ if p.Reg == 0 {
+ r = 0
+ } else {
+ r = (int(p.Reg) & 7) << 2
+ }
+ o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff
+
+ case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg))
+
+ case 73: /* mcrfs crfD,crfS */
+ if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg {
+ c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p)
+ }
+ o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0)
+
+ case 77: /* syscall $scon, syscall Rx */
+ if p.From.Type == obj.TYPE_CONST {
+ if p.From.Offset > BIG || p.From.Offset < -BIG {
+ c.ctxt.Diag("illegal syscall, sysnum too large: %v", p)
+ }
+ o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset))
+ } else if p.From.Type == obj.TYPE_REG {
+ o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg))
+ } else {
+ c.ctxt.Diag("illegal syscall: %v", p)
+ o1 = 0x7fe00008 // trap always
+ }
+
+ o2 = c.oprrr(p.As)
+ o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0
+
+ case 78: /* undef */
+ o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed
+ always to be an illegal instruction." */
+
+ /* relocation operations */
+ case 74:
+ var rel *obj.Reloc
+ v := c.vregoff(&p.To)
+ // Offsets in DS form stores must be a multiple of 4
+ inst := c.opstore(p.As)
+
+ // Can't reuse base for store instructions.
+ o1, o2, rel = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false)
+
+ // Rewrite as a prefixed store if supported.
+ if o.ispfx {
+ o1, o2 = pfxstore(p.As, p.From.Reg, REG_R0, PFX_R_PCREL)
+ rel.Type = objabi.R_ADDRPOWER_PCREL34
+ } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+
+ case 75: // 32 bit offset symbol loads (got/toc/addr)
+ var rel *obj.Reloc
+ v := p.From.Offset
+
+ // Offsets in DS form loads must be a multiple of 4
+ inst := c.opload(p.As)
+ switch p.From.Name {
+ case obj.NAME_GOTREF, obj.NAME_TOCREF:
+ if v != 0 {
+ c.ctxt.Diag("invalid offset for GOT/TOC access %v", p)
+ }
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
+ o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+ rel = obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ switch p.From.Name {
+ case obj.NAME_GOTREF:
+ rel.Type = objabi.R_ADDRPOWER_GOT
+ case obj.NAME_TOCREF:
+ rel.Type = objabi.R_ADDRPOWER_TOCREL_DS
+ }
+ default:
+ reuseBaseReg := o.a6 == C_REG
+ // Reuse To.Reg as base register if it is a GPR.
+ o1, o2, rel = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg)
+ }
+
+ // Convert to prefixed forms if supported.
+ if o.ispfx {
+ switch rel.Type {
+ case objabi.R_ADDRPOWER, objabi.R_ADDRPOWER_DS,
+ objabi.R_ADDRPOWER_TOCREL, objabi.R_ADDRPOWER_TOCREL_DS:
+ o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
+ rel.Type = objabi.R_ADDRPOWER_PCREL34
+ case objabi.R_POWER_TLS_IE:
+ o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
+ rel.Type = objabi.R_POWER_TLS_IE_PCREL34
+ case objabi.R_ADDRPOWER_GOT:
+ o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
+ rel.Type = objabi.R_ADDRPOWER_GOT_PCREL34
+ default:
+ // We've failed to convert a TOC-relative relocation to a PC-relative one.
+ log.Fatalf("Unable convert TOC-relative relocation %v to PC-relative", rel.Type)
+ }
+ } else if c.opform(inst) == DS_FORM && v&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+
+ o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 79:
+ if p.From.Offset != 0 {
+ c.ctxt.Diag("invalid offset against tls var %v", p)
+ }
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ if !o.ispfx {
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0)
+ o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+ rel.Type = objabi.R_POWER_TLS_LE
+ } else {
+ o1, o2 = pfxadd(p.To.Reg, REG_R13, PFX_R_ABS, 0)
+ rel.Type = objabi.R_POWER_TLS_LE_TPREL34
+ }
+
+ case 80:
+ if p.From.Offset != 0 {
+ c.ctxt.Diag("invalid offset against tls var %v", p)
+ }
+ rel := obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc)
+ rel.Siz = 8
+ rel.Sym = p.From.Sym
+ rel.Type = objabi.R_POWER_TLS_IE
+ if !o.ispfx {
+ o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0)
+ o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0)
+ } else {
+ o1, o2 = pfxload(p.As, p.To.Reg, REG_R0, PFX_R_PCREL)
+ rel.Type = objabi.R_POWER_TLS_IE_PCREL34
+ }
+ o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13)
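+ // A second relocation covers the add of R13 (the thread pointer),
+ // marking the final instruction of the initial-exec sequence for the linker.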
+ rel = obj.Addrel(c.cursym)
+ rel.Off = int32(c.pc) + 8
+ rel.Siz = 4
+ rel.Sym = p.From.Sym
+ rel.Type = objabi.R_POWER_TLS
+
+ case 82: /* vector instructions, VX-form and VC-form */
+ if p.From.Type == obj.TYPE_REG {
+ /* reg reg none OR reg reg reg */
+ /* 3-register operand order: VRA, VRB, VRT */
+ /* 2-register operand order: VRA, VRT */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
+ } else if p.From3Type() == obj.TYPE_CONST {
+ /* imm imm reg reg */
+ /* operand order: SIX, VRA, ST, VRT */
+ six := int(c.regoff(&p.From))
+ st := int(c.regoff(p.GetFrom3()))
+ o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six))
+ } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 {
+ /* imm reg reg */
+ /* operand order: UIM, VRB, VRT */
+ uim := int(c.regoff(&p.From))
+ o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim))
+ } else {
+ /* imm reg */
+ /* operand order: SIM, VRT */
+ sim := int(c.regoff(&p.From))
+ o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim))
+ }
+
+ case 83: /* vector instructions, VA-form */
+ if p.From.Type == obj.TYPE_REG {
+ /* reg reg reg reg */
+ /* 4-register operand order: VRA, VRB, VRC, VRT */
+ o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
+ } else if p.From.Type == obj.TYPE_CONST {
+ /* imm reg reg reg */
+ /* operand order: SHB, VRA, VRB, VRT */
+ shb := int(c.regoff(&p.From))
+ o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb))
+ }
+
+ case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc
+ bc := c.vregoff(&p.From)
+ if o.a1 == C_CRBIT {
+ // CR bit is encoded as a register, not a constant.
+ bc = int64(p.From.Reg)
+ }
+
+ // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg
+ o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc))
+
+ case 85: /* vector instructions, VX-form */
+ /* reg none reg */
+ /* 2-register operand order: VRB, VRT */
+ o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg))
+
+ case 86: /* VSX indexed store, XX1-form */
+ /* reg reg reg */
+ /* 3-register operand order: XT, (RB)(RA*1) */
+ o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg))
+
+ case 87: /* VSX indexed load, XX1-form */
+ /* reg reg reg */
+ /* 3-register operand order: (RB)(RA*1), XT */
+ o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg))
+
+ case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */
+ o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
+
+ case 89: /* VSX instructions, XX2-form */
+ /* reg none reg OR reg imm reg */
+ /* 2-register operand order: XB, XT or XB, UIM, XT*/
+ uim := int(c.regoff(p.GetFrom3()))
+ o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg))
+
+ case 90: /* VSX instructions, XX3-form */
+ if p.From3Type() == obj.TYPE_NONE {
+ /* reg reg reg */
+ /* 3-register operand order: XA, XB, XT */
+ o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
+ } else if p.From3Type() == obj.TYPE_CONST {
+ /* reg reg reg imm */
+ /* operand order: XA, XB, DM, XT */
+ dm := int(c.regoff(p.GetFrom3()))
+ o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm))
+ }
+
+ case 91: /* VSX instructions, XX4-form */
+ /* reg reg reg reg */
+ /* 3-register operand order: XA, XB, XC, XT */
+ o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg))
+
+ case 92: /* X-form instructions, 3-operands */
+ if p.To.Type == obj.TYPE_CONST {
+ /* imm reg reg */
+ xf := int32(p.From.Reg)
+ if REG_F0 <= xf && xf <= REG_F31 {
+ /* operand order: FRA, FRB, BF */
+ bf := int(c.regoff(&p.To)) << 2
+ o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
+ } else {
+ /* operand order: RA, RB, L */
+ l := int(c.regoff(&p.To))
+ o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg))
+ }
+ } else if p.From3Type() == obj.TYPE_CONST {
+ /* reg reg imm */
+ /* operand order: RB, L, RA */
+ l := int(c.regoff(p.GetFrom3()))
+ o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg))
+ } else if p.To.Type == obj.TYPE_REG {
+ cr := int32(p.To.Reg)
+ if REG_CR0 <= cr && cr <= REG_CR7 {
+ /* cr reg reg */
+ /* operand order: RA, RB, BF */
+ bf := (int(p.To.Reg) & 7) << 2
+ o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg))
+ } else if p.From.Type == obj.TYPE_CONST {
+ /* reg imm */
+ /* operand order: L, RT */
+ l := int(c.regoff(&p.From))
+ o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg))
+ } else {
+ switch p.As {
+ case ACOPY, APASTECC:
+ o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg))
+ default:
+ /* reg reg reg */
+ /* operand order: RS, RB, RA */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
+ }
+ }
+ }
+
+ case 93: /* X-form instructions, 2-operands */
+ if p.To.Type == obj.TYPE_CONST {
+ /* imm reg */
+ /* operand order: FRB, BF */
+ bf := int(c.regoff(&p.To)) << 2
+ o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg))
+ } else if p.Reg == 0 {
+ /* popcnt* r,r, X-form */
+ /* operand order: RS, RA */
+ o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg))
+ }
+
+ case 94: /* Z23-form instructions, 4-operands */
+ /* reg reg reg imm */
+ /* operand order: RA, RB, CY, RT */
+ cy := int(c.regoff(p.GetFrom3()))
+ o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy))
+
+ case 96: /* VSX load, DQ-form */
+ /* reg imm reg */
+ /* operand order: (RA)(DQ), XT */
+ dq := int16(c.regoff(&p.From))
+ if (dq & 15) != 0 {
+ c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
+ }
+ o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq))
+
+ case 97: /* VSX store, DQ-form */
+ /* reg imm reg */
+ /* operand order: XT, (RA)(DQ) */
+ dq := int16(c.regoff(&p.To))
+ if (dq & 15) != 0 {
+ c.ctxt.Diag("invalid offset for DQ form load/store %v", dq)
+ }
+ o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq))
+ case 98: /* VSX indexed load or load with length (also left-justified), X-form */
+ /* vsreg, reg, reg */
+ o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
+ case 99: /* VSX store with length (also left-justified), X-form */
+ /* reg, reg, vsreg */
+ o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg))
+ case 100: /* VSX X-form XXSPLTIB */
+ if p.From.Type == obj.TYPE_CONST {
+ /* imm reg */
+ uim := int(c.regoff(&p.From))
+ /* imm reg */
+ /* Use AOP_XX1 form with 0 for one of the registers. */
+ o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim))
+ } else {
+ c.ctxt.Diag("invalid ops for %v", p.As)
+ }
+ case 101:
+ o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg))
+
+ case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/
+ mb := uint32(c.regoff(&p.RestArgs[0].Addr))
+ me := uint32(c.regoff(&p.RestArgs[1].Addr))
+ sh := uint32(c.regoff(&p.From))
+ o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me)
+
+ case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/
+ mb := uint32(c.regoff(&p.RestArgs[0].Addr))
+ me := uint32(c.regoff(&p.RestArgs[1].Addr))
+ o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me)
+
+ case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */
+ o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg))
+
+ case 106: /* MOVD spr, soreg */
+ v := int32(p.From.Reg)
+ o1 = OPVCC(31, 339, 0, 0) /* mfspr */
+ o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
+ so := c.regoff(&p.To)
+ o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so))
+ if so&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+ if p.To.Reg == REGTMP {
+ log.Fatalf("SPR move to memory will clobber R31 %v", p)
+ }
+
+ case 107: /* MOVD soreg, spr */
+ v := int32(p.From.Reg)
+ so := c.regoff(&p.From)
+ o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so))
+ o2 = OPVCC(31, 467, 0, 0) /* mtspr */
+ v = int32(p.To.Reg)
+ o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11
+ if so&0x3 != 0 {
+ log.Fatalf("invalid offset for DS form load/store %v", p)
+ }
+
+ case 108: /* mov r, xoreg ==> stwx rx,ry */
+ r := int(p.To.Reg)
+ o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r))
+
+ case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */
+ r := int(p.From.Reg)
+
+ o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r))
+ // Sign extend MOVB operations. This is ignored for other cases (o.size == 4).
+ o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0)
+
+ case 110: /* SETB creg, rt */
+ bfa := uint32(p.From.Reg) << 2
+ rt := uint32(p.To.Reg)
+ o1 = LOP_RRR(OP_SETB, bfa, rt, 0)
+ }
+
+ out[0] = o1
+ out[1] = o2
+ out[2] = o3
+ out[3] = o4
+ out[4] = o5
+}
+
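+// vregoff classifies the address operand via aclass and returns the
+// resulting 64-bit instruction offset (c.instoffset).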
+func (c *ctxt9) vregoff(a *obj.Addr) int64 {
+ c.instoffset = 0
+ if a != nil {
+ c.aclass(a)
+ }
+ return c.instoffset
+}
+
+func (c *ctxt9) regoff(a *obj.Addr) int32 {
+ return int32(c.vregoff(a))
+}
+
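+// oprrr returns the 32-bit opcode template (primary opcode, extended
+// opcode, and the OE/Rc bits) for the register-register form of a.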
+func (c *ctxt9) oprrr(a obj.As) uint32 {
+ switch a {
+ case AADD:
+ return OPVCC(31, 266, 0, 0)
+ case AADDCC:
+ return OPVCC(31, 266, 0, 1)
+ case AADDV:
+ return OPVCC(31, 266, 1, 0)
+ case AADDVCC:
+ return OPVCC(31, 266, 1, 1)
+ case AADDC:
+ return OPVCC(31, 10, 0, 0)
+ case AADDCCC:
+ return OPVCC(31, 10, 0, 1)
+ case AADDCV:
+ return OPVCC(31, 10, 1, 0)
+ case AADDCVCC:
+ return OPVCC(31, 10, 1, 1)
+ case AADDE:
+ return OPVCC(31, 138, 0, 0)
+ case AADDECC:
+ return OPVCC(31, 138, 0, 1)
+ case AADDEV:
+ return OPVCC(31, 138, 1, 0)
+ case AADDEVCC:
+ return OPVCC(31, 138, 1, 1)
+ case AADDME:
+ return OPVCC(31, 234, 0, 0)
+ case AADDMECC:
+ return OPVCC(31, 234, 0, 1)
+ case AADDMEV:
+ return OPVCC(31, 234, 1, 0)
+ case AADDMEVCC:
+ return OPVCC(31, 234, 1, 1)
+ case AADDZE:
+ return OPVCC(31, 202, 0, 0)
+ case AADDZECC:
+ return OPVCC(31, 202, 0, 1)
+ case AADDZEV:
+ return OPVCC(31, 202, 1, 0)
+ case AADDZEVCC:
+ return OPVCC(31, 202, 1, 1)
+ case AADDEX:
+ return OPVCC(31, 170, 0, 0) /* addex - v3.0b */
+
+ case AAND:
+ return OPVCC(31, 28, 0, 0)
+ case AANDCC:
+ return OPVCC(31, 28, 0, 1)
+ case AANDN:
+ return OPVCC(31, 60, 0, 0)
+ case AANDNCC:
+ return OPVCC(31, 60, 0, 1)
+
+ case ACMP:
+ return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */
+ case ACMPU:
+ return OPVCC(31, 32, 0, 0) | 1<<21
+ case ACMPW:
+ return OPVCC(31, 0, 0, 0) /* L=0 */
+ case ACMPWU:
+ return OPVCC(31, 32, 0, 0)
+ case ACMPB:
+ return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */
+ case ACMPEQB:
+ return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
+
+ case ACNTLZW:
+ return OPVCC(31, 26, 0, 0)
+ case ACNTLZWCC:
+ return OPVCC(31, 26, 0, 1)
+ case ACNTLZD:
+ return OPVCC(31, 58, 0, 0)
+ case ACNTLZDCC:
+ return OPVCC(31, 58, 0, 1)
+
+ case ACRAND:
+ return OPVCC(19, 257, 0, 0)
+ case ACRANDN:
+ return OPVCC(19, 129, 0, 0)
+ case ACREQV:
+ return OPVCC(19, 289, 0, 0)
+ case ACRNAND:
+ return OPVCC(19, 225, 0, 0)
+ case ACRNOR:
+ return OPVCC(19, 33, 0, 0)
+ case ACROR:
+ return OPVCC(19, 449, 0, 0)
+ case ACRORN:
+ return OPVCC(19, 417, 0, 0)
+ case ACRXOR:
+ return OPVCC(19, 193, 0, 0)
+
+ case ADCBF:
+ return OPVCC(31, 86, 0, 0)
+ case ADCBI:
+ return OPVCC(31, 470, 0, 0)
+ case ADCBST:
+ return OPVCC(31, 54, 0, 0)
+ case ADCBT:
+ return OPVCC(31, 278, 0, 0)
+ case ADCBTST:
+ return OPVCC(31, 246, 0, 0)
+ case ADCBZ:
+ return OPVCC(31, 1014, 0, 0)
+
+ case AMODUD:
+ return OPVCC(31, 265, 0, 0) /* modud - v3.0 */
+ case AMODUW:
+ return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */
+ case AMODSD:
+ return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */
+ case AMODSW:
+ return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */
+
+ case ADIVW, AREM:
+ return OPVCC(31, 491, 0, 0)
+
+ case ADIVWCC:
+ return OPVCC(31, 491, 0, 1)
+
+ case ADIVWV:
+ return OPVCC(31, 491, 1, 0)
+
+ case ADIVWVCC:
+ return OPVCC(31, 491, 1, 1)
+
+ case ADIVWU, AREMU:
+ return OPVCC(31, 459, 0, 0)
+
+ case ADIVWUCC:
+ return OPVCC(31, 459, 0, 1)
+
+ case ADIVWUV:
+ return OPVCC(31, 459, 1, 0)
+
+ case ADIVWUVCC:
+ return OPVCC(31, 459, 1, 1)
+
+ case ADIVD, AREMD:
+ return OPVCC(31, 489, 0, 0)
+
+ case ADIVDCC:
+ return OPVCC(31, 489, 0, 1)
+
+ case ADIVDE:
+ return OPVCC(31, 425, 0, 0)
+
+ case ADIVDECC:
+ return OPVCC(31, 425, 0, 1)
+
+ case ADIVDEU:
+ return OPVCC(31, 393, 0, 0)
+
+ case ADIVDEUCC:
+ return OPVCC(31, 393, 0, 1)
+
+ case ADIVDV:
+ return OPVCC(31, 489, 1, 0)
+
+ case ADIVDVCC:
+ return OPVCC(31, 489, 1, 1)
+
+ case ADIVDU, AREMDU:
+ return OPVCC(31, 457, 0, 0)
+
+ case ADIVDUCC:
+ return OPVCC(31, 457, 0, 1)
+
+ case ADIVDUV:
+ return OPVCC(31, 457, 1, 0)
+
+ case ADIVDUVCC:
+ return OPVCC(31, 457, 1, 1)
+
+ case AEIEIO:
+ return OPVCC(31, 854, 0, 0)
+
+ case AEQV:
+ return OPVCC(31, 284, 0, 0)
+ case AEQVCC:
+ return OPVCC(31, 284, 0, 1)
+
+ case AEXTSB:
+ return OPVCC(31, 954, 0, 0)
+ case AEXTSBCC:
+ return OPVCC(31, 954, 0, 1)
+ case AEXTSH:
+ return OPVCC(31, 922, 0, 0)
+ case AEXTSHCC:
+ return OPVCC(31, 922, 0, 1)
+ case AEXTSW:
+ return OPVCC(31, 986, 0, 0)
+ case AEXTSWCC:
+ return OPVCC(31, 986, 0, 1)
+
+ case AFABS:
+ return OPVCC(63, 264, 0, 0)
+ case AFABSCC:
+ return OPVCC(63, 264, 0, 1)
+ case AFADD:
+ return OPVCC(63, 21, 0, 0)
+ case AFADDCC:
+ return OPVCC(63, 21, 0, 1)
+ case AFADDS:
+ return OPVCC(59, 21, 0, 0)
+ case AFADDSCC:
+ return OPVCC(59, 21, 0, 1)
+ case AFCMPO:
+ return OPVCC(63, 32, 0, 0)
+ case AFCMPU:
+ return OPVCC(63, 0, 0, 0)
+ case AFCFID:
+ return OPVCC(63, 846, 0, 0)
+ case AFCFIDCC:
+ return OPVCC(63, 846, 0, 1)
+ case AFCFIDU:
+ return OPVCC(63, 974, 0, 0)
+ case AFCFIDUCC:
+ return OPVCC(63, 974, 0, 1)
+ case AFCFIDS:
+ return OPVCC(59, 846, 0, 0)
+ case AFCFIDSCC:
+ return OPVCC(59, 846, 0, 1)
+ case AFCTIW:
+ return OPVCC(63, 14, 0, 0)
+ case AFCTIWCC:
+ return OPVCC(63, 14, 0, 1)
+ case AFCTIWZ:
+ return OPVCC(63, 15, 0, 0)
+ case AFCTIWZCC:
+ return OPVCC(63, 15, 0, 1)
+ case AFCTID:
+ return OPVCC(63, 814, 0, 0)
+ case AFCTIDCC:
+ return OPVCC(63, 814, 0, 1)
+ case AFCTIDZ:
+ return OPVCC(63, 815, 0, 0)
+ case AFCTIDZCC:
+ return OPVCC(63, 815, 0, 1)
+ case AFDIV:
+ return OPVCC(63, 18, 0, 0)
+ case AFDIVCC:
+ return OPVCC(63, 18, 0, 1)
+ case AFDIVS:
+ return OPVCC(59, 18, 0, 0)
+ case AFDIVSCC:
+ return OPVCC(59, 18, 0, 1)
+ case AFMADD:
+ return OPVCC(63, 29, 0, 0)
+ case AFMADDCC:
+ return OPVCC(63, 29, 0, 1)
+ case AFMADDS:
+ return OPVCC(59, 29, 0, 0)
+ case AFMADDSCC:
+ return OPVCC(59, 29, 0, 1)
+
+ case AFMOVS, AFMOVD:
+ return OPVCC(63, 72, 0, 0) /* load */
+ case AFMOVDCC:
+ return OPVCC(63, 72, 0, 1)
+ case AFMSUB:
+ return OPVCC(63, 28, 0, 0)
+ case AFMSUBCC:
+ return OPVCC(63, 28, 0, 1)
+ case AFMSUBS:
+ return OPVCC(59, 28, 0, 0)
+ case AFMSUBSCC:
+ return OPVCC(59, 28, 0, 1)
+ case AFMUL:
+ return OPVCC(63, 25, 0, 0)
+ case AFMULCC:
+ return OPVCC(63, 25, 0, 1)
+ case AFMULS:
+ return OPVCC(59, 25, 0, 0)
+ case AFMULSCC:
+ return OPVCC(59, 25, 0, 1)
+ case AFNABS:
+ return OPVCC(63, 136, 0, 0)
+ case AFNABSCC:
+ return OPVCC(63, 136, 0, 1)
+ case AFNEG:
+ return OPVCC(63, 40, 0, 0)
+ case AFNEGCC:
+ return OPVCC(63, 40, 0, 1)
+ case AFNMADD:
+ return OPVCC(63, 31, 0, 0)
+ case AFNMADDCC:
+ return OPVCC(63, 31, 0, 1)
+ case AFNMADDS:
+ return OPVCC(59, 31, 0, 0)
+ case AFNMADDSCC:
+ return OPVCC(59, 31, 0, 1)
+ case AFNMSUB:
+ return OPVCC(63, 30, 0, 0)
+ case AFNMSUBCC:
+ return OPVCC(63, 30, 0, 1)
+ case AFNMSUBS:
+ return OPVCC(59, 30, 0, 0)
+ case AFNMSUBSCC:
+ return OPVCC(59, 30, 0, 1)
+ case AFCPSGN:
+ return OPVCC(63, 8, 0, 0)
+ case AFCPSGNCC:
+ return OPVCC(63, 8, 0, 1)
+ case AFRES:
+ return OPVCC(59, 24, 0, 0)
+ case AFRESCC:
+ return OPVCC(59, 24, 0, 1)
+ case AFRIM:
+ return OPVCC(63, 488, 0, 0)
+ case AFRIMCC:
+ return OPVCC(63, 488, 0, 1)
+ case AFRIP:
+ return OPVCC(63, 456, 0, 0)
+ case AFRIPCC:
+ return OPVCC(63, 456, 0, 1)
+ case AFRIZ:
+ return OPVCC(63, 424, 0, 0)
+ case AFRIZCC:
+ return OPVCC(63, 424, 0, 1)
+ case AFRIN:
+ return OPVCC(63, 392, 0, 0)
+ case AFRINCC:
+ return OPVCC(63, 392, 0, 1)
+ case AFRSP:
+ return OPVCC(63, 12, 0, 0)
+ case AFRSPCC:
+ return OPVCC(63, 12, 0, 1)
+ case AFRSQRTE:
+ return OPVCC(63, 26, 0, 0)
+ case AFRSQRTECC:
+ return OPVCC(63, 26, 0, 1)
+ case AFSEL:
+ return OPVCC(63, 23, 0, 0)
+ case AFSELCC:
+ return OPVCC(63, 23, 0, 1)
+ case AFSQRT:
+ return OPVCC(63, 22, 0, 0)
+ case AFSQRTCC:
+ return OPVCC(63, 22, 0, 1)
+ case AFSQRTS:
+ return OPVCC(59, 22, 0, 0)
+ case AFSQRTSCC:
+ return OPVCC(59, 22, 0, 1)
+ case AFSUB:
+ return OPVCC(63, 20, 0, 0)
+ case AFSUBCC:
+ return OPVCC(63, 20, 0, 1)
+ case AFSUBS:
+ return OPVCC(59, 20, 0, 0)
+ case AFSUBSCC:
+ return OPVCC(59, 20, 0, 1)
+
+ case AICBI:
+ return OPVCC(31, 982, 0, 0)
+ case AISYNC:
+ return OPVCC(19, 150, 0, 0)
+
+ case AMTFSB0:
+ return OPVCC(63, 70, 0, 0)
+ case AMTFSB0CC:
+ return OPVCC(63, 70, 0, 1)
+ case AMTFSB1:
+ return OPVCC(63, 38, 0, 0)
+ case AMTFSB1CC:
+ return OPVCC(63, 38, 0, 1)
+
+ case AMULHW:
+ return OPVCC(31, 75, 0, 0)
+ case AMULHWCC:
+ return OPVCC(31, 75, 0, 1)
+ case AMULHWU:
+ return OPVCC(31, 11, 0, 0)
+ case AMULHWUCC:
+ return OPVCC(31, 11, 0, 1)
+ case AMULLW:
+ return OPVCC(31, 235, 0, 0)
+ case AMULLWCC:
+ return OPVCC(31, 235, 0, 1)
+ case AMULLWV:
+ return OPVCC(31, 235, 1, 0)
+ case AMULLWVCC:
+ return OPVCC(31, 235, 1, 1)
+
+ case AMULHD:
+ return OPVCC(31, 73, 0, 0)
+ case AMULHDCC:
+ return OPVCC(31, 73, 0, 1)
+ case AMULHDU:
+ return OPVCC(31, 9, 0, 0)
+ case AMULHDUCC:
+ return OPVCC(31, 9, 0, 1)
+ case AMULLD:
+ return OPVCC(31, 233, 0, 0)
+ case AMULLDCC:
+ return OPVCC(31, 233, 0, 1)
+ case AMULLDV:
+ return OPVCC(31, 233, 1, 0)
+ case AMULLDVCC:
+ return OPVCC(31, 233, 1, 1)
+
+ case ANAND:
+ return OPVCC(31, 476, 0, 0)
+ case ANANDCC:
+ return OPVCC(31, 476, 0, 1)
+ case ANEG:
+ return OPVCC(31, 104, 0, 0)
+ case ANEGCC:
+ return OPVCC(31, 104, 0, 1)
+ case ANEGV:
+ return OPVCC(31, 104, 1, 0)
+ case ANEGVCC:
+ return OPVCC(31, 104, 1, 1)
+ case ANOR:
+ return OPVCC(31, 124, 0, 0)
+ case ANORCC:
+ return OPVCC(31, 124, 0, 1)
+ case AOR:
+ return OPVCC(31, 444, 0, 0)
+ case AORCC:
+ return OPVCC(31, 444, 0, 1)
+ case AORN:
+ return OPVCC(31, 412, 0, 0)
+ case AORNCC:
+ return OPVCC(31, 412, 0, 1)
+
+ case APOPCNTD:
+ return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */
+ case APOPCNTW:
+ return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */
+ case APOPCNTB:
+ return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */
+ case ACNTTZW:
+ return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */
+ case ACNTTZWCC:
+ return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */
+ case ACNTTZD:
+ return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */
+ case ACNTTZDCC:
+ return OPVCC(31, 570, 0, 1) /* cnttzd. - v3.00 */
+
+ case ARFI:
+ return OPVCC(19, 50, 0, 0)
+ case ARFCI:
+ return OPVCC(19, 51, 0, 0)
+ case ARFID:
+ return OPVCC(19, 18, 0, 0)
+ case AHRFID:
+ return OPVCC(19, 274, 0, 0)
+
+ case ARLWMI:
+ return OPVCC(20, 0, 0, 0)
+ case ARLWMICC:
+ return OPVCC(20, 0, 0, 1)
+ case ARLWNM:
+ return OPVCC(23, 0, 0, 0)
+ case ARLWNMCC:
+ return OPVCC(23, 0, 0, 1)
+
+ case ARLDCL:
+ return OPVCC(30, 8, 0, 0)
+ case ARLDCLCC:
+ return OPVCC(30, 8, 0, 1)
+
+ case ARLDCR:
+ return OPVCC(30, 9, 0, 0)
+ case ARLDCRCC:
+ return OPVCC(30, 9, 0, 1)
+
+ case ARLDICL:
+ return OPVCC(30, 0, 0, 0)
+ case ARLDICLCC:
+ return OPVCC(30, 0, 0, 1)
+ case ARLDICR:
+ return OPMD(30, 1, 0) // rldicr
+ case ARLDICRCC:
+ return OPMD(30, 1, 1) // rldicr.
+
+ case ARLDIC:
+ return OPMD(30, 2, 0) // rldic
+ case ARLDICCC:
+ return OPMD(30, 2, 1) // rldic.
+
+ case ASYSCALL:
+ return OPVCC(17, 1, 0, 0)
+
+ case ASLW:
+ return OPVCC(31, 24, 0, 0)
+ case ASLWCC:
+ return OPVCC(31, 24, 0, 1)
+ case ASLD:
+ return OPVCC(31, 27, 0, 0)
+ case ASLDCC:
+ return OPVCC(31, 27, 0, 1)
+
+ case ASRAW:
+ return OPVCC(31, 792, 0, 0)
+ case ASRAWCC:
+ return OPVCC(31, 792, 0, 1)
+ case ASRAD:
+ return OPVCC(31, 794, 0, 0)
+ case ASRADCC:
+ return OPVCC(31, 794, 0, 1)
+
+ case AEXTSWSLI:
+ return OPVCC(31, 445, 0, 0)
+ case AEXTSWSLICC:
+ return OPVCC(31, 445, 0, 1)
+
+ case ASRW:
+ return OPVCC(31, 536, 0, 0)
+ case ASRWCC:
+ return OPVCC(31, 536, 0, 1)
+ case ASRD:
+ return OPVCC(31, 539, 0, 0)
+ case ASRDCC:
+ return OPVCC(31, 539, 0, 1)
+
+ case ASUB:
+ return OPVCC(31, 40, 0, 0)
+ case ASUBCC:
+ return OPVCC(31, 40, 0, 1)
+ case ASUBV:
+ return OPVCC(31, 40, 1, 0)
+ case ASUBVCC:
+ return OPVCC(31, 40, 1, 1)
+ case ASUBC:
+ return OPVCC(31, 8, 0, 0)
+ case ASUBCCC:
+ return OPVCC(31, 8, 0, 1)
+ case ASUBCV:
+ return OPVCC(31, 8, 1, 0)
+ case ASUBCVCC:
+ return OPVCC(31, 8, 1, 1)
+ case ASUBE:
+ return OPVCC(31, 136, 0, 0)
+ case ASUBECC:
+ return OPVCC(31, 136, 0, 1)
+ case ASUBEV:
+ return OPVCC(31, 136, 1, 0)
+ case ASUBEVCC:
+ return OPVCC(31, 136, 1, 1)
+ case ASUBME:
+ return OPVCC(31, 232, 0, 0)
+ case ASUBMECC:
+ return OPVCC(31, 232, 0, 1)
+ case ASUBMEV:
+ return OPVCC(31, 232, 1, 0)
+ case ASUBMEVCC:
+ return OPVCC(31, 232, 1, 1)
+ case ASUBZE:
+ return OPVCC(31, 200, 0, 0)
+ case ASUBZECC:
+ return OPVCC(31, 200, 0, 1)
+ case ASUBZEV:
+ return OPVCC(31, 200, 1, 0)
+ case ASUBZEVCC:
+ return OPVCC(31, 200, 1, 1)
+
+ case ASYNC:
+ return OPVCC(31, 598, 0, 0)
+ case ALWSYNC:
+ return OPVCC(31, 598, 0, 0) | 1<<21
+
+ case APTESYNC:
+ return OPVCC(31, 598, 0, 0) | 2<<21
+
+ case ATLBIE:
+ return OPVCC(31, 306, 0, 0)
+ case ATLBIEL:
+ return OPVCC(31, 274, 0, 0)
+ case ATLBSYNC:
+ return OPVCC(31, 566, 0, 0)
+ case ASLBIA:
+ return OPVCC(31, 498, 0, 0)
+ case ASLBIE:
+ return OPVCC(31, 434, 0, 0)
+ case ASLBMFEE:
+ return OPVCC(31, 915, 0, 0)
+ case ASLBMFEV:
+ return OPVCC(31, 851, 0, 0)
+ case ASLBMTE:
+ return OPVCC(31, 402, 0, 0)
+
+ case ATW:
+ return OPVCC(31, 4, 0, 0)
+ case ATD:
+ return OPVCC(31, 68, 0, 0)
+
+ /* Vector (VMX/Altivec) instructions */
+ /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
+ /* are enabled starting at POWER6 (ISA 2.05). */
+ case AVAND:
+ return OPVX(4, 1028, 0, 0) /* vand - v2.03 */
+ case AVANDC:
+ return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */
+ case AVNAND:
+ return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */
+
+ case AVOR:
+ return OPVX(4, 1156, 0, 0) /* vor - v2.03 */
+ case AVORC:
+ return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */
+ case AVNOR:
+ return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */
+ case AVXOR:
+ return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */
+ case AVEQV:
+ return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */
+
+ case AVADDUBM:
+ return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */
+ case AVADDUHM:
+ return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */
+ case AVADDUWM:
+ return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */
+ case AVADDUDM:
+ return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */
+ case AVADDUQM:
+ return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */
+
+ case AVADDCUQ:
+ return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */
+ case AVADDCUW:
+ return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */
+
+ case AVADDUBS:
+ return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */
+ case AVADDUHS:
+ return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */
+ case AVADDUWS:
+ return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */
+
+ case AVADDSBS:
+ return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */
+ case AVADDSHS:
+ return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */
+ case AVADDSWS:
+ return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */
+
+ case AVADDEUQM:
+ return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */
+ case AVADDECUQ:
+ return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */
+
+ case AVMULESB:
+ return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */
+ case AVMULOSB:
+ return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */
+ case AVMULEUB:
+ return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */
+ case AVMULOUB:
+ return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */
+ case AVMULESH:
+ return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */
+ case AVMULOSH:
+ return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */
+ case AVMULEUH:
+ return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */
+ case AVMULOUH:
+ return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */
+ case AVMULESW:
+ return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */
+ case AVMULOSW:
+ return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */
+ case AVMULEUW:
+ return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */
+ case AVMULOUW:
+ return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */
+ case AVMULUWM:
+ return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */
+
+ case AVPMSUMB:
+ return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */
+ case AVPMSUMH:
+ return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */
+ case AVPMSUMW:
+ return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */
+ case AVPMSUMD:
+ return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */
+
+ case AVMSUMUDM:
+ return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */
+
+ case AVSUBUBM:
+ return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */
+ case AVSUBUHM:
+ return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */
+ case AVSUBUWM:
+ return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */
+ case AVSUBUDM:
+ return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */
+ case AVSUBUQM:
+ return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */
+
+ case AVSUBCUQ:
+ return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */
+ case AVSUBCUW:
+ return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */
+
+ case AVSUBUBS:
+ return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */
+ case AVSUBUHS:
+ return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */
+ case AVSUBUWS:
+ return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */
+
+ case AVSUBSBS:
+ return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */
+ case AVSUBSHS:
+ return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 */
+ case AVSUBSWS:
+ return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */
+
+ case AVSUBEUQM:
+ return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */
+ case AVSUBECUQ:
+ return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */
+
+ case AVRLB:
+ return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */
+ case AVRLH:
+ return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */
+ case AVRLW:
+ return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */
+ case AVRLD:
+ return OPVX(4, 196, 0, 0) /* vrld - v2.07 */
+
+ case AVMRGOW:
+ return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */
+ case AVMRGEW:
+ return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */
+
+ case AVSLB:
+ return OPVX(4, 260, 0, 0) /* vslb - v2.03 */
+ case AVSLH:
+ return OPVX(4, 324, 0, 0) /* vslh - v2.03 */
+ case AVSLW:
+ return OPVX(4, 388, 0, 0) /* vslw - v2.03 */
+ case AVSL:
+ return OPVX(4, 452, 0, 0) /* vsl - v2.03 */
+ case AVSLO:
+ return OPVX(4, 1036, 0, 0) /* vslo - v2.03 */
+ case AVSRB:
+ return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */
+ case AVSRH:
+ return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */
+ case AVSRW:
+ return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */
+ case AVSR:
+ return OPVX(4, 708, 0, 0) /* vsr - v2.03 */
+ case AVSRO:
+ return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */
+ case AVSLD:
+ return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */
+ case AVSRD:
+ return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */
+
+ case AVSRAB:
+ return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */
+ case AVSRAH:
+ return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */
+ case AVSRAW:
+ return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */
+ case AVSRAD:
+ return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */
+
+ case AVBPERMQ:
+ return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */
+ case AVBPERMD:
+ return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */
+
+ case AVCLZB:
+ return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */
+ case AVCLZH:
+ return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */
+ case AVCLZW:
+ return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */
+ case AVCLZD:
+ return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */
+
+ case AVCLZLSBB:
+ return OPVX(4, 1538, 0, 0) /* vclzlsbb - v3.0 */
+ case AVCTZLSBB:
+ return OPVX(4, 1538, 0, 0) | 1<<16 /* vctzlsbb - v3.0 */
+
+ case AVPOPCNTB:
+ return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */
+ case AVPOPCNTH:
+ return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */
+ case AVPOPCNTW:
+ return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */
+ case AVPOPCNTD:
+ return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */
+
+ case AVCMPEQUB:
+ return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */
+ case AVCMPEQUBCC:
+ return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */
+ case AVCMPEQUH:
+ return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */
+ case AVCMPEQUHCC:
+ return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */
+ case AVCMPEQUW:
+ return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */
+ case AVCMPEQUWCC:
+ return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */
+ case AVCMPEQUD:
+ return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */
+ case AVCMPEQUDCC:
+ return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */
+
+ case AVCMPGTUB:
+ return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */
+ case AVCMPGTUBCC:
+ return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */
+ case AVCMPGTUH:
+ return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */
+ case AVCMPGTUHCC:
+ return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */
+ case AVCMPGTUW:
+ return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */
+ case AVCMPGTUWCC:
+ return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */
+ case AVCMPGTUD:
+ return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */
+ case AVCMPGTUDCC:
+ return OPVC(4, 711, 0, 1) /* vcmpgtud. - v2.07 */
+ case AVCMPGTSB:
+ return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */
+ case AVCMPGTSBCC:
+ return OPVC(4, 774, 0, 1) /* vcmpgtsb. - v2.03 */
+ case AVCMPGTSH:
+ return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */
+ case AVCMPGTSHCC:
+ return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */
+ case AVCMPGTSW:
+ return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */
+ case AVCMPGTSWCC:
+ return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */
+ case AVCMPGTSD:
+ return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */
+ case AVCMPGTSDCC:
+ return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */
+
+ case AVCMPNEZB:
+ return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */
+ case AVCMPNEZBCC:
+ return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */
+ case AVCMPNEB:
+ return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */
+ case AVCMPNEBCC:
+ return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */
+ case AVCMPNEH:
+ return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */
+ case AVCMPNEHCC:
+ return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */
+ case AVCMPNEW:
+ return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */
+ case AVCMPNEWCC:
+ return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */
+
+ case AVPERM:
+ return OPVX(4, 43, 0, 0) /* vperm - v2.03 */
+ case AVPERMXOR:
+ return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */
+ case AVPERMR:
+ return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */
+
+ case AVSEL:
+ return OPVX(4, 42, 0, 0) /* vsel - v2.03 */
+
+ case AVCIPHER:
+ return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */
+ case AVCIPHERLAST:
+ return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */
+ case AVNCIPHER:
+ return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */
+ case AVNCIPHERLAST:
+ return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */
+ case AVSBOX:
+ return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */
+ /* End of vector instructions */
+
+ /* Vector scalar (VSX) instructions */
+ /* ISA 2.06 (POWER7) introduced VSX; each case notes the ISA level where the instruction first appears. */
+ case AMFVSRD, AMFVRD, AMFFPRD:
+ return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */
+ case AMFVSRWZ:
+ return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */
+ case AMFVSRLD:
+ return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */
+
+ case AMTVSRD, AMTFPRD, AMTVRD:
+ return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */
+ case AMTVSRWA:
+ return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */
+ case AMTVSRWZ:
+ return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */
+ case AMTVSRDD:
+ return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */
+ case AMTVSRWS:
+ return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */
+
+ case AXXLAND:
+ return OPVXX3(60, 130, 0) /* xxland - v2.06 */
+ case AXXLANDC:
+ return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */
+ case AXXLEQV:
+ return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */
+ case AXXLNAND:
+ return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */
+
+ case AXXLORC:
+ return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */
+ case AXXLNOR:
+ return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */
+ case AXXLOR, AXXLORQ:
+ return OPVXX3(60, 146, 0) /* xxlor - v2.06 */
+ case AXXLXOR:
+ return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */
+
+ case AXXSEL:
+ return OPVXX4(60, 3, 0) /* xxsel - v2.06 */
+
+ case AXXMRGHW:
+ return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */
+ case AXXMRGLW:
+ return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */
+
+ case AXXSPLTW:
+ return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */
+
+ case AXXSPLTIB:
+ return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */
+
+ case AXXPERM:
+ return OPVXX3(60, 26, 0) /* xxperm - v2.06 */
+ case AXXPERMDI:
+ return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */
+
+ case AXXSLDWI:
+ return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */
+
+ case AXXBRQ:
+ return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */
+ case AXXBRD:
+ return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */
+ case AXXBRW:
+ return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 */
+ case AXXBRH:
+ return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */
+
+ case AXSCVDPSP:
+ return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */
+ case AXSCVSPDP:
+ return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */
+ case AXSCVDPSPN:
+ return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */
+ case AXSCVSPDPN:
+ return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */
+
+ case AXVCVDPSP:
+ return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */
+ case AXVCVSPDP:
+ return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */
+
+ case AXSCVDPSXDS:
+ return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */
+ case AXSCVDPSXWS:
+ return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */
+ case AXSCVDPUXDS:
+ return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */
+ case AXSCVDPUXWS:
+ return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */
+
+ case AXSCVSXDDP:
+ return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */
+ case AXSCVUXDDP:
+ return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */
+ case AXSCVSXDSP:
+ return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */
+ case AXSCVUXDSP:
+ return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */
+
+ case AXVCVDPSXDS:
+ return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */
+ case AXVCVDPSXWS:
+ return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */
+ case AXVCVDPUXDS:
+ return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */
+ case AXVCVDPUXWS:
+ return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */
+ case AXVCVSPSXDS:
+ return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */
+ case AXVCVSPSXWS:
+ return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */
+ case AXVCVSPUXDS:
+ return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */
+ case AXVCVSPUXWS:
+ return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */
+
+ case AXVCVSXDDP:
+ return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */
+ case AXVCVSXWDP:
+ return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */
+ case AXVCVUXDDP:
+ return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */
+ case AXVCVUXWDP:
+ return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */
+ case AXVCVSXDSP:
+ return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */
+ case AXVCVSXWSP:
+ return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */
+ case AXVCVUXDSP:
+ return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */
+ case AXVCVUXWSP:
+ return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */
+ /* End of VSX instructions */
+
+ case AMADDHD:
+ return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */
+ case AMADDHDU:
+ return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */
+ case AMADDLD:
+ return OPVX(4, 51, 0, 0) /* maddld - v3.00 */
+
+ case AXOR:
+ return OPVCC(31, 316, 0, 0)
+ case AXORCC:
+ return OPVCC(31, 316, 0, 1)
+ }
+
+ c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a)
+ return 0
+}
+
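+/*
+ * immediate, register, register, register: i/r/r/r
+ */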
+func (c *ctxt9) opirrr(a obj.As) uint32 {
+ switch a {
+ /* Vector (VMX/Altivec) instructions */
+ /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
+ /* are enabled starting at POWER6 (ISA 2.05). */
+ case AVSLDOI:
+ return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */
+ }
+
+ c.ctxt.Diag("bad i/r/r/r opcode %v", a)
+ return 0
+}
+
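+/*
+ * immediate, immediate, register, register: i/i/r/r
+ */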
+func (c *ctxt9) opiirr(a obj.As) uint32 {
+ switch a {
+ /* Vector (VMX/Altivec) instructions */
+ /* ISA 2.07 enables these for POWER8 and beyond. */
+ case AVSHASIGMAW:
+ return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */
+ case AVSHASIGMAD:
+ return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */
+ }
+
+ c.ctxt.Diag("bad i/i/r/r opcode %v", a)
+ return 0
+}
+
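+/*
+ * immediate, register(s): i/r or i/r/r
+ */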
+func (c *ctxt9) opirr(a obj.As) uint32 {
+ switch a {
+ case AADD:
+ return OPVCC(14, 0, 0, 0)
+ case AADDC:
+ return OPVCC(12, 0, 0, 0)
+ case AADDCCC:
+ return OPVCC(13, 0, 0, 0)
+ case AADDIS:
+ return OPVCC(15, 0, 0, 0) /* ADDIS */
+
+ case AANDCC:
+ return OPVCC(28, 0, 0, 0)
+ case AANDISCC:
+ return OPVCC(29, 0, 0, 0) /* ANDIS. */
+
+ case ABR:
+ return OPVCC(18, 0, 0, 0)
+ case ABL:
+ return OPVCC(18, 0, 0, 0) | 1
+ case obj.ADUFFZERO:
+ return OPVCC(18, 0, 0, 0) | 1
+ case obj.ADUFFCOPY:
+ return OPVCC(18, 0, 0, 0) | 1
+ case ABC:
+ return OPVCC(16, 0, 0, 0)
+ case ABCL:
+ return OPVCC(16, 0, 0, 0) | 1
+
+ case ABEQ:
+ return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0)
+ case ABGE:
+ return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0)
+ case ABGT:
+ return AOP_RRR(16<<26, BO_BCR, BI_GT, 0)
+ case ABLE:
+ return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0)
+ case ABLT:
+ return AOP_RRR(16<<26, BO_BCR, BI_LT, 0)
+ case ABNE:
+ return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0)
+ case ABVC:
+ return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0)
+ case ABVS:
+ return AOP_RRR(16<<26, BO_BCR, BI_FU, 0)
+ case ABDZ:
+ return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0)
+ case ABDNZ:
+ return AOP_RRR(16<<26, BO_BCTR, 0, 0)
+
+ case ACMP:
+ return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */
+ case ACMPU:
+ return OPVCC(10, 0, 0, 0) | 1<<21
+ case ACMPW:
+ return OPVCC(11, 0, 0, 0) /* L=0 */
+ case ACMPWU:
+ return OPVCC(10, 0, 0, 0)
+ case ACMPEQB:
+ return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */
+
+ case ALSW:
+ return OPVCC(31, 597, 0, 0)
+
+ case ACOPY:
+ return OPVCC(31, 774, 0, 0) /* copy - v3.00 */
+ case APASTECC:
+ return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */
+ case ADARN:
+ return OPVCC(31, 755, 0, 0) /* darn - v3.00 */
+
+ case AMULLW, AMULLD:
+ return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */
+
+ case AOR:
+ return OPVCC(24, 0, 0, 0)
+ case AORIS:
+ return OPVCC(25, 0, 0, 0) /* ORIS */
+
+ case ARLWMI:
+ return OPVCC(20, 0, 0, 0) /* rlwimi */
+ case ARLWMICC:
+ return OPVCC(20, 0, 0, 1)
+ case ARLDMI:
+ return OPMD(30, 3, 0) /* rldimi */
+ case ARLDMICC:
+ return OPMD(30, 3, 1) /* rldimi. */
+ case ARLDIMI:
+ return OPMD(30, 3, 0) /* rldimi */
+ case ARLDIMICC:
+ return OPMD(30, 3, 1) /* rldimi. */
+ case ARLWNM:
+ return OPVCC(21, 0, 0, 0) /* rlwinm */
+ case ARLWNMCC:
+ return OPVCC(21, 0, 0, 1)
+
+ case ARLDCL:
+ return OPMD(30, 0, 0) /* rldicl */
+ case ARLDCLCC:
+ return OPMD(30, 0, 1) /* rldicl. */
+ case ARLDCR:
+ return OPMD(30, 1, 0) /* rldicr */
+ case ARLDCRCC:
+ return OPMD(30, 1, 1) /* rldicr. */
+ case ARLDC:
+ return OPMD(30, 2, 0) /* rldic */
+ case ARLDCCC:
+ return OPMD(30, 2, 1) /* rldic. */
+
+ case ASRAW:
+ return OPVCC(31, 824, 0, 0)
+ case ASRAWCC:
+ return OPVCC(31, 824, 0, 1)
+ case ASRAD:
+ return OPVCC(31, (413 << 1), 0, 0)
+ case ASRADCC:
+ return OPVCC(31, (413 << 1), 0, 1)
+ case AEXTSWSLI:
+ return OPVCC(31, 445, 0, 0)
+ case AEXTSWSLICC:
+ return OPVCC(31, 445, 0, 1)
+
+ case ASTSW:
+ return OPVCC(31, 725, 0, 0)
+
+ case ASUBC:
+ return OPVCC(8, 0, 0, 0)
+
+ case ATW:
+ return OPVCC(3, 0, 0, 0)
+ case ATD:
+ return OPVCC(2, 0, 0, 0)
+
+ /* Vector (VMX/Altivec) instructions */
+ /* ISA 2.03 enables these for PPC970. For POWERx processors, these */
+ /* are enabled starting at POWER6 (ISA 2.05). */
+ case AVSPLTB:
+ return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */
+ case AVSPLTH:
+ return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */
+ case AVSPLTW:
+ return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */
+
+ case AVSPLTISB:
+ return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */
+ case AVSPLTISH:
+ return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */
+ case AVSPLTISW:
+ return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */
+ /* End of vector instructions */
+
+ case AFTDIV:
+ return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */
+ case AFTSQRT:
+ return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */
+
+ case AXOR:
+ return OPVCC(26, 0, 0, 0) /* XORIL */
+ case AXORIS:
+ return OPVCC(27, 0, 0, 0) /* XORIS */
+ }
+
+ c.ctxt.Diag("bad opcode i/r or i/r/r %v", a)
+ return 0
+}
+
+/*
+ * load o(a),d
+ */
+func (c *ctxt9) opload(a obj.As) uint32 {
+ switch a {
+ case AMOVD:
+ return OPVCC(58, 0, 0, 0) /* ld */
+ case AMOVDU:
+ return OPVCC(58, 0, 0, 1) /* ldu */
+ case AMOVWZ:
+ return OPVCC(32, 0, 0, 0) /* lwz */
+ case AMOVWZU:
+ return OPVCC(33, 0, 0, 0) /* lwzu */
+ case AMOVW:
+ return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */
+ case ALXV:
+ return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */
+ case ALXVL:
+ return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */
+ case ALXVLL:
+ return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */
+ case ALXVX:
+ return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
+
+ /* no AMOVWU */
+ case AMOVB, AMOVBZ:
+ return OPVCC(34, 0, 0, 0) /* lbz */
+
+ case AMOVBU, AMOVBZU:
+ return OPVCC(35, 0, 0, 0)
+ case AFMOVD:
+ return OPVCC(50, 0, 0, 0)
+ case AFMOVDU:
+ return OPVCC(51, 0, 0, 0)
+ case AFMOVS:
+ return OPVCC(48, 0, 0, 0)
+ case AFMOVSU:
+ return OPVCC(49, 0, 0, 0)
+ case AMOVH:
+ return OPVCC(42, 0, 0, 0)
+ case AMOVHU:
+ return OPVCC(43, 0, 0, 0)
+ case AMOVHZ:
+ return OPVCC(40, 0, 0, 0)
+ case AMOVHZU:
+ return OPVCC(41, 0, 0, 0)
+ case AMOVMW:
+ return OPVCC(46, 0, 0, 0) /* lmw */
+ }
+
+ c.ctxt.Diag("bad load opcode %v", a)
+ return 0
+}
+
+/*
+ * indexed load a(b),d
+ */
+func (c *ctxt9) oploadx(a obj.As) uint32 {
+ switch a {
+ case AMOVWZ:
+ return OPVCC(31, 23, 0, 0) /* lwzx */
+ case AMOVWZU:
+ return OPVCC(31, 55, 0, 0) /* lwzux */
+ case AMOVW:
+ return OPVCC(31, 341, 0, 0) /* lwax */
+ case AMOVWU:
+ return OPVCC(31, 373, 0, 0) /* lwaux */
+
+ case AMOVB, AMOVBZ:
+ return OPVCC(31, 87, 0, 0) /* lbzx */
+
+ case AMOVBU, AMOVBZU:
+ return OPVCC(31, 119, 0, 0) /* lbzux */
+ case AFMOVD:
+ return OPVCC(31, 599, 0, 0) /* lfdx */
+ case AFMOVDU:
+ return OPVCC(31, 631, 0, 0) /* lfdux */
+ case AFMOVS:
+ return OPVCC(31, 535, 0, 0) /* lfsx */
+ case AFMOVSU:
+ return OPVCC(31, 567, 0, 0) /* lfsux */
+ case AFMOVSX:
+ return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */
+ case AFMOVSZ:
+ return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */
+ case AMOVH:
+ return OPVCC(31, 343, 0, 0) /* lhax */
+ case AMOVHU:
+ return OPVCC(31, 375, 0, 0) /* lhaux */
+ case AMOVHBR:
+ return OPVCC(31, 790, 0, 0) /* lhbrx */
+ case AMOVWBR:
+ return OPVCC(31, 534, 0, 0) /* lwbrx */
+ case AMOVDBR:
+ return OPVCC(31, 532, 0, 0) /* ldbrx */
+ case AMOVHZ:
+ return OPVCC(31, 279, 0, 0) /* lhzx */
+ case AMOVHZU:
+ return OPVCC(31, 311, 0, 0) /* lhzux */
+ case ALBAR:
+ return OPVCC(31, 52, 0, 0) /* lbarx */
+ case ALHAR:
+ return OPVCC(31, 116, 0, 0) /* lharx */
+ case ALWAR:
+ return OPVCC(31, 20, 0, 0) /* lwarx */
+ case ALDAR:
+ return OPVCC(31, 84, 0, 0) /* ldarx */
+ case ALSW:
+ return OPVCC(31, 533, 0, 0) /* lswx */
+ case AMOVD:
+ return OPVCC(31, 21, 0, 0) /* ldx */
+ case AMOVDU:
+ return OPVCC(31, 53, 0, 0) /* ldux */
+
+ /* Vector (VMX/Altivec) instructions */
+ case ALVEBX:
+ return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */
+ case ALVEHX:
+ return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */
+ case ALVEWX:
+ return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */
+ case ALVX:
+ return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */
+ case ALVXL:
+ return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */
+ case ALVSL:
+ return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */
+ case ALVSR:
+ return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */
+ /* End of vector instructions */
+
+ /* Vector scalar (VSX) instructions */
+ case ALXVX:
+ return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */
+ case ALXVD2X:
+ return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */
+ case ALXVW4X:
+ return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */
+ case ALXVH8X:
+ return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */
+ case ALXVB16X:
+ return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */
+ case ALXVDSX:
+ return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */
+ case ALXSDX:
+ return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */
+ case ALXSIWAX:
+ return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */
+ case ALXSIWZX:
+ return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */
+ }
+
+ c.ctxt.Diag("bad loadx opcode %v", a)
+ return 0
+}
+
+/*
+ * store s,o(d)
+ */
+func (c *ctxt9) opstore(a obj.As) uint32 {
+ switch a {
+ case AMOVB, AMOVBZ:
+ return OPVCC(38, 0, 0, 0) /* stb */
+
+ case AMOVBU, AMOVBZU:
+ return OPVCC(39, 0, 0, 0) /* stbu */
+ case AFMOVD:
+ return OPVCC(54, 0, 0, 0) /* stfd */
+ case AFMOVDU:
+ return OPVCC(55, 0, 0, 0) /* stfdu */
+ case AFMOVS:
+ return OPVCC(52, 0, 0, 0) /* stfs */
+ case AFMOVSU:
+ return OPVCC(53, 0, 0, 0) /* stfsu */
+
+ case AMOVHZ, AMOVH:
+ return OPVCC(44, 0, 0, 0) /* sth */
+
+ case AMOVHZU, AMOVHU:
+ return OPVCC(45, 0, 0, 0) /* sthu */
+ case AMOVMW:
+ return OPVCC(47, 0, 0, 0) /* stmw */
+ case ASTSW:
+ return OPVCC(31, 725, 0, 0) /* stswi */
+
+ case AMOVWZ, AMOVW:
+ return OPVCC(36, 0, 0, 0) /* stw */
+
+ case AMOVWZU, AMOVWU:
+ return OPVCC(37, 0, 0, 0) /* stwu */
+ case AMOVD:
+ return OPVCC(62, 0, 0, 0) /* std */
+ case AMOVDU:
+ return OPVCC(62, 0, 0, 1) /* stdu */
+ case ASTXV:
+ return OPDQ(61, 5, 0) /* stxv - ISA v3.0 */
+ case ASTXVL:
+ return OPVXX1(31, 397, 0) /* stxvl - ISA v3.0 */
+ case ASTXVLL:
+ return OPVXX1(31, 429, 0) /* stxvll - ISA v3.0 */
+ case ASTXVX:
+ return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */
+ }
+
+ c.ctxt.Diag("unknown store opcode %v", a)
+ return 0
+}
+
+/*
+ * indexed store s,a(b)
+ */
+func (c *ctxt9) opstorex(a obj.As) uint32 {
+ switch a {
+ case AMOVB, AMOVBZ:
+ return OPVCC(31, 215, 0, 0) /* stbx */
+
+ case AMOVBU, AMOVBZU:
+ return OPVCC(31, 247, 0, 0) /* stbux */
+ case AFMOVD:
+ return OPVCC(31, 727, 0, 0) /* stfdx */
+ case AFMOVDU:
+ return OPVCC(31, 759, 0, 0) /* stfdux */
+ case AFMOVS:
+ return OPVCC(31, 663, 0, 0) /* stfsx */
+ case AFMOVSU:
+ return OPVCC(31, 695, 0, 0) /* stfsux */
+ case AFMOVSX:
+ return OPVCC(31, 983, 0, 0) /* stfiwx */
+
+ case AMOVHZ, AMOVH:
+ return OPVCC(31, 407, 0, 0) /* sthx */
+ case AMOVHBR:
+ return OPVCC(31, 918, 0, 0) /* sthbrx */
+
+ case AMOVHZU, AMOVHU:
+ return OPVCC(31, 439, 0, 0) /* sthux */
+
+ case AMOVWZ, AMOVW:
+ return OPVCC(31, 151, 0, 0) /* stwx */
+
+ case AMOVWZU, AMOVWU:
+ return OPVCC(31, 183, 0, 0) /* stwux */
+ case ASTSW:
+ return OPVCC(31, 661, 0, 0) /* stswx */
+ case AMOVWBR:
+ return OPVCC(31, 662, 0, 0) /* stwbrx */
+ case AMOVDBR:
+ return OPVCC(31, 660, 0, 0) /* stdbrx */
+ case ASTBCCC:
+ return OPVCC(31, 694, 0, 1) /* stbcx. */
+ case ASTHCCC:
+ return OPVCC(31, 726, 0, 1) /* sthcx. */
+ case ASTWCCC:
+ return OPVCC(31, 150, 0, 1) /* stwcx. */
+ case ASTDCCC:
+ return OPVCC(31, 214, 0, 1) /* stdcx. */
+ case AMOVD:
+ return OPVCC(31, 149, 0, 0) /* stdx */
+ case AMOVDU:
+ return OPVCC(31, 181, 0, 0) /* stdux */
+
+ /* Vector (VMX/Altivec) instructions */
+ case ASTVEBX:
+ return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */
+ case ASTVEHX:
+ return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */
+ case ASTVEWX:
+ return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */
+ case ASTVX:
+ return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */
+ case ASTVXL:
+ return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */
+ /* End of vector instructions */
+
+ /* Vector scalar (VSX) instructions */
+ case ASTXVX:
+ return OPVXX1(31, 396, 0) /* stxvx - v3.0 */
+ case ASTXVD2X:
+ return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */
+ case ASTXVW4X:
+ return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */
+ case ASTXVH8X:
+ return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */
+ case ASTXVB16X:
+ return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */
+
+ case ASTXSDX:
+ return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */
+
+ case ASTXSIWX:
+ return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */
+
+ /* End of vector scalar instructions */
+ }
+
+ c.ctxt.Diag("unknown storex opcode %v", a)
+ return 0
+}
diff --git a/src/cmd/internal/obj/ppc64/asm9_gtables.go b/src/cmd/internal/obj/ppc64/asm9_gtables.go
new file mode 100644
index 0000000..953b148
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/asm9_gtables.go
@@ -0,0 +1,1660 @@
+// DO NOT EDIT
+// generated by: ppc64map -fmt=encoder pp64.csv
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+)
+
+const (
+ AXXSETACCZ = ALASTAOUT + iota
+ AXXMTACC
+ AXXMFACC
+ AXXGENPCVWM
+ AXXGENPCVHM
+ AXXGENPCVDM
+ AXXGENPCVBM
+ AXVTLSBB
+ AXVI8GER4SPP
+ AXVI8GER4PP
+ AXVI8GER4
+ AXVI4GER8PP
+ AXVI4GER8
+ AXVI16GER2SPP
+ AXVI16GER2S
+ AXVI16GER2PP
+ AXVI16GER2
+ AXVF64GERPP
+ AXVF64GERPN
+ AXVF64GERNP
+ AXVF64GERNN
+ AXVF64GER
+ AXVF32GERPP
+ AXVF32GERPN
+ AXVF32GERNP
+ AXVF32GERNN
+ AXVF32GER
+ AXVF16GER2PP
+ AXVF16GER2PN
+ AXVF16GER2NP
+ AXVF16GER2NN
+ AXVF16GER2
+ AXVCVSPBF16
+ AXVCVBF16SPN
+ AXVBF16GER2PP
+ AXVBF16GER2PN
+ AXVBF16GER2NP
+ AXVBF16GER2NN
+ AXVBF16GER2
+ AXSMINCQP
+ AXSMAXCQP
+ AXSCVUQQP
+ AXSCVSQQP
+ AXSCVQPUQZ
+ AXSCVQPSQZ
+ AXSCMPGTQP
+ AXSCMPGEQP
+ AXSCMPEQQP
+ AVSTRIHRCC
+ AVSTRIHR
+ AVSTRIHLCC
+ AVSTRIHL
+ AVSTRIBRCC
+ AVSTRIBR
+ AVSTRIBLCC
+ AVSTRIBL
+ AVSRQ
+ AVSRDBI
+ AVSRAQ
+ AVSLQ
+ AVSLDBI
+ AVRLQNM
+ AVRLQMI
+ AVRLQ
+ AVPEXTD
+ AVPDEPD
+ AVMULOUD
+ AVMULOSD
+ AVMULLD
+ AVMULHUW
+ AVMULHUD
+ AVMULHSW
+ AVMULHSD
+ AVMULEUD
+ AVMULESD
+ AVMSUMCUD
+ AVMODUW
+ AVMODUQ
+ AVMODUD
+ AVMODSW
+ AVMODSQ
+ AVMODSD
+ AVINSWVRX
+ AVINSWVLX
+ AVINSWRX
+ AVINSWLX
+ AVINSW
+ AVINSHVRX
+ AVINSHVLX
+ AVINSHRX
+ AVINSHLX
+ AVINSDRX
+ AVINSDLX
+ AVINSD
+ AVINSBVRX
+ AVINSBVLX
+ AVINSBRX
+ AVINSBLX
+ AVGNB
+ AVEXTSD2Q
+ AVEXTRACTWM
+ AVEXTRACTQM
+ AVEXTRACTHM
+ AVEXTRACTDM
+ AVEXTRACTBM
+ AVEXTDUWVRX
+ AVEXTDUWVLX
+ AVEXTDUHVRX
+ AVEXTDUHVLX
+ AVEXTDUBVRX
+ AVEXTDUBVLX
+ AVEXTDDVRX
+ AVEXTDDVLX
+ AVEXPANDWM
+ AVEXPANDQM
+ AVEXPANDHM
+ AVEXPANDDM
+ AVEXPANDBM
+ AVDIVUW
+ AVDIVUQ
+ AVDIVUD
+ AVDIVSW
+ AVDIVSQ
+ AVDIVSD
+ AVDIVEUW
+ AVDIVEUQ
+ AVDIVEUD
+ AVDIVESW
+ AVDIVESQ
+ AVDIVESD
+ AVCTZDM
+ AVCNTMBW
+ AVCNTMBH
+ AVCNTMBD
+ AVCNTMBB
+ AVCMPUQ
+ AVCMPSQ
+ AVCMPGTUQCC
+ AVCMPGTUQ
+ AVCMPGTSQCC
+ AVCMPGTSQ
+ AVCMPEQUQCC
+ AVCMPEQUQ
+ AVCLZDM
+ AVCLRRB
+ AVCLRLB
+ AVCFUGED
+ ASTXVRWX
+ ASTXVRHX
+ ASTXVRDX
+ ASTXVRBX
+ ASTXVPX
+ ASTXVP
+ ASETNBCR
+ ASETNBC
+ ASETBCR
+ ASETBC
+ APEXTD
+ APDEPD
+ AMTVSRWM
+ AMTVSRQM
+ AMTVSRHM
+ AMTVSRDM
+ AMTVSRBMI
+ AMTVSRBM
+ ALXVRWX
+ ALXVRHX
+ ALXVRDX
+ ALXVRBX
+ ALXVPX
+ ALXVP
+ ALXVKQ
+ ADCTFIXQQ
+ ADCFFIXQQ
+ ACNTTZDM
+ ACNTLZDM
+ ACFUGED
+ ABRW
+ ABRH
+ ABRD
+ AHASHSTP
+ AHASHST
+ AHASHCHKP
+ AHASHCHK
+ AXXSPLTIW
+ AXXSPLTIDP
+ AXXSPLTI32DX
+ AXXPERMX
+ AXXEVAL
+ AXXBLENDVW
+ AXXBLENDVH
+ AXXBLENDVD
+ AXXBLENDVB
+ APSTXVP
+ APSTXV
+ APSTXSSP
+ APSTXSD
+ APSTW
+ APSTQ
+ APSTH
+ APSTFS
+ APSTFD
+ APSTD
+ APSTB
+ APNOP
+ APMXVI8GER4SPP
+ APMXVI8GER4PP
+ APMXVI8GER4
+ APMXVI4GER8PP
+ APMXVI4GER8
+ APMXVI16GER2SPP
+ APMXVI16GER2S
+ APMXVI16GER2PP
+ APMXVI16GER2
+ APMXVF64GERPP
+ APMXVF64GERPN
+ APMXVF64GERNP
+ APMXVF64GERNN
+ APMXVF64GER
+ APMXVF32GERPP
+ APMXVF32GERPN
+ APMXVF32GERNP
+ APMXVF32GERNN
+ APMXVF32GER
+ APMXVF16GER2PP
+ APMXVF16GER2PN
+ APMXVF16GER2NP
+ APMXVF16GER2NN
+ APMXVF16GER2
+ APMXVBF16GER2PP
+ APMXVBF16GER2PN
+ APMXVBF16GER2NP
+ APMXVBF16GER2NN
+ APMXVBF16GER2
+ APLXVP
+ APLXV
+ APLXSSP
+ APLXSD
+ APLWZ
+ APLWA
+ APLQ
+ APLHZ
+ APLHA
+ APLFS
+ APLFD
+ APLD
+ APLBZ
+ APADDI
+ ALASTGEN
+ AFIRSTGEN = AXXSETACCZ
+)
+
+var GenAnames = []string{
+ "XXSETACCZ",
+ "XXMTACC",
+ "XXMFACC",
+ "XXGENPCVWM",
+ "XXGENPCVHM",
+ "XXGENPCVDM",
+ "XXGENPCVBM",
+ "XVTLSBB",
+ "XVI8GER4SPP",
+ "XVI8GER4PP",
+ "XVI8GER4",
+ "XVI4GER8PP",
+ "XVI4GER8",
+ "XVI16GER2SPP",
+ "XVI16GER2S",
+ "XVI16GER2PP",
+ "XVI16GER2",
+ "XVF64GERPP",
+ "XVF64GERPN",
+ "XVF64GERNP",
+ "XVF64GERNN",
+ "XVF64GER",
+ "XVF32GERPP",
+ "XVF32GERPN",
+ "XVF32GERNP",
+ "XVF32GERNN",
+ "XVF32GER",
+ "XVF16GER2PP",
+ "XVF16GER2PN",
+ "XVF16GER2NP",
+ "XVF16GER2NN",
+ "XVF16GER2",
+ "XVCVSPBF16",
+ "XVCVBF16SPN",
+ "XVBF16GER2PP",
+ "XVBF16GER2PN",
+ "XVBF16GER2NP",
+ "XVBF16GER2NN",
+ "XVBF16GER2",
+ "XSMINCQP",
+ "XSMAXCQP",
+ "XSCVUQQP",
+ "XSCVSQQP",
+ "XSCVQPUQZ",
+ "XSCVQPSQZ",
+ "XSCMPGTQP",
+ "XSCMPGEQP",
+ "XSCMPEQQP",
+ "VSTRIHRCC",
+ "VSTRIHR",
+ "VSTRIHLCC",
+ "VSTRIHL",
+ "VSTRIBRCC",
+ "VSTRIBR",
+ "VSTRIBLCC",
+ "VSTRIBL",
+ "VSRQ",
+ "VSRDBI",
+ "VSRAQ",
+ "VSLQ",
+ "VSLDBI",
+ "VRLQNM",
+ "VRLQMI",
+ "VRLQ",
+ "VPEXTD",
+ "VPDEPD",
+ "VMULOUD",
+ "VMULOSD",
+ "VMULLD",
+ "VMULHUW",
+ "VMULHUD",
+ "VMULHSW",
+ "VMULHSD",
+ "VMULEUD",
+ "VMULESD",
+ "VMSUMCUD",
+ "VMODUW",
+ "VMODUQ",
+ "VMODUD",
+ "VMODSW",
+ "VMODSQ",
+ "VMODSD",
+ "VINSWVRX",
+ "VINSWVLX",
+ "VINSWRX",
+ "VINSWLX",
+ "VINSW",
+ "VINSHVRX",
+ "VINSHVLX",
+ "VINSHRX",
+ "VINSHLX",
+ "VINSDRX",
+ "VINSDLX",
+ "VINSD",
+ "VINSBVRX",
+ "VINSBVLX",
+ "VINSBRX",
+ "VINSBLX",
+ "VGNB",
+ "VEXTSD2Q",
+ "VEXTRACTWM",
+ "VEXTRACTQM",
+ "VEXTRACTHM",
+ "VEXTRACTDM",
+ "VEXTRACTBM",
+ "VEXTDUWVRX",
+ "VEXTDUWVLX",
+ "VEXTDUHVRX",
+ "VEXTDUHVLX",
+ "VEXTDUBVRX",
+ "VEXTDUBVLX",
+ "VEXTDDVRX",
+ "VEXTDDVLX",
+ "VEXPANDWM",
+ "VEXPANDQM",
+ "VEXPANDHM",
+ "VEXPANDDM",
+ "VEXPANDBM",
+ "VDIVUW",
+ "VDIVUQ",
+ "VDIVUD",
+ "VDIVSW",
+ "VDIVSQ",
+ "VDIVSD",
+ "VDIVEUW",
+ "VDIVEUQ",
+ "VDIVEUD",
+ "VDIVESW",
+ "VDIVESQ",
+ "VDIVESD",
+ "VCTZDM",
+ "VCNTMBW",
+ "VCNTMBH",
+ "VCNTMBD",
+ "VCNTMBB",
+ "VCMPUQ",
+ "VCMPSQ",
+ "VCMPGTUQCC",
+ "VCMPGTUQ",
+ "VCMPGTSQCC",
+ "VCMPGTSQ",
+ "VCMPEQUQCC",
+ "VCMPEQUQ",
+ "VCLZDM",
+ "VCLRRB",
+ "VCLRLB",
+ "VCFUGED",
+ "STXVRWX",
+ "STXVRHX",
+ "STXVRDX",
+ "STXVRBX",
+ "STXVPX",
+ "STXVP",
+ "SETNBCR",
+ "SETNBC",
+ "SETBCR",
+ "SETBC",
+ "PEXTD",
+ "PDEPD",
+ "MTVSRWM",
+ "MTVSRQM",
+ "MTVSRHM",
+ "MTVSRDM",
+ "MTVSRBMI",
+ "MTVSRBM",
+ "LXVRWX",
+ "LXVRHX",
+ "LXVRDX",
+ "LXVRBX",
+ "LXVPX",
+ "LXVP",
+ "LXVKQ",
+ "DCTFIXQQ",
+ "DCFFIXQQ",
+ "CNTTZDM",
+ "CNTLZDM",
+ "CFUGED",
+ "BRW",
+ "BRH",
+ "BRD",
+ "HASHSTP",
+ "HASHST",
+ "HASHCHKP",
+ "HASHCHK",
+ "XXSPLTIW",
+ "XXSPLTIDP",
+ "XXSPLTI32DX",
+ "XXPERMX",
+ "XXEVAL",
+ "XXBLENDVW",
+ "XXBLENDVH",
+ "XXBLENDVD",
+ "XXBLENDVB",
+ "PSTXVP",
+ "PSTXV",
+ "PSTXSSP",
+ "PSTXSD",
+ "PSTW",
+ "PSTQ",
+ "PSTH",
+ "PSTFS",
+ "PSTFD",
+ "PSTD",
+ "PSTB",
+ "PNOP",
+ "PMXVI8GER4SPP",
+ "PMXVI8GER4PP",
+ "PMXVI8GER4",
+ "PMXVI4GER8PP",
+ "PMXVI4GER8",
+ "PMXVI16GER2SPP",
+ "PMXVI16GER2S",
+ "PMXVI16GER2PP",
+ "PMXVI16GER2",
+ "PMXVF64GERPP",
+ "PMXVF64GERPN",
+ "PMXVF64GERNP",
+ "PMXVF64GERNN",
+ "PMXVF64GER",
+ "PMXVF32GERPP",
+ "PMXVF32GERPN",
+ "PMXVF32GERNP",
+ "PMXVF32GERNN",
+ "PMXVF32GER",
+ "PMXVF16GER2PP",
+ "PMXVF16GER2PN",
+ "PMXVF16GER2NP",
+ "PMXVF16GER2NN",
+ "PMXVF16GER2",
+ "PMXVBF16GER2PP",
+ "PMXVBF16GER2PN",
+ "PMXVBF16GER2NP",
+ "PMXVBF16GER2NN",
+ "PMXVBF16GER2",
+ "PLXVP",
+ "PLXV",
+ "PLXSSP",
+ "PLXSD",
+ "PLWZ",
+ "PLWA",
+ "PLQ",
+ "PLHZ",
+ "PLHA",
+ "PLFS",
+ "PLFD",
+ "PLD",
+ "PLBZ",
+ "PADDI",
+}
+
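+// GenOpcodes holds the primary opcode word for each generated opcode
+// above, indexed by p.As - AXXSETACCZ; the type_* encoders OR the
+// operand fields into this word.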
+var GenOpcodes = [...]uint32{
+ 0x7c030162, // AXXSETACCZ
+ 0x7c010162, // AXXMTACC
+ 0x7c000162, // AXXMFACC
+ 0xf0000768, // AXXGENPCVWM
+ 0xf000072a, // AXXGENPCVHM
+ 0xf000076a, // AXXGENPCVDM
+ 0xf0000728, // AXXGENPCVBM
+ 0xf002076c, // AXVTLSBB
+ 0xec000318, // AXVI8GER4SPP
+ 0xec000010, // AXVI8GER4PP
+ 0xec000018, // AXVI8GER4
+ 0xec000110, // AXVI4GER8PP
+ 0xec000118, // AXVI4GER8
+ 0xec000150, // AXVI16GER2SPP
+ 0xec000158, // AXVI16GER2S
+ 0xec000358, // AXVI16GER2PP
+ 0xec000258, // AXVI16GER2
+ 0xec0001d0, // AXVF64GERPP
+ 0xec0005d0, // AXVF64GERPN
+ 0xec0003d0, // AXVF64GERNP
+ 0xec0007d0, // AXVF64GERNN
+ 0xec0001d8, // AXVF64GER
+ 0xec0000d0, // AXVF32GERPP
+ 0xec0004d0, // AXVF32GERPN
+ 0xec0002d0, // AXVF32GERNP
+ 0xec0006d0, // AXVF32GERNN
+ 0xec0000d8, // AXVF32GER
+ 0xec000090, // AXVF16GER2PP
+ 0xec000490, // AXVF16GER2PN
+ 0xec000290, // AXVF16GER2NP
+ 0xec000690, // AXVF16GER2NN
+ 0xec000098, // AXVF16GER2
+ 0xf011076c, // AXVCVSPBF16
+ 0xf010076c, // AXVCVBF16SPN
+ 0xec000190, // AXVBF16GER2PP
+ 0xec000590, // AXVBF16GER2PN
+ 0xec000390, // AXVBF16GER2NP
+ 0xec000790, // AXVBF16GER2NN
+ 0xec000198, // AXVBF16GER2
+ 0xfc0005c8, // AXSMINCQP
+ 0xfc000548, // AXSMAXCQP
+ 0xfc030688, // AXSCVUQQP
+ 0xfc0b0688, // AXSCVSQQP
+ 0xfc000688, // AXSCVQPUQZ
+ 0xfc080688, // AXSCVQPSQZ
+ 0xfc0001c8, // AXSCMPGTQP
+ 0xfc000188, // AXSCMPGEQP
+ 0xfc000088, // AXSCMPEQQP
+ 0x1003040d, // AVSTRIHRCC
+ 0x1003000d, // AVSTRIHR
+ 0x1002040d, // AVSTRIHLCC
+ 0x1002000d, // AVSTRIHL
+ 0x1001040d, // AVSTRIBRCC
+ 0x1001000d, // AVSTRIBR
+ 0x1000040d, // AVSTRIBLCC
+ 0x1000000d, // AVSTRIBL
+ 0x10000205, // AVSRQ
+ 0x10000216, // AVSRDBI
+ 0x10000305, // AVSRAQ
+ 0x10000105, // AVSLQ
+ 0x10000016, // AVSLDBI
+ 0x10000145, // AVRLQNM
+ 0x10000045, // AVRLQMI
+ 0x10000005, // AVRLQ
+ 0x1000058d, // AVPEXTD
+ 0x100005cd, // AVPDEPD
+ 0x100000c8, // AVMULOUD
+ 0x100001c8, // AVMULOSD
+ 0x100001c9, // AVMULLD
+ 0x10000289, // AVMULHUW
+ 0x100002c9, // AVMULHUD
+ 0x10000389, // AVMULHSW
+ 0x100003c9, // AVMULHSD
+ 0x100002c8, // AVMULEUD
+ 0x100003c8, // AVMULESD
+ 0x10000017, // AVMSUMCUD
+ 0x1000068b, // AVMODUW
+ 0x1000060b, // AVMODUQ
+ 0x100006cb, // AVMODUD
+ 0x1000078b, // AVMODSW
+ 0x1000070b, // AVMODSQ
+ 0x100007cb, // AVMODSD
+ 0x1000018f, // AVINSWVRX
+ 0x1000008f, // AVINSWVLX
+ 0x1000038f, // AVINSWRX
+ 0x1000028f, // AVINSWLX
+ 0x100000cf, // AVINSW
+ 0x1000014f, // AVINSHVRX
+ 0x1000004f, // AVINSHVLX
+ 0x1000034f, // AVINSHRX
+ 0x1000024f, // AVINSHLX
+ 0x100003cf, // AVINSDRX
+ 0x100002cf, // AVINSDLX
+ 0x100001cf, // AVINSD
+ 0x1000010f, // AVINSBVRX
+ 0x1000000f, // AVINSBVLX
+ 0x1000030f, // AVINSBRX
+ 0x1000020f, // AVINSBLX
+ 0x100004cc, // AVGNB
+ 0x101b0602, // AVEXTSD2Q
+ 0x100a0642, // AVEXTRACTWM
+ 0x100c0642, // AVEXTRACTQM
+ 0x10090642, // AVEXTRACTHM
+ 0x100b0642, // AVEXTRACTDM
+ 0x10080642, // AVEXTRACTBM
+ 0x1000001d, // AVEXTDUWVRX
+ 0x1000001c, // AVEXTDUWVLX
+ 0x1000001b, // AVEXTDUHVRX
+ 0x1000001a, // AVEXTDUHVLX
+ 0x10000019, // AVEXTDUBVRX
+ 0x10000018, // AVEXTDUBVLX
+ 0x1000001f, // AVEXTDDVRX
+ 0x1000001e, // AVEXTDDVLX
+ 0x10020642, // AVEXPANDWM
+ 0x10040642, // AVEXPANDQM
+ 0x10010642, // AVEXPANDHM
+ 0x10030642, // AVEXPANDDM
+ 0x10000642, // AVEXPANDBM
+ 0x1000008b, // AVDIVUW
+ 0x1000000b, // AVDIVUQ
+ 0x100000cb, // AVDIVUD
+ 0x1000018b, // AVDIVSW
+ 0x1000010b, // AVDIVSQ
+ 0x100001cb, // AVDIVSD
+ 0x1000028b, // AVDIVEUW
+ 0x1000020b, // AVDIVEUQ
+ 0x100002cb, // AVDIVEUD
+ 0x1000038b, // AVDIVESW
+ 0x1000030b, // AVDIVESQ
+ 0x100003cb, // AVDIVESD
+ 0x100007c4, // AVCTZDM
+ 0x101c0642, // AVCNTMBW
+ 0x101a0642, // AVCNTMBH
+ 0x101e0642, // AVCNTMBD
+ 0x10180642, // AVCNTMBB
+ 0x10000101, // AVCMPUQ
+ 0x10000141, // AVCMPSQ
+ 0x10000687, // AVCMPGTUQCC
+ 0x10000287, // AVCMPGTUQ
+ 0x10000787, // AVCMPGTSQCC
+ 0x10000387, // AVCMPGTSQ
+ 0x100005c7, // AVCMPEQUQCC
+ 0x100001c7, // AVCMPEQUQ
+ 0x10000784, // AVCLZDM
+ 0x100001cd, // AVCLRRB
+ 0x1000018d, // AVCLRLB
+ 0x1000054d, // AVCFUGED
+ 0x7c00019a, // ASTXVRWX
+ 0x7c00015a, // ASTXVRHX
+ 0x7c0001da, // ASTXVRDX
+ 0x7c00011a, // ASTXVRBX
+ 0x7c00039a, // ASTXVPX
+ 0x18000001, // ASTXVP
+ 0x7c0003c0, // ASETNBCR
+ 0x7c000380, // ASETNBC
+ 0x7c000340, // ASETBCR
+ 0x7c000300, // ASETBC
+ 0x7c000178, // APEXTD
+ 0x7c000138, // APDEPD
+ 0x10120642, // AMTVSRWM
+ 0x10140642, // AMTVSRQM
+ 0x10110642, // AMTVSRHM
+ 0x10130642, // AMTVSRDM
+ 0x10000014, // AMTVSRBMI
+ 0x10100642, // AMTVSRBM
+ 0x7c00009a, // ALXVRWX
+ 0x7c00005a, // ALXVRHX
+ 0x7c0000da, // ALXVRDX
+ 0x7c00001a, // ALXVRBX
+ 0x7c00029a, // ALXVPX
+ 0x18000000, // ALXVP
+ 0xf01f02d0, // ALXVKQ
+ 0xfc0107c4, // ADCTFIXQQ
+ 0xfc0007c4, // ADCFFIXQQ
+ 0x7c000476, // ACNTTZDM
+ 0x7c000076, // ACNTLZDM
+ 0x7c0001b8, // ACFUGED
+ 0x7c000136, // ABRW
+ 0x7c0001b6, // ABRH
+ 0x7c000176, // ABRD
+ 0x7c000524, // AHASHSTP
+ 0x7c0005a4, // AHASHST
+ 0x7c000564, // AHASHCHKP
+ 0x7c0005e4, // AHASHCHK
+ 0x80060000, // AXXSPLTIW
+ 0x80040000, // AXXSPLTIDP
+ 0x80000000, // AXXSPLTI32DX
+ 0x88000000, // AXXPERMX
+ 0x88000010, // AXXEVAL
+ 0x84000020, // AXXBLENDVW
+ 0x84000010, // AXXBLENDVH
+ 0x84000030, // AXXBLENDVD
+ 0x84000000, // AXXBLENDVB
+ 0xf8000000, // APSTXVP
+ 0xd8000000, // APSTXV
+ 0xbc000000, // APSTXSSP
+ 0xb8000000, // APSTXSD
+ 0x90000000, // APSTW
+ 0xf0000000, // APSTQ
+ 0xb0000000, // APSTH
+ 0xd0000000, // APSTFS
+ 0xd8000000, // APSTFD
+ 0xf4000000, // APSTD
+ 0x98000000, // APSTB
+ 0x00000000, // APNOP
+ 0xec000318, // APMXVI8GER4SPP
+ 0xec000010, // APMXVI8GER4PP
+ 0xec000018, // APMXVI8GER4
+ 0xec000110, // APMXVI4GER8PP
+ 0xec000118, // APMXVI4GER8
+ 0xec000150, // APMXVI16GER2SPP
+ 0xec000158, // APMXVI16GER2S
+ 0xec000358, // APMXVI16GER2PP
+ 0xec000258, // APMXVI16GER2
+ 0xec0001d0, // APMXVF64GERPP
+ 0xec0005d0, // APMXVF64GERPN
+ 0xec0003d0, // APMXVF64GERNP
+ 0xec0007d0, // APMXVF64GERNN
+ 0xec0001d8, // APMXVF64GER
+ 0xec0000d0, // APMXVF32GERPP
+ 0xec0004d0, // APMXVF32GERPN
+ 0xec0002d0, // APMXVF32GERNP
+ 0xec0006d0, // APMXVF32GERNN
+ 0xec0000d8, // APMXVF32GER
+ 0xec000090, // APMXVF16GER2PP
+ 0xec000490, // APMXVF16GER2PN
+ 0xec000290, // APMXVF16GER2NP
+ 0xec000690, // APMXVF16GER2NN
+ 0xec000098, // APMXVF16GER2
+ 0xec000190, // APMXVBF16GER2PP
+ 0xec000590, // APMXVBF16GER2PN
+ 0xec000390, // APMXVBF16GER2NP
+ 0xec000790, // APMXVBF16GER2NN
+ 0xec000198, // APMXVBF16GER2
+ 0xe8000000, // APLXVP
+ 0xc8000000, // APLXV
+ 0xac000000, // APLXSSP
+ 0xa8000000, // APLXSD
+ 0x80000000, // APLWZ
+ 0xa4000000, // APLWA
+ 0xe0000000, // APLQ
+ 0xa0000000, // APLHZ
+ 0xa8000000, // APLHA
+ 0xc0000000, // APLFS
+ 0xc8000000, // APLFD
+ 0xe4000000, // APLD
+ 0x88000000, // APLBZ
+ 0x38000000, // APADDI
+}
+
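+// GenPfxOpcodes holds the prefix word for the prefixed (8 byte)
+// opcodes, indexed by p.As - AXXSPLTIW; it is emitted ahead of the
+// matching GenOpcodes word.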
+var GenPfxOpcodes = [...]uint32{
+ 0x05000000, // AXXSPLTIW
+ 0x05000000, // AXXSPLTIDP
+ 0x05000000, // AXXSPLTI32DX
+ 0x05000000, // AXXPERMX
+ 0x05000000, // AXXEVAL
+ 0x05000000, // AXXBLENDVW
+ 0x05000000, // AXXBLENDVH
+ 0x05000000, // AXXBLENDVD
+ 0x05000000, // AXXBLENDVB
+ 0x04000000, // APSTXVP
+ 0x04000000, // APSTXV
+ 0x04000000, // APSTXSSP
+ 0x04000000, // APSTXSD
+ 0x06000000, // APSTW
+ 0x04000000, // APSTQ
+ 0x06000000, // APSTH
+ 0x06000000, // APSTFS
+ 0x06000000, // APSTFD
+ 0x04000000, // APSTD
+ 0x06000000, // APSTB
+ 0x07000000, // APNOP
+ 0x07900000, // APMXVI8GER4SPP
+ 0x07900000, // APMXVI8GER4PP
+ 0x07900000, // APMXVI8GER4
+ 0x07900000, // APMXVI4GER8PP
+ 0x07900000, // APMXVI4GER8
+ 0x07900000, // APMXVI16GER2SPP
+ 0x07900000, // APMXVI16GER2S
+ 0x07900000, // APMXVI16GER2PP
+ 0x07900000, // APMXVI16GER2
+ 0x07900000, // APMXVF64GERPP
+ 0x07900000, // APMXVF64GERPN
+ 0x07900000, // APMXVF64GERNP
+ 0x07900000, // APMXVF64GERNN
+ 0x07900000, // APMXVF64GER
+ 0x07900000, // APMXVF32GERPP
+ 0x07900000, // APMXVF32GERPN
+ 0x07900000, // APMXVF32GERNP
+ 0x07900000, // APMXVF32GERNN
+ 0x07900000, // APMXVF32GER
+ 0x07900000, // APMXVF16GER2PP
+ 0x07900000, // APMXVF16GER2PN
+ 0x07900000, // APMXVF16GER2NP
+ 0x07900000, // APMXVF16GER2NN
+ 0x07900000, // APMXVF16GER2
+ 0x07900000, // APMXVBF16GER2PP
+ 0x07900000, // APMXVBF16GER2PN
+ 0x07900000, // APMXVBF16GER2NP
+ 0x07900000, // APMXVBF16GER2NN
+ 0x07900000, // APMXVBF16GER2
+ 0x04000000, // APLXVP
+ 0x04000000, // APLXV
+ 0x04000000, // APLXSSP
+ 0x04000000, // APLXSD
+ 0x06000000, // APLWZ
+ 0x04000000, // APLWA
+ 0x04000000, // APLQ
+ 0x06000000, // APLHZ
+ 0x06000000, // APLHA
+ 0x06000000, // APLFS
+ 0x06000000, // APLFD
+ 0x04000000, // APLD
+ 0x06000000, // APLBZ
+ 0x06000000, // APADDI
+}
+
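+// optabGen lists the operand combinations accepted by the generated
+// opcodes; entries with ispfx set assemble to an 8 byte prefixed
+// instruction.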
+var optabGen = []Optab{
+ {as: ABRW, a1: C_REG, a6: C_REG, asmout: type_brw, size: 4},
+ {as: ADCFFIXQQ, a1: C_VREG, a6: C_FREGP, asmout: type_xscvuqqp, size: 4},
+ {as: ADCTFIXQQ, a1: C_FREGP, a6: C_VREG, asmout: type_xscvuqqp, size: 4},
+ {as: AHASHCHKP, a1: C_SOREG, a6: C_REG, asmout: type_hashchkp, size: 4},
+ {as: AHASHSTP, a1: C_REG, a6: C_SOREG, asmout: type_hashstp, size: 4},
+ {as: ALXVKQ, a1: C_U5CON, a6: C_VSREG, asmout: type_lxvkq, size: 4},
+ {as: ALXVP, a1: C_SOREG, a6: C_VSREGP, asmout: type_lxvp, size: 4},
+ {as: ALXVPX, a1: C_XOREG, a6: C_VSREGP, asmout: type_lxvpx, size: 4},
+ {as: ALXVRWX, a1: C_XOREG, a6: C_VSREG, asmout: type_lxvrwx, size: 4},
+ {as: AMTVSRBMI, a1: C_U16CON, a6: C_VREG, asmout: type_mtvsrbmi, size: 4},
+ {as: AMTVSRWM, a1: C_REG, a6: C_VREG, asmout: type_xscvuqqp, size: 4},
+ {as: APADDI, a1: C_REG, a3: C_S34CON, a4: C_U1CON, a6: C_REG, asmout: type_paddi, ispfx: true, size: 8},
+ {as: APEXTD, a1: C_REG, a2: C_REG, a6: C_REG, asmout: type_pextd, size: 4},
+ {as: APLFS, a1: C_LOREG, a3: C_U1CON, a6: C_FREG, asmout: type_plxssp, ispfx: true, size: 8},
+ {as: APLQ, a1: C_LOREG, a3: C_U1CON, a6: C_REGP, asmout: type_plxssp, ispfx: true, size: 8},
+ {as: APLWZ, a1: C_LOREG, a3: C_U1CON, a6: C_REG, asmout: type_plxssp, ispfx: true, size: 8},
+ {as: APLXSSP, a1: C_LOREG, a3: C_U1CON, a6: C_VREG, asmout: type_plxssp, ispfx: true, size: 8},
+ {as: APLXV, a1: C_LOREG, a3: C_U1CON, a6: C_VSREG, asmout: type_plxv, ispfx: true, size: 8},
+ {as: APLXVP, a1: C_LOREG, a3: C_U1CON, a6: C_VSREGP, asmout: type_plxvp, ispfx: true, size: 8},
+ {as: APMXVF32GERPP, a1: C_VSREG, a2: C_VSREG, a3: C_U4CON, a4: C_U4CON, a6: C_AREG, asmout: type_pmxvf32gerpp, ispfx: true, size: 8},
+ {as: APMXVF64GERPP, a1: C_VSREGP, a2: C_VSREG, a3: C_U4CON, a4: C_U2CON, a6: C_AREG, asmout: type_pmxvf64gerpp, ispfx: true, size: 8},
+ {as: APMXVI16GER2SPP, a1: C_VSREG, a2: C_VSREG, a3: C_U4CON, a4: C_U4CON, a5: C_U2CON, a6: C_AREG, asmout: type_pmxvi16ger2spp, ispfx: true, size: 8},
+ {as: APMXVI4GER8PP, a1: C_VSREG, a2: C_VSREG, a3: C_U4CON, a4: C_U4CON, a5: C_U8CON, a6: C_AREG, asmout: type_pmxvi4ger8pp, ispfx: true, size: 8},
+ {as: APMXVI8GER4SPP, a1: C_VSREG, a2: C_VSREG, a3: C_U4CON, a4: C_U4CON, a5: C_U4CON, a6: C_AREG, asmout: type_pmxvi8ger4spp, ispfx: true, size: 8},
+ {as: APNOP, asmout: type_pnop, ispfx: true, size: 8},
+ {as: APSTFS, a1: C_FREG, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxssp, ispfx: true, size: 8},
+ {as: APSTQ, a1: C_REGP, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxssp, ispfx: true, size: 8},
+ {as: APSTW, a1: C_REG, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxssp, ispfx: true, size: 8},
+ {as: APSTXSSP, a1: C_VREG, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxssp, ispfx: true, size: 8},
+ {as: APSTXV, a1: C_VSREG, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxv, ispfx: true, size: 8},
+ {as: APSTXVP, a1: C_VSREGP, a3: C_U1CON, a6: C_LOREG, asmout: type_pstxvp, ispfx: true, size: 8},
+ {as: ASETNBCR, a1: C_CRBIT, a6: C_REG, asmout: type_setnbcr, size: 4},
+ {as: ASTXVP, a1: C_VSREGP, a6: C_SOREG, asmout: type_stxvp, size: 4},
+ {as: ASTXVPX, a1: C_VSREGP, a6: C_XOREG, asmout: type_stxvpx, size: 4},
+ {as: ASTXVRWX, a1: C_VSREG, a6: C_XOREG, asmout: type_stxvrwx, size: 4},
+ {as: AVCLRRB, a1: C_VREG, a2: C_REG, a6: C_VREG, asmout: type_xsmincqp, size: 4},
+ {as: AVCMPUQ, a1: C_VREG, a2: C_VREG, a6: C_CREG, asmout: type_vcmpuq, size: 4},
+ {as: AVCNTMBW, a1: C_VREG, a3: C_U1CON, a6: C_REG, asmout: type_vcntmbw, size: 4},
+ {as: AVEXTDUWVRX, a1: C_VREG, a2: C_VREG, a3: C_REG, a6: C_VREG, asmout: type_vmsumcud, size: 4},
+ {as: AVEXTRACTWM, a1: C_VREG, a6: C_REG, asmout: type_xscvuqqp, size: 4},
+ {as: AVGNB, a1: C_VREG, a3: C_U3CON, a6: C_REG, asmout: type_vgnb, size: 4},
+ {as: AVINSW, a1: C_REG, a3: C_U4CON, a6: C_VREG, asmout: type_vinsw, size: 4},
+ {as: AVINSWRX, a1: C_REG, a2: C_REG, a6: C_VREG, asmout: type_xsmincqp, size: 4},
+ {as: AVINSWVRX, a1: C_REG, a2: C_VREG, a6: C_VREG, asmout: type_xsmincqp, size: 4},
+ {as: AVMSUMCUD, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, asmout: type_vmsumcud, size: 4},
+ {as: AVSRDBI, a1: C_VREG, a2: C_VREG, a3: C_U3CON, a6: C_VREG, asmout: type_vsrdbi, size: 4},
+ {as: AXSCVUQQP, a1: C_VREG, a6: C_VREG, asmout: type_xscvuqqp, size: 4},
+ {as: AXSMINCQP, a1: C_VREG, a2: C_VREG, a6: C_VREG, asmout: type_xsmincqp, size: 4},
+ {as: AXVCVSPBF16, a1: C_VSREG, a6: C_VSREG, asmout: type_xvcvspbf16, size: 4},
+ {as: AXVI8GER4SPP, a1: C_VSREG, a2: C_VSREG, a6: C_AREG, asmout: type_xvi8ger4spp, size: 4},
+ {as: AXVTLSBB, a1: C_VSREG, a6: C_CREG, asmout: type_xvtlsbb, size: 4},
+ {as: AXXBLENDVW, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, asmout: type_xxblendvw, ispfx: true, size: 8},
+ {as: AXXEVAL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a4: C_U8CON, a6: C_VSREG, asmout: type_xxeval, ispfx: true, size: 8},
+ {as: AXXGENPCVWM, a1: C_VREG, a3: C_U5CON, a6: C_VSREG, asmout: type_xxgenpcvwm, size: 4},
+ {as: AXXPERMX, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a4: C_U3CON, a6: C_VSREG, asmout: type_xxpermx, ispfx: true, size: 8},
+ {as: AXXSETACCZ, a6: C_AREG, asmout: type_xxsetaccz, size: 4},
+ {as: AXXSPLTI32DX, a1: C_U1CON, a3: C_U32CON, a6: C_VSREG, asmout: type_xxsplti32dx, ispfx: true, size: 8},
+ {as: AXXSPLTIW, a1: C_U32CON, a6: C_VSREG, asmout: type_xxspltiw, ispfx: true, size: 8},
+}
+
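+// Each type_* encoder below starts from the GenOpcodes word (and, for
+// prefixed forms, the GenPfxOpcodes word) and ORs in the operand
+// fields named by the trailing comments; out[0] is emitted first.
+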
+// brw RA,RS
+func type_brw(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.From.Reg&0x1f) << 21 // RS
+ out[0] = o0
+}
+
+// hashchkp RB,offset(RA)
+func type_hashchkp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 11 // RB
+ o0 |= uint32((p.From.Offset>>8)&0x1) << 0 // DX
+ o0 |= uint32((p.From.Offset>>3)&0x1f) << 21 // D
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // RA
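+ // Valid offsets are negative multiples of 8 in [-512, -8]; the mask
+ // test checks sign, alignment, and range in a single comparison.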
+ if p.From.Offset&0xfffffe07 != 0xfffffe00 {
+ c.ctxt.Diag("Constant(%d) must within the range of [-512,-8] in steps of 8\n%v", p.From.Offset, p)
+ }
+ out[0] = o0
+}
+
+// hashstp RB,offset(RA)
+func type_hashstp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // RB
+ o0 |= uint32((p.To.Offset>>8)&0x1) << 0 // DX
+ o0 |= uint32((p.To.Offset>>3)&0x1f) << 21 // D
+ o0 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ if p.To.Offset&0xfffffe07 != 0xfffffe00 {
+ c.ctxt.Diag("Constant(%d) must within the range of [-512,-8] in steps of 8\n%v", p.To.Offset, p)
+ }
+ out[0] = o0
+}
+
+// lxvkq XT,UIM
+func type_lxvkq(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32(p.From.Offset&0x1f) << 11 // UIM
+ out[0] = o0
+}
+
+// lxvp XTp,DQ(RA)
+func type_lxvp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 21 // TX
+ o0 |= uint32((p.To.Reg>>1)&0xf) << 22 // Tp
+ o0 |= uint32((p.From.Offset>>4)&0xfff) << 4 // DQ
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // RA
+ if p.From.Offset&0xf != 0 {
+ c.ctxt.Diag("Constant 0x%x (%d) is not a multiple of 16\n%v", p.From.Offset, p.From.Offset, p)
+ }
+ out[0] = o0
+}
+
+// lxvpx XTp,RA,RB
+func type_lxvpx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 21 // TX
+ o0 |= uint32((p.To.Reg>>1)&0xf) << 22 // Tp
+ o0 |= uint32(p.From.Index&0x1f) << 16 // RA
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // RB
+ out[0] = o0
+}
+
+// lxvrwx XT,RA,RB
+func type_lxvrwx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32(p.From.Index&0x1f) << 16 // RA
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // RB
+ out[0] = o0
+}
+
+// mtvsrbmi VRT,bm
+func type_mtvsrbmi(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32((p.From.Offset>>6)&0x3ff) << 6 // b0
+ o0 |= uint32((p.From.Offset>>1)&0x1f) << 16 // b1
+ o0 |= uint32(p.From.Offset&0x1) << 0 // b2
+ out[0] = o0
+}
+
+// paddi RT,RA,SI,R
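+// The 34 bit SI is split across the two words: the high 18 bits (si0)
+// land in the prefix and the low 16 bits (si1) in the suffix.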
+func type_paddi(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // RT
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // RA
+ o0 |= uint32((p.RestArgs[0].Addr.Offset>>16)&0x3ffff) << 0 // si0
+ o1 |= uint32(p.RestArgs[0].Addr.Offset&0xffff) << 0 // si1
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// pextd RA,RS,RB
+func type_pextd(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.From.Reg&0x1f) << 21 // RS
+ o0 |= uint32(p.Reg&0x1f) << 11 // RB
+ out[0] = o0
+}
+
+// plxssp VRT,D(RA),R
+func type_plxssp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32((p.From.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.From.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// plxv XT,D(RA),R
+func type_plxv(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 26 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32((p.From.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.From.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// plxvp XTp,D(RA),R
+func type_plxvp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 21 // TX
+ o1 |= uint32((p.To.Reg>>1)&0xf) << 22 // Tp
+ o0 |= uint32((p.From.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.From.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// pmxvf32gerpp AT,XA,XB,XMSK,YMSK
+func type_pmxvf32gerpp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 4 // XMSK
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0xf) << 0 // YMSK
+ out[1] = o1
+ out[0] = o0
+}
+
+// pmxvf64gerpp AT,XAp,XB,XMSK,YMSK
+func type_pmxvf64gerpp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // Ap
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 4 // XMSK
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0x3) << 2 // YMSK
+ out[1] = o1
+ out[0] = o0
+}
+
+// pmxvi16ger2spp AT,XA,XB,XMSK,YMSK,PMSK
+func type_pmxvi16ger2spp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 4 // XMSK
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0xf) << 0 // YMSK
+ o0 |= uint32(p.RestArgs[2].Addr.Offset&0x3) << 14 // PMSK
+ out[1] = o1
+ out[0] = o0
+}
+
+// pmxvi4ger8pp AT,XA,XB,XMSK,YMSK,PMSK
+func type_pmxvi4ger8pp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 4 // XMSK
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0xf) << 0 // YMSK
+ o0 |= uint32(p.RestArgs[2].Addr.Offset&0xff) << 8 // PMSK
+ out[1] = o1
+ out[0] = o0
+}
+
+// pmxvi8ger4spp AT,XA,XB,XMSK,YMSK,PMSK
+func type_pmxvi8ger4spp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 4 // XMSK
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0xf) << 0 // YMSK
+ o0 |= uint32(p.RestArgs[2].Addr.Offset&0xf) << 12 // PMSK
+ out[1] = o1
+ out[0] = o0
+}
+
+// pnop
+func type_pnop(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ out[1] = o1
+ out[0] = o0
+}
+
+// pstxssp VRS,D(RA),R
+func type_pstxssp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32(p.From.Reg&0x1f) << 21 // VRS
+ o0 |= uint32((p.To.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.To.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// pstxv XS,D(RA),R
+func type_pstxv(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 26 // SX
+ o1 |= uint32(p.From.Reg&0x1f) << 21 // S
+ o0 |= uint32((p.To.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.To.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// pstxvp XSp,D(RA),R
+func type_pstxvp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 21 // SX
+ o1 |= uint32((p.From.Reg>>1)&0xf) << 22 // Sp
+ o0 |= uint32((p.To.Offset>>16)&0x3ffff) << 0 // d0
+ o1 |= uint32(p.To.Offset&0xffff) << 0 // d1
+ o1 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 20 // R
+ out[1] = o1
+ out[0] = o0
+}
+
+// setnbcr RT,BI
+func type_setnbcr(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // RT
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // BI
+ out[0] = o0
+}
+
+// stxvp XSp,DQ(RA)
+func type_stxvp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 21 // SX
+ o0 |= uint32((p.From.Reg>>1)&0xf) << 22 // Sp
+ o0 |= uint32((p.To.Offset>>4)&0xfff) << 4 // DQ
+ o0 |= uint32(p.To.Reg&0x1f) << 16 // RA
+ if p.To.Offset&0xf != 0 {
+ c.ctxt.Diag("Constant 0x%x (%d) is not a multiple of 16\n%v", p.To.Offset, p.To.Offset, p)
+ }
+ out[0] = o0
+}
+
+// stxvpx XSp,RA,RB
+func type_stxvpx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 21 // SX
+ o0 |= uint32((p.From.Reg>>1)&0xf) << 22 // Sp
+ o0 |= uint32(p.To.Index&0x1f) << 16 // RA
+ o0 |= uint32(p.To.Reg&0x1f) << 11 // RB
+ out[0] = o0
+}
+
+// stxvrwx XS,RA,RB
+func type_stxvrwx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 0 // SX
+ o0 |= uint32(p.From.Reg&0x1f) << 21 // S
+ o0 |= uint32(p.To.Index&0x1f) << 16 // RA
+ o0 |= uint32(p.To.Reg&0x1f) << 11 // RB
+ out[0] = o0
+}
+
+// vcmpuq BF,VRA,VRB
+func type_vcmpuq(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x7) << 23 // BF
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // VRA
+ o0 |= uint32(p.Reg&0x1f) << 11 // VRB
+ out[0] = o0
+}
+
+// vcntmbw RT,VRB,MP
+func type_vcntmbw(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // RT
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // VRB
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1) << 16 // MP
+ out[0] = o0
+}
+
+// vgnb RT,VRB,N
+func type_vgnb(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // RT
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // VRB
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x7) << 16 // N
+ out[0] = o0
+}
+
+// vinsw VRT,RB,UIM
+func type_vinsw(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // RB
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0xf) << 16 // UIM
+ out[0] = o0
+}
+
+// vmsumcud VRT,VRA,VRB,VRC
+func type_vmsumcud(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // VRA
+ o0 |= uint32(p.Reg&0x1f) << 11 // VRB
+ o0 |= uint32(p.RestArgs[0].Addr.Reg&0x1f) << 6 // VRC
+ out[0] = o0
+}
+
+// vsrdbi VRT,VRA,VRB,SH
+func type_vsrdbi(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // VRA
+ o0 |= uint32(p.Reg&0x1f) << 11 // VRB
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x7) << 6 // SH
+ out[0] = o0
+}
+
+// xscvuqqp VRT,VRB
+func type_xscvuqqp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // VRB
+ out[0] = o0
+}
+
+// xsmincqp VRT,VRA,VRB
+func type_xsmincqp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // VRT
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // VRA
+ o0 |= uint32(p.Reg&0x1f) << 11 // VRB
+ out[0] = o0
+}
+
+// xvcvspbf16 XT,XB
+func type_xvcvspbf16(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 1 // BX
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // B
+ out[0] = o0
+}
+
+// xvi8ger4spp AT,XA,XB
+func type_xvi8ger4spp(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x7) << 23 // AT
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o0 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o0 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o0 |= uint32(p.Reg&0x1f) << 11 // B
+ out[0] = o0
+}
+
+// xvtlsbb BF,XB
+func type_xvtlsbb(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x7) << 23 // BF
+ o0 |= uint32((p.From.Reg>>5)&0x1) << 1 // BX
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // B
+ out[0] = o0
+}
+
+// xxblendvw XT,XA,XB,XC
+func type_xxblendvw(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o1 |= uint32((p.RestArgs[0].Addr.Reg>>5)&0x1) << 3 // CX
+ o1 |= uint32(p.RestArgs[0].Addr.Reg&0x1f) << 6 // C
+ out[1] = o1
+ out[0] = o0
+}
+
+// xxeval XT,XA,XB,XC,IMM
+func type_xxeval(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o1 |= uint32((p.RestArgs[0].Addr.Reg>>5)&0x1) << 3 // CX
+ o1 |= uint32(p.RestArgs[0].Addr.Reg&0x1f) << 6 // C
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0xff) << 0 // IMM
+ out[1] = o1
+ out[0] = o0
+}
+
+// xxgenpcvwm XT,VRB,IMM
+func type_xxgenpcvwm(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o0 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32(p.From.Reg&0x1f) << 11 // VRB
+ o0 |= uint32(p.RestArgs[0].Addr.Offset&0x1f) << 16 // IMM
+ out[0] = o0
+}
+
+// xxpermx XT,XA,XB,XC,UIM
+func type_xxpermx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 0 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o1 |= uint32((p.From.Reg>>5)&0x1) << 2 // AX
+ o1 |= uint32(p.From.Reg&0x1f) << 16 // A
+ o1 |= uint32((p.Reg>>5)&0x1) << 1 // BX
+ o1 |= uint32(p.Reg&0x1f) << 11 // B
+ o1 |= uint32((p.RestArgs[0].Addr.Reg>>5)&0x1) << 3 // CX
+ o1 |= uint32(p.RestArgs[0].Addr.Reg&0x1f) << 6 // C
+ o0 |= uint32(p.RestArgs[1].Addr.Offset&0x7) << 0 // UIM
+ out[1] = o1
+ out[0] = o0
+}
+
+// xxsetaccz AT
+func type_xxsetaccz(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenOpcodes[p.As-AXXSETACCZ]
+ o0 |= uint32(p.To.Reg&0x7) << 23 // AT
+ out[0] = o0
+}
+
+// xxsplti32dx XT,IX,IMM32
+func type_xxsplti32dx(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 16 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o1 |= uint32(p.From.Offset&0x1) << 17 // IX
+ o0 |= uint32((p.RestArgs[0].Addr.Offset>>16)&0xffff) << 0 // imm0
+ o1 |= uint32(p.RestArgs[0].Addr.Offset&0xffff) << 0 // imm1
+ out[1] = o1
+ out[0] = o0
+}
+
+// xxspltiw XT,IMM32
+func type_xxspltiw(c *ctxt9, p *obj.Prog, t *Optab, out *[5]uint32) {
+ o0 := GenPfxOpcodes[p.As-AXXSPLTIW]
+ o1 := GenOpcodes[p.As-AXXSETACCZ]
+ o1 |= uint32((p.To.Reg>>5)&0x1) << 16 // TX
+ o1 |= uint32(p.To.Reg&0x1f) << 21 // T
+ o0 |= uint32((p.From.Offset>>16)&0xffff) << 0 // imm0
+ o1 |= uint32(p.From.Offset&0xffff) << 0 // imm1
+ out[1] = o1
+ out[0] = o0
+}
+
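+// opsetGen installs the optab aliases for the generated opcodes: each
+// case lists the instructions that share the encoding layout of its
+// representative opcode.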
+func opsetGen(from obj.As) bool {
+ r0 := from & obj.AMask
+ switch from {
+ case ABRW:
+ opset(ABRH, r0)
+ opset(ABRD, r0)
+ case ADCFFIXQQ:
+ case ADCTFIXQQ:
+ case AHASHCHKP:
+ opset(AHASHCHK, r0)
+ case AHASHSTP:
+ opset(AHASHST, r0)
+ case ALXVKQ:
+ case ALXVP:
+ case ALXVPX:
+ case ALXVRWX:
+ opset(ALXVRHX, r0)
+ opset(ALXVRDX, r0)
+ opset(ALXVRBX, r0)
+ case AMTVSRBMI:
+ case AMTVSRWM:
+ opset(AMTVSRQM, r0)
+ opset(AMTVSRHM, r0)
+ opset(AMTVSRDM, r0)
+ opset(AMTVSRBM, r0)
+ case APADDI:
+ case APEXTD:
+ opset(APDEPD, r0)
+ opset(ACNTTZDM, r0)
+ opset(ACNTLZDM, r0)
+ opset(ACFUGED, r0)
+ case APLFS:
+ opset(APLFD, r0)
+ case APLQ:
+ case APLWZ:
+ opset(APLWA, r0)
+ opset(APLHZ, r0)
+ opset(APLHA, r0)
+ opset(APLD, r0)
+ opset(APLBZ, r0)
+ case APLXSSP:
+ opset(APLXSD, r0)
+ case APLXV:
+ case APLXVP:
+ case APMXVF32GERPP:
+ opset(APMXVF32GERPN, r0)
+ opset(APMXVF32GERNP, r0)
+ opset(APMXVF32GERNN, r0)
+ opset(APMXVF32GER, r0)
+ case APMXVF64GERPP:
+ opset(APMXVF64GERPN, r0)
+ opset(APMXVF64GERNP, r0)
+ opset(APMXVF64GERNN, r0)
+ opset(APMXVF64GER, r0)
+ case APMXVI16GER2SPP:
+ opset(APMXVI16GER2S, r0)
+ opset(APMXVI16GER2PP, r0)
+ opset(APMXVI16GER2, r0)
+ opset(APMXVF16GER2PP, r0)
+ opset(APMXVF16GER2PN, r0)
+ opset(APMXVF16GER2NP, r0)
+ opset(APMXVF16GER2NN, r0)
+ opset(APMXVF16GER2, r0)
+ opset(APMXVBF16GER2PP, r0)
+ opset(APMXVBF16GER2PN, r0)
+ opset(APMXVBF16GER2NP, r0)
+ opset(APMXVBF16GER2NN, r0)
+ opset(APMXVBF16GER2, r0)
+ case APMXVI4GER8PP:
+ opset(APMXVI4GER8, r0)
+ case APMXVI8GER4SPP:
+ opset(APMXVI8GER4PP, r0)
+ opset(APMXVI8GER4, r0)
+ case APNOP:
+ case APSTFS:
+ opset(APSTFD, r0)
+ case APSTQ:
+ case APSTW:
+ opset(APSTH, r0)
+ opset(APSTD, r0)
+ opset(APSTB, r0)
+ case APSTXSSP:
+ opset(APSTXSD, r0)
+ case APSTXV:
+ case APSTXVP:
+ case ASETNBCR:
+ opset(ASETNBC, r0)
+ opset(ASETBCR, r0)
+ opset(ASETBC, r0)
+ case ASTXVP:
+ case ASTXVPX:
+ case ASTXVRWX:
+ opset(ASTXVRHX, r0)
+ opset(ASTXVRDX, r0)
+ opset(ASTXVRBX, r0)
+ case AVCLRRB:
+ opset(AVCLRLB, r0)
+ case AVCMPUQ:
+ opset(AVCMPSQ, r0)
+ case AVCNTMBW:
+ opset(AVCNTMBH, r0)
+ opset(AVCNTMBD, r0)
+ opset(AVCNTMBB, r0)
+ case AVEXTDUWVRX:
+ opset(AVEXTDUWVLX, r0)
+ opset(AVEXTDUHVRX, r0)
+ opset(AVEXTDUHVLX, r0)
+ opset(AVEXTDUBVRX, r0)
+ opset(AVEXTDUBVLX, r0)
+ opset(AVEXTDDVRX, r0)
+ opset(AVEXTDDVLX, r0)
+ case AVEXTRACTWM:
+ opset(AVEXTRACTQM, r0)
+ opset(AVEXTRACTHM, r0)
+ opset(AVEXTRACTDM, r0)
+ opset(AVEXTRACTBM, r0)
+ case AVGNB:
+ case AVINSW:
+ opset(AVINSD, r0)
+ case AVINSWRX:
+ opset(AVINSWLX, r0)
+ opset(AVINSHRX, r0)
+ opset(AVINSHLX, r0)
+ opset(AVINSDRX, r0)
+ opset(AVINSDLX, r0)
+ opset(AVINSBRX, r0)
+ opset(AVINSBLX, r0)
+ case AVINSWVRX:
+ opset(AVINSWVLX, r0)
+ opset(AVINSHVRX, r0)
+ opset(AVINSHVLX, r0)
+ opset(AVINSBVRX, r0)
+ opset(AVINSBVLX, r0)
+ case AVMSUMCUD:
+ case AVSRDBI:
+ opset(AVSLDBI, r0)
+ case AXSCVUQQP:
+ opset(AXSCVSQQP, r0)
+ opset(AXSCVQPUQZ, r0)
+ opset(AXSCVQPSQZ, r0)
+ opset(AVSTRIHRCC, r0)
+ opset(AVSTRIHR, r0)
+ opset(AVSTRIHLCC, r0)
+ opset(AVSTRIHL, r0)
+ opset(AVSTRIBRCC, r0)
+ opset(AVSTRIBR, r0)
+ opset(AVSTRIBLCC, r0)
+ opset(AVSTRIBL, r0)
+ opset(AVEXTSD2Q, r0)
+ opset(AVEXPANDWM, r0)
+ opset(AVEXPANDQM, r0)
+ opset(AVEXPANDHM, r0)
+ opset(AVEXPANDDM, r0)
+ opset(AVEXPANDBM, r0)
+ case AXSMINCQP:
+ opset(AXSMAXCQP, r0)
+ opset(AXSCMPGTQP, r0)
+ opset(AXSCMPGEQP, r0)
+ opset(AXSCMPEQQP, r0)
+ opset(AVSRQ, r0)
+ opset(AVSRAQ, r0)
+ opset(AVSLQ, r0)
+ opset(AVRLQNM, r0)
+ opset(AVRLQMI, r0)
+ opset(AVRLQ, r0)
+ opset(AVPEXTD, r0)
+ opset(AVPDEPD, r0)
+ opset(AVMULOUD, r0)
+ opset(AVMULOSD, r0)
+ opset(AVMULLD, r0)
+ opset(AVMULHUW, r0)
+ opset(AVMULHUD, r0)
+ opset(AVMULHSW, r0)
+ opset(AVMULHSD, r0)
+ opset(AVMULEUD, r0)
+ opset(AVMULESD, r0)
+ opset(AVMODUW, r0)
+ opset(AVMODUQ, r0)
+ opset(AVMODUD, r0)
+ opset(AVMODSW, r0)
+ opset(AVMODSQ, r0)
+ opset(AVMODSD, r0)
+ opset(AVDIVUW, r0)
+ opset(AVDIVUQ, r0)
+ opset(AVDIVUD, r0)
+ opset(AVDIVSW, r0)
+ opset(AVDIVSQ, r0)
+ opset(AVDIVSD, r0)
+ opset(AVDIVEUW, r0)
+ opset(AVDIVEUQ, r0)
+ opset(AVDIVEUD, r0)
+ opset(AVDIVESW, r0)
+ opset(AVDIVESQ, r0)
+ opset(AVDIVESD, r0)
+ opset(AVCTZDM, r0)
+ opset(AVCMPGTUQCC, r0)
+ opset(AVCMPGTUQ, r0)
+ opset(AVCMPGTSQCC, r0)
+ opset(AVCMPGTSQ, r0)
+ opset(AVCMPEQUQCC, r0)
+ opset(AVCMPEQUQ, r0)
+ opset(AVCLZDM, r0)
+ opset(AVCFUGED, r0)
+ case AXVCVSPBF16:
+ opset(AXVCVBF16SPN, r0)
+ case AXVI8GER4SPP:
+ opset(AXVI8GER4PP, r0)
+ opset(AXVI8GER4, r0)
+ opset(AXVI4GER8PP, r0)
+ opset(AXVI4GER8, r0)
+ opset(AXVI16GER2SPP, r0)
+ opset(AXVI16GER2S, r0)
+ opset(AXVI16GER2PP, r0)
+ opset(AXVI16GER2, r0)
+ opset(AXVF64GERPP, r0)
+ opset(AXVF64GERPN, r0)
+ opset(AXVF64GERNP, r0)
+ opset(AXVF64GERNN, r0)
+ opset(AXVF64GER, r0)
+ opset(AXVF32GERPP, r0)
+ opset(AXVF32GERPN, r0)
+ opset(AXVF32GERNP, r0)
+ opset(AXVF32GERNN, r0)
+ opset(AXVF32GER, r0)
+ opset(AXVF16GER2PP, r0)
+ opset(AXVF16GER2PN, r0)
+ opset(AXVF16GER2NP, r0)
+ opset(AXVF16GER2NN, r0)
+ opset(AXVF16GER2, r0)
+ opset(AXVBF16GER2PP, r0)
+ opset(AXVBF16GER2PN, r0)
+ opset(AXVBF16GER2NP, r0)
+ opset(AXVBF16GER2NN, r0)
+ opset(AXVBF16GER2, r0)
+ case AXVTLSBB:
+ case AXXBLENDVW:
+ opset(AXXBLENDVH, r0)
+ opset(AXXBLENDVD, r0)
+ opset(AXXBLENDVB, r0)
+ case AXXEVAL:
+ case AXXGENPCVWM:
+ opset(AXXGENPCVHM, r0)
+ opset(AXXGENPCVDM, r0)
+ opset(AXXGENPCVBM, r0)
+ case AXXPERMX:
+ case AXXSETACCZ:
+ opset(AXXMTACC, r0)
+ opset(AXXMFACC, r0)
+ case AXXSPLTI32DX:
+ case AXXSPLTIW:
+ opset(AXXSPLTIDP, r0)
+ default:
+ return false
+ }
+ return true
+}
diff --git a/src/cmd/internal/obj/ppc64/asm_test.go b/src/cmd/internal/obj/ppc64/asm_test.go
new file mode 100644
index 0000000..b8995dc
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/asm_test.go
@@ -0,0 +1,555 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "bytes"
+ "fmt"
+ "internal/testenv"
+ "math"
+ "os"
+ "path/filepath"
+ "regexp"
+ "strings"
+ "testing"
+
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+var platformEnvs = [][]string{
+ {"GOOS=aix", "GOARCH=ppc64"},
+ {"GOOS=linux", "GOARCH=ppc64"},
+ {"GOOS=linux", "GOARCH=ppc64le"},
+}
+
+const invalidPCAlignSrc = `
+TEXT test(SB),0,$0-0
+ADD $2, R3
+PCALIGN $128
+RET
+`
+
+const validPCAlignSrc = `
+TEXT test(SB),0,$0-0
+ADD $2, R3
+PCALIGN $16
+MOVD $8, R16
+ADD $8, R4
+PCALIGN $32
+ADD $8, R3
+PCALIGN $8
+ADD $4, R8
+RET
+`
+
+const x64pgm = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+`
+const x32pgm = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+`
+
+const x16pgm = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+`
+
+const x0pgm = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+`
+const x64pgmA64 = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+`
+
+const x64pgmA32 = `
+TEXT test(SB),0,$0-0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+OR R0, R0
+PNOP
+`
+
+// Test that nops are inserted when crossing 64B boundaries, and
+// alignment is adjusted to avoid crossing.
+func TestPfxAlign(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := os.MkdirTemp("", "testpfxalign")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ pgms := []struct {
+ text []byte
+ align string
+ hasNop bool
+ }{
+ {[]byte(x0pgm), "align=0x0", false}, // No alignment or nop adjustments needed
+ {[]byte(x16pgm), "align=0x20", false}, // Increased alignment needed
+ {[]byte(x32pgm), "align=0x40", false}, // Worst case alignment needed
+ {[]byte(x64pgm), "align=0x0", true}, // 0 aligned is default (16B) alignment
+ {[]byte(x64pgmA64), "align=0x40", true}, // extra alignment + nop
+ {[]byte(x64pgmA32), "align=0x20", true}, // extra alignment + nop
+ }
+
+ for _, pgm := range pgms {
+ tmpfile := filepath.Join(dir, "x.s")
+ err = os.WriteFile(tmpfile, pgm.text, 0644)
+ if err != nil {
+ t.Fatalf("can't write output: %v\n", err)
+ }
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-S", "-o", filepath.Join(dir, "test.o"), tmpfile)
+ cmd.Env = append(os.Environ(), "GOOS=linux", "GOARCH=ppc64le")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("Failed to compile %v: %v\n", pgm, err)
+ }
+ if !strings.Contains(string(out), pgm.align) {
+			t.Errorf("Fatal, misaligned text with prefixed instructions:\n%s\n", out)
+ }
+ hasNop := strings.Contains(string(out), "00 00 00 60")
+ if hasNop != pgm.hasNop {
+			t.Errorf("Fatal, prefixed instruction is missing nop padding:\n%s\n", out)
+ }
+ }
+}
+
+// TestLarge generates a very large file to verify that a large
+// program builds successfully, and that branches which exceed the
+// range of BC are rewritten to reach their targets.
+func TestLarge(t *testing.T) {
+ if testing.Short() {
+ t.Skip("Skip in short mode")
+ }
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := os.MkdirTemp("", "testlarge")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // A few interesting test cases for long conditional branch fixups
+ tests := []struct {
+ jmpinsn string
+ backpattern []string
+ fwdpattern []string
+ }{
+		// Test the interesting cases of conditional branch rewrites for too-far targets. Simple conditional
+		// branches can be made to reach with one JMP insertion; compound conditionals require two.
+ //
+ // beq <-> bne conversion (insert one jump)
+ {"BEQ",
+ []string{``,
+ `0x20030 131120\s\(.*\)\tBC\t\$4,\sCR0EQ,\s131128`,
+ `0x20034 131124\s\(.*\)\tJMP\t0`},
+ []string{``,
+ `0x0000 00000\s\(.*\)\tBC\t\$4,\sCR0EQ,\s8`,
+ `0x0004 00004\s\(.*\)\tJMP\t131128`},
+ },
+ {"BNE",
+ []string{``,
+ `0x20030 131120\s\(.*\)\tBC\t\$12,\sCR0EQ,\s131128`,
+ `0x20034 131124\s\(.*\)\tJMP\t0`},
+ []string{``,
+ `0x0000 00000\s\(.*\)\tBC\t\$12,\sCR0EQ,\s8`,
+ `0x0004 00004\s\(.*\)\tJMP\t131128`}},
+ // bdnz (BC 16,0,tgt) <-> bdz (BC 18,0,+4) conversion (insert one jump)
+ {"BC 16,0,",
+ []string{``,
+ `0x20030 131120\s\(.*\)\tBC\t\$18,\sCR0LT,\s131128`,
+ `0x20034 131124\s\(.*\)\tJMP\t0`},
+ []string{``,
+ `0x0000 00000\s\(.*\)\tBC\t\$18,\sCR0LT,\s8`,
+ `0x0004 00004\s\(.*\)\tJMP\t131128`}},
+ {"BC 18,0,",
+ []string{``,
+ `0x20030 131120\s\(.*\)\tBC\t\$16,\sCR0LT,\s131128`,
+ `0x20034 131124\s\(.*\)\tJMP\t0`},
+ []string{``,
+ `0x0000 00000\s\(.*\)\tBC\t\$16,\sCR0LT,\s8`,
+ `0x0004 00004\s\(.*\)\tJMP\t131128`}},
+ // bdnzt (BC 8,0,tgt) <-> bdnzt (BC 8,0,+4) conversion (insert two jumps)
+ {"BC 8,0,",
+ []string{``,
+ `0x20034 131124\s\(.*\)\tBC\t\$8,\sCR0LT,\s131132`,
+ `0x20038 131128\s\(.*\)\tJMP\t131136`,
+ `0x2003c 131132\s\(.*\)\tJMP\t0\n`},
+ []string{``,
+ `0x0000 00000\s\(.*\)\tBC\t\$8,\sCR0LT,\s8`,
+ `0x0004 00004\s\(.*\)\tJMP\t12`,
+ `0x0008 00008\s\(.*\)\tJMP\t131136\n`}},
+ }
+
+ for _, test := range tests {
+ // generate a very large function
+ buf := bytes.NewBuffer(make([]byte, 0, 7000000))
+ gen(buf, test.jmpinsn)
+
+ tmpfile := filepath.Join(dir, "x.s")
+ err = os.WriteFile(tmpfile, buf.Bytes(), 0644)
+ if err != nil {
+ t.Fatalf("can't write output: %v\n", err)
+ }
+
+ // Test on all supported ppc64 platforms
+ for _, platenv := range platformEnvs {
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-S", "-o", filepath.Join(dir, "test.o"), tmpfile)
+ cmd.Env = append(os.Environ(), platenv...)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("Assemble failed (%v): %v, output: %s", platenv, err, out)
+ }
+ matched, err := regexp.MatchString(strings.Join(test.fwdpattern, "\n\t*"), string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("Failed to detect long forward BC fixup in (%v):%s\n", platenv, out)
+ }
+ matched, err = regexp.MatchString(strings.Join(test.backpattern, "\n\t*"), string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("Failed to detect long backward BC fixup in (%v):%s\n", platenv, out)
+ }
+ }
+ }
+}
+
+// gen generates a very large program with very long forward and backward conditional branches.
+func gen(buf *bytes.Buffer, jmpinsn string) {
+ fmt.Fprintln(buf, "TEXT f(SB),0,$0-0")
+ fmt.Fprintln(buf, "label_start:")
+ fmt.Fprintln(buf, jmpinsn, "label_end")
+ for i := 0; i < (1<<15 + 10); i++ {
+ fmt.Fprintln(buf, "MOVD R0, R1")
+ }
+ fmt.Fprintln(buf, jmpinsn, "label_start")
+ fmt.Fprintln(buf, "label_end:")
+ fmt.Fprintln(buf, "MOVD R0, R1")
+ fmt.Fprintln(buf, "RET")
+}
+
+// TestPCalign generates two asm files containing the
+// PCALIGN directive, to verify that correct values are
+// accepted and incorrect values are flagged as errors.
+func TestPCalign(t *testing.T) {
+ var pattern8 = `0x...8\s.*ADD\s..,\sR8`
+ var pattern16 = `0x...[80]\s.*MOVD\s..,\sR16`
+ var pattern32 = `0x...0\s.*ADD\s..,\sR3`
+
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := os.MkdirTemp("", "testpcalign")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // generate a test with valid uses of PCALIGN
+
+ tmpfile := filepath.Join(dir, "x.s")
+ err = os.WriteFile(tmpfile, []byte(validPCAlignSrc), 0644)
+ if err != nil {
+ t.Fatalf("can't write output: %v\n", err)
+ }
+
+ // build generated file without errors and assemble it
+ cmd := testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "x.o"), "-S", tmpfile)
+ cmd.Env = append(os.Environ(), "GOARCH=ppc64le", "GOOS=linux")
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Errorf("Build failed: %v, output: %s", err, out)
+ }
+
+ matched, err := regexp.MatchString(pattern8, string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("The 8 byte alignment is not correct: %t, output:%s\n", matched, out)
+ }
+
+ matched, err = regexp.MatchString(pattern16, string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("The 16 byte alignment is not correct: %t, output:%s\n", matched, out)
+ }
+
+ matched, err = regexp.MatchString(pattern32, string(out))
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !matched {
+ t.Errorf("The 32 byte alignment is not correct: %t, output:%s\n", matched, out)
+ }
+
+ // generate a test with invalid use of PCALIGN
+
+ tmpfile = filepath.Join(dir, "xi.s")
+ err = os.WriteFile(tmpfile, []byte(invalidPCAlignSrc), 0644)
+ if err != nil {
+ t.Fatalf("can't write output: %v\n", err)
+ }
+
+ // build test with errors and check for messages
+ cmd = testenv.Command(t, testenv.GoToolPath(t), "tool", "asm", "-o", filepath.Join(dir, "xi.o"), "-S", tmpfile)
+ cmd.Env = append(os.Environ(), "GOARCH=ppc64le", "GOOS=linux")
+ out, err = cmd.CombinedOutput()
+ if !strings.Contains(string(out), "Unexpected alignment") {
+ t.Errorf("Invalid alignment not detected for PCALIGN\n")
+ }
+}
+
+// Verify register constants are correctly aligned. Much of the ppc64 assembler assumes masking out significant
+// bits will produce a valid register number:
+// REG_Rx & 31 == x
+// REG_Fx & 31 == x
+// REG_Vx & 31 == x
+// REG_VSx & 63 == x
+// REG_SPRx & 1023 == x
+// REG_CRx & 7 == x
+//
+// VR and FPR disjointly overlap VSR; interpreting them as VSR registers
+// should produce the corresponding overlapped VSR number:
+// REG_Fx & 63 == x
+// REG_Vx & 63 == x + 32
+func TestRegValueAlignment(t *testing.T) {
+ tstFunc := func(rstart, rend, msk, rout int) {
+ for i := rstart; i <= rend; i++ {
+ if i&msk != rout {
+				t.Errorf("%v is not aligned to 0x%X (expected %d, got %d)\n", rconv(i), msk, rout, i&msk)
+ }
+ rout++
+ }
+ }
+ var testType = []struct {
+ rstart int
+ rend int
+ msk int
+ rout int
+ }{
+ {REG_VS0, REG_VS63, 63, 0},
+ {REG_R0, REG_R31, 31, 0},
+ {REG_F0, REG_F31, 31, 0},
+ {REG_V0, REG_V31, 31, 0},
+ {REG_V0, REG_V31, 63, 32},
+ {REG_F0, REG_F31, 63, 0},
+ {REG_SPR0, REG_SPR0 + 1023, 1023, 0},
+ {REG_CR0, REG_CR7, 7, 0},
+ {REG_CR0LT, REG_CR7SO, 31, 0},
+ }
+	for _, tc := range testType {
+		tstFunc(tc.rstart, tc.rend, tc.msk, tc.rout)
+	}
+}
+
+// Verify interesting obj.Addr arguments are classified correctly.
+func TestAddrClassifier(t *testing.T) {
+ type cmplx struct {
+ pic int
+ pic_dyn int
+ dyn int
+ nonpic int
+ }
+ tsts := [...]struct {
+ arg obj.Addr
+ output interface{}
+ }{
+ // Supported register type args
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_R1}, C_REG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_R2}, C_REGP},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_F1}, C_FREG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_F2}, C_FREGP},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_V2}, C_VREG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_VS1}, C_VSREG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_VS2}, C_VSREGP},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_CR}, C_CREG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_CR1}, C_CREG},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_CR1SO}, C_CRBIT},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0}, C_SPR},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 1}, C_XER},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 8}, C_LR},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_SPR0 + 9}, C_CTR},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_FPSCR}, C_FPSCR},
+ {obj.Addr{Type: obj.TYPE_REG, Reg: REG_A1}, C_AREG},
+
+ // Memory type arguments.
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_GOTREF}, C_ADDR},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_TOCREF}, C_ADDR},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: &obj.LSym{Type: objabi.STLSBSS}}, cmplx{C_TLS_IE, C_TLS_IE, C_TLS_LE, C_TLS_LE}},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: &obj.LSym{Type: objabi.SDATA}}, C_ADDR},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_AUTO}, C_SOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_AUTO, Offset: BIG}, C_LOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_AUTO, Offset: -BIG - 1}, C_LOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_PARAM}, C_SOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_PARAM, Offset: BIG}, C_LOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_PARAM, Offset: -BIG - 33}, C_LOREG}, // 33 is FixedFrameSize-1
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_NONE}, C_ZOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_NONE, Index: REG_R4}, C_XOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_NONE, Offset: 1}, C_SOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_NONE, Offset: BIG}, C_LOREG},
+ {obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_NONE, Offset: -BIG - 33}, C_LOREG},
+
+ // Misc (golang initializes -0.0 to 0.0, hence the obfuscation below)
+ {obj.Addr{Type: obj.TYPE_TEXTSIZE}, C_TEXTSIZE},
+ {obj.Addr{Type: obj.TYPE_FCONST, Val: 0.0}, C_ZCON},
+ {obj.Addr{Type: obj.TYPE_FCONST, Val: math.Float64frombits(0x8000000000000000)}, C_S16CON},
+
+ // Address type arguments
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_NONE, Offset: 1}, C_SACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_NONE, Offset: BIG}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_NONE, Offset: -BIG - 1}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_NONE, Offset: 1 << 32}, C_DACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: &obj.LSym{Type: objabi.SDATA}}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_STATIC, Sym: &obj.LSym{Type: objabi.SDATA}}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_AUTO, Offset: 1}, C_SACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_AUTO, Offset: BIG}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_AUTO, Offset: -BIG - 1}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_PARAM, Offset: 1}, C_SACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_PARAM, Offset: BIG}, C_LACON},
+ {obj.Addr{Type: obj.TYPE_ADDR, Reg: REG_R0, Name: obj.NAME_PARAM, Offset: -BIG - 33}, C_LACON}, // 33 is FixedFrameSize-1
+
+ // Constant type arguments
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 0}, C_ZCON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1}, C_U1CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 2}, C_U2CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 4}, C_U3CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 8}, C_U4CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 16}, C_U5CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 32}, C_U8CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 14}, C_U15CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 15}, C_U16CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 16}, C_U3216CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 + 1<<16}, C_U32CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 32}, C_S34CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: 1 << 33}, C_64CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -1}, C_S16CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -0x10000}, C_S3216CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -0x10001}, C_S32CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 33)}, C_S34CON},
+ {obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: -(1 << 34)}, C_64CON},
+
+ // Branch like arguments
+ {obj.Addr{Type: obj.TYPE_BRANCH, Sym: &obj.LSym{Type: objabi.SDATA}}, cmplx{C_SBRA, C_LBRAPIC, C_LBRAPIC, C_SBRA}},
+ {obj.Addr{Type: obj.TYPE_BRANCH}, C_SBRA},
+ }
+
+ pic_ctxt9 := ctxt9{ctxt: &obj.Link{Flag_shared: true, Arch: &Linkppc64}, autosize: 0}
+ pic_dyn_ctxt9 := ctxt9{ctxt: &obj.Link{Flag_shared: true, Flag_dynlink: true, Arch: &Linkppc64}, autosize: 0}
+ dyn_ctxt9 := ctxt9{ctxt: &obj.Link{Flag_dynlink: true, Arch: &Linkppc64}, autosize: 0}
+ nonpic_ctxt9 := ctxt9{ctxt: &obj.Link{Arch: &Linkppc64}, autosize: 0}
+ ctxts := [...]*ctxt9{&pic_ctxt9, &pic_dyn_ctxt9, &dyn_ctxt9, &nonpic_ctxt9}
+ name := [...]string{"pic", "pic_dyn", "dyn", "nonpic"}
+ for _, tst := range tsts {
+ var expect []int
+		switch v := tst.output.(type) {
+		case cmplx:
+			expect = []int{v.pic, v.pic_dyn, v.dyn, v.nonpic}
+		case int:
+			expect = []int{v, v, v, v}
+		}
+ for i := range ctxts {
+ if output := ctxts[i].aclass(&tst.arg); output != expect[i] {
+ t.Errorf("%s.aclass(%v) = %v, expected %v\n", name[i], tst.arg, DRconv(output), DRconv(expect[i]))
+ }
+ }
+ }
+}
diff --git a/src/cmd/internal/obj/ppc64/doc.go b/src/cmd/internal/obj/ppc64/doc.go
new file mode 100644
index 0000000..da10ea3
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/doc.go
@@ -0,0 +1,290 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Package ppc64 implements a PPC64 assembler that assembles Go asm into
+the corresponding PPC64 instructions as defined by the Power ISA 3.0B.
+
+This document provides information on how to write code in Go assembler
+for PPC64, focusing on the differences between Go and PPC64 assembly language.
+It assumes some knowledge of PPC64 assembler. The original implementation of
+PPC64 in Go defined many opcodes that differ from the PPC64 opcodes, but
+later additions to the Go assembly language used mnemonics that are mostly
+similar if not identical to the PPC64 mnemonics, such as the VMX and VSX
+instructions. Not every detail is covered here; refer to the Power ISA
+document for more information.
+
+Starting with Go 1.15 the Go objdump supports the -gnu option, which provides a
+side by side view of the Go assembler and the PPC64 assembler output. This is
+extremely helpful in determining what final PPC64 assembly is generated from the
+corresponding Go assembly.
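+
+For example (prog is an illustrative name for an already-built binary):
+
+	go tool objdump -gnu prog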
+
+In the examples below, the Go assembly is on the left, PPC64 assembly on the right.
+
+1. Operand ordering
+
+In Go asm, the last operand (right) is the target operand, but with PPC64 asm,
+the first operand (left) is the target. The order of the remaining operands is
+not consistent: in general opcodes with 3 operands that perform math or logical
+operations have their operands in reverse order. Opcodes for vector instructions
+and those with more than 3 operands usually have operands in the same order except
+for the target operand, which is first in PPC64 asm and last in Go asm.
+
+Example:
+
+ ADD R3, R4, R5 <=> add r5, r4, r3
+
+2. Constant operands
+
+In Go asm, an operand that starts with '$' indicates a constant value. If the
+instruction using the constant has an immediate version of the opcode, then an
+immediate value is used with the opcode if possible.
+
+Example:
+
+ ADD $1, R3, R4 <=> addi r4, r3, 1
+
+3. Opcodes setting condition codes
+
+In PPC64 asm, some instructions other than compares have variations that can set
+the condition code where meaningful. This is indicated by adding '.' to the end
+of the PPC64 instruction. In Go asm, these instructions have 'CC' at the end of
+the opcode. The possible settings of the condition code depend on the instruction.
+CR0 is the default for fixed-point instructions; CR1 for floating point; CR6 for
+vector instructions.
+
+Example:
+
+ ANDCC R3, R4, R5 <=> and. r5, r3, r4 (set CR0)
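+
+A floating point variant sets CR1, for example (a sketch following the
+operand ordering described above):
+
+	FADDCC F1, F2, F3 <=> fadd. f3, f1, f2 (set CR1)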
+
+4. Loads and stores from memory
+
+In Go asm, opcodes starting with 'MOV' indicate a load or store. When the target
+is a memory reference, then it is a store; when the target is a register and the
+source is a memory reference, then it is a load.
+
+MOV{B,H,W,D} variations identify the size as byte, halfword, word, doubleword.
+
+Adding 'Z' to the opcode for a load indicates zero extend; if omitted it is sign extend.
+Adding 'U' to a load or store indicates an update of the base register with the offset.
+Adding 'BR' to an opcode indicates byte-reversed load or store, or the order opposite
+of the expected endian order. If 'BR' is used then zero extend is assumed.
+
+Memory references n(Ra) indicate the address in Ra + n. When used with an update form
+of an opcode, the value in Ra is incremented by n.
+
+Memory references (Ra+Rb) or (Ra)(Rb) indicate the address Ra + Rb, used by indexed
+loads or stores. Both forms are accepted. When used with an update then the base register
+is updated by the value in the index register.
+
+Examples:
+
+ MOVD (R3), R4 <=> ld r4,0(r3)
+ MOVW (R3), R4 <=> lwa r4,0(r3)
+ MOVWZU 4(R3), R4 <=> lwzu r4,4(r3)
+ MOVWZ (R3+R5), R4 <=> lwzx r4,r3,r5
+ MOVHZ (R3), R4 <=> lhz r4,0(r3)
+ MOVHU 2(R3), R4 <=> lhau r4,2(r3)
+ MOVBZ (R3), R4 <=> lbz r4,0(r3)
+
+ MOVD R4,(R3) <=> std r4,0(r3)
+ MOVW R4,(R3) <=> stw r4,0(r3)
+ MOVW R4,(R3+R5) <=> stwx r4,r3,r5
+ MOVWU R4,4(R3) <=> stwu r4,4(r3)
+ MOVH R4,2(R3) <=> sth r4,2(r3)
+ MOVBU R4,(R3)(R5) <=> stbux r4,r3,r5
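+
+Byte-reversed loads and stores use the indexed forms, for example (a sketch
+mirroring the 'BR' description above; register choices are illustrative):
+
+	MOVWBR (R3+R5), R4 <=> lwbrx r4,r3,r5
+	MOVWBR R4,(R3+R5) <=> stwbrx r4,r3,r5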
+
+5. Compares
+
+When an instruction does a compare or other operation that might
+result in a condition code, then the resulting condition is set
+in a field of the condition register. The condition register consists
+of 8 4-bit fields named CR0 - CR7. When a compare instruction
+identifies a CR then the resulting condition is set in that field
+to be read by a later branch or isel instruction. Within these fields,
+bits are set to indicate less than, greater than, or equal conditions.
+
+Once an instruction sets a condition, then a subsequent branch, isel or
+other instruction can read the condition field and operate based on the
+bit settings.
+
+Examples:
+
+ CMP R3, R4 <=> cmp r3, r4 (CR0 assumed)
+ CMP R3, R4, CR1 <=> cmp cr1, r3, r4
+
+Note that the condition register is the target operand of compare opcodes, so
+the remaining operands are in the same order for Go asm and PPC64 asm.
+When CR0 is used then it is implicit and does not need to be specified.
+
+6. Branches
+
+Many branches are represented as a form of the BC instruction. There are
+other extended opcodes to make it easier to see what type of branch is being
+used.
+
+The following is a brief description of the BC instruction and its commonly
+used operands.
+
+BC op1, op2, op3
+
+ op1: type of branch
+ 16 -> bctr (branch on ctr)
+ 12 -> bcr (branch if cr bit is set)
+ 8 -> bcr+bctr (branch on ctr and cr values)
+ 4 -> bcr != 0 (branch if specified cr bit is not set)
+
+ There are more combinations but these are the most common.
+
+ op2: condition register field and condition bit
+
+ This contains an immediate value indicating which condition field
+ to read and what bits to test. Each field is 4 bits long with CR0
+ at bit 0, CR1 at bit 4, etc. The value is computed as 4*CR+condition
+ with these condition values:
+
+	0 -> LT
+	1 -> GT
+	2 -> EQ
+	3 -> SO
+
+ Thus 0 means test CR0 for LT, 5 means CR1 for GT, 30 means CR7 for EQ.
+
+ op3: branch target
+
+Examples:
+
+ BC 12, 0, target <=> blt cr0, target
+ BC 12, 2, target <=> beq cr0, target
+ BC 12, 5, target <=> bgt cr1, target
+ BC 12, 30, target <=> beq cr7, target
+ BC 4, 6, target <=> bne cr1, target
+	BC 4, 1, target <=> ble cr0, target
+
+ The following extended opcodes are available for ease of use and readability:
+
+ BNE CR2, target <=> bne cr2, target
+ BEQ CR4, target <=> beq cr4, target
+ BLT target <=> blt target (cr0 default)
+ BGE CR7, target <=> bge cr7, target
+
+Refer to the ISA for more information on additional values for the BC instruction,
+how to handle the SO (summary overflow) bit, and much more.
+
+7. Align directive
+
+Starting with Go 1.12, Go asm supports the PCALIGN directive, which indicates
+that the next instruction should be aligned to the specified value. Originally
+8 and 16 were the only supported values, and a maximum of 2 NOPs would be added
+to align the code. That meant that if the code was aligned to 4 when PCALIGN
+$16 was reached, the code would only be aligned to 8 to avoid adding 3 NOPs.
+
+The purpose of this directive is to improve performance for cases like loops
+where better alignment (8 or 16 instead of 4) might be helpful. This directive
+exists in PPC64 assembler and is frequently used by PPC64 assembler writers.
+
+PCALIGN $16
+PCALIGN $8
+
+By default, functions in Go are aligned to 16 bytes, as is the case in all
+other compilers for PPC64. If there is a PCALIGN directive requesting alignment
+greater than 16, then the alignment of the containing function must be
+promoted to that same alignment or greater.
+
+The behavior of PCALIGN is changed in Go 1.21 to be more straightforward to
+ensure the alignment required for some instructions in power10. The acceptable
+values are 8, 16, 32 and 64, and the use of those values will always provide the
+specified alignment.
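+
+For example, to request alignment at the top of a hot loop (a sketch; the
+label and registers are illustrative, with R5 holding the loop bound):
+
+	MOVD $0, R4
+	PCALIGN $32
+	loop:
+	ADD $1, R4
+	CMP R4, R5
+	BLT loop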
+
+8. Shift instructions
+
+The simple scalar shifts on PPC64 expect a shift count that fits in 5 bits for
+32-bit values or 6 bits for 64-bit values. If the shift count is a constant value
+greater than the max then the assembler sets it to the max for that size (31 for
+32 bit values, 63 for 64 bit values). If the shift count is in a register, then
+only the low 5 or 6 bits of the register will be used as the shift count. The
+Go compiler will add appropriate code to compare the shift value to achieve the
+correct result, and the assembler does not add extra checking.
+
+Examples:
+
+ SRAD $8,R3,R4 => sradi r4,r3,8
+ SRD $8,R3,R4 => rldicl r4,r3,56,8
+ SLD $8,R3,R4 => rldicr r4,r3,8,55
+ SRAW $16,R4,R5 => srawi r5,r4,16
+ SRW $40,R4,R5 => rlwinm r5,r4,0,0,31
+ SLW $12,R4,R5 => rlwinm r5,r4,12,0,19
+
+Some non-simple shifts have operands in the Go assembly which don't map directly
+onto operands in the PPC64 assembly. When an operand in a shift instruction in the
+Go assembly is a bit mask, that mask is represented as a start and end bit in the
+PPC64 assembly instead of a mask. See the ISA for more detail on these types of shifts.
+Here are a few examples:
+
+ RLWMI $7,R3,$65535,R6 => rlwimi r6,r3,7,16,31
+ RLDMI $0,R4,$7,R6 => rldimi r6,r4,0,61
+
+More recently, Go opcodes were added which map directly onto the PPC64 opcodes. It is
+recommended to use the newer opcodes to avoid confusion.
+
+ RLDICL $0,R4,$15,R6 => rldicl r6,r4,0,15
+	RLDICR $0,R4,$15,R6 => rldicr r6,r4,0,15
+
+# Register naming
+
+1. Special register usage in Go asm
+
+The following registers should not be modified by user Go assembler code.
+
+ R0: Go code expects this register to contain the value 0.
+ R1: Stack pointer
+ R2: TOC pointer when compiled with -shared or -dynlink (a.k.a position independent code)
+ R13: TLS pointer
+ R30: g (goroutine)
+
+Register names:
+
+ Rn is used for general purpose registers. (0-31)
+ Fn is used for floating point registers. (0-31)
+ Vn is used for vector registers. Slot 0 of Vn overlaps with Fn. (0-31)
+ VSn is used for vector-scalar registers. V0-V31 overlap with VS32-VS63. (0-63)
+ CTR represents the count register.
+ LR represents the link register.
+	CR represents the condition register.
+ CRn represents a condition register field. (0-7)
+ CRnLT represents CR bit 0 of CR field n. (0-7)
+ CRnGT represents CR bit 1 of CR field n. (0-7)
+ CRnEQ represents CR bit 2 of CR field n. (0-7)
+ CRnSO represents CR bit 3 of CR field n. (0-7)
+
+# GOPPC64 >= power10 and its effects on Go asm
+
+When GOPPC64=power10 is used to compile a Go program for ppc64le/linux, MOV*, FMOV*, and ADD
+opcodes which would otherwise require 2 or more machine instructions to materialize a 32 bit
+constant or symbolic reference are implemented using prefixed instructions.
+
+A user who wishes granular control over the generated machine code is advised to use Go asm
+opcodes which explicitly translate to one PPC64 machine instruction. Most common opcodes
+are supported.
+
+Some examples of how pseudo-op assembly changes with GOPPC64:
+
+	Go asm                       GOPPC64 <= power9          GOPPC64 >= power10
+	MOVD mypackage·foo(SB), R3   addis r3, r2, ...          pld r3, ...
+	                             ld r3, ...(r3)
+
+	MOVD 131072(R3), R4          addis r31, r3, 2           pld r4, 131072(r3)
+	                             ld r4, 0(r31)
+
+	ADD $131073, R3              lis r31, 2                 paddi r3, r3, 131073
+	                             addi r31, r31, 1
+	                             add r3, r31, r3
+
+	MOVD $131073, R3             lis r3, 2                  pli r3, 131073
+	                             addi r3, r3, 1
+
+	MOVD $mypackage·foo(SB), R3  addis r3, r2, ...          pla r3, ...
+	                             addi r3, r3, ...
+*/
+package ppc64
diff --git a/src/cmd/internal/obj/ppc64/list9.go b/src/cmd/internal/obj/ppc64/list9.go
new file mode 100644
index 0000000..451c162
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/list9.go
@@ -0,0 +1,117 @@
+// cmd/9l/list.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "fmt"
+)
+
+func init() {
+ obj.RegisterRegister(obj.RBasePPC64, REG_SPR0+1024, rconv)
+ // Note, the last entry in Anames is "LASTAOUT", it is not a real opcode.
+ obj.RegisterOpcode(obj.ABasePPC64, Anames[:len(Anames)-1])
+ obj.RegisterOpcode(AFIRSTGEN, GenAnames)
+}
+
+func rconv(r int) string {
+ if r == 0 {
+ return "NONE"
+ }
+ if r == REGG {
+ // Special case.
+ return "g"
+ }
+ if REG_R0 <= r && r <= REG_R31 {
+ return fmt.Sprintf("R%d", r-REG_R0)
+ }
+ if REG_F0 <= r && r <= REG_F31 {
+ return fmt.Sprintf("F%d", r-REG_F0)
+ }
+ if REG_V0 <= r && r <= REG_V31 {
+ return fmt.Sprintf("V%d", r-REG_V0)
+ }
+ if REG_VS0 <= r && r <= REG_VS63 {
+ return fmt.Sprintf("VS%d", r-REG_VS0)
+ }
+ if REG_CR0 <= r && r <= REG_CR7 {
+ return fmt.Sprintf("CR%d", r-REG_CR0)
+ }
+ if REG_CR0LT <= r && r <= REG_CR7SO {
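+		// The CR bit constants are kept 32-aligned (see a.out.go), so
+		// REG_CR0LT is a multiple of 4 and r%4 selects the bit name.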
+ bits := [4]string{"LT", "GT", "EQ", "SO"}
+ crf := (r - REG_CR0LT) / 4
+ return fmt.Sprintf("CR%d%s", crf, bits[r%4])
+ }
+ if REG_A0 <= r && r <= REG_A7 {
+ return fmt.Sprintf("A%d", r-REG_A0)
+ }
+ if r == REG_CR {
+ return "CR"
+ }
+ if REG_SPR0 <= r && r <= REG_SPR0+1023 {
+ switch r {
+ case REG_XER:
+ return "XER"
+
+ case REG_LR:
+ return "LR"
+
+ case REG_CTR:
+ return "CTR"
+ }
+
+ return fmt.Sprintf("SPR(%d)", r-REG_SPR0)
+ }
+
+ if r == REG_FPSCR {
+ return "FPSCR"
+ }
+ if r == REG_MSR {
+ return "MSR"
+ }
+
+ return fmt.Sprintf("Rgok(%d)", r-obj.RBasePPC64)
+}
+
+func DRconv(a int) string {
+	if a >= C_NONE && a <= C_NCLASS {
+		return cnames9[a]
+	}
+	return "C_??"
+}
+
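+// ConstantToCRbit converts a constant CR bit number (0-31, as used in the
+// BI field of a BC instruction) to its CR bit register constant, and
+// reports whether c was in range.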
+func ConstantToCRbit(c int64) (int16, bool) {
+ reg64 := REG_CRBIT0 + c
+ success := reg64 >= REG_CR0LT && reg64 <= REG_CR7SO
+ return int16(reg64), success
+}
diff --git a/src/cmd/internal/obj/ppc64/obj9.go b/src/cmd/internal/obj/ppc64/obj9.go
new file mode 100644
index 0000000..02831b8
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/obj9.go
@@ -0,0 +1,1412 @@
+// cmd/9l/noop.c, cmd/9l/pass.c, cmd/9l/span.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "internal/abi"
+ "log"
+)
+
+func progedit(ctxt *obj.Link, p *obj.Prog, newprog obj.ProgAlloc) {
+ p.From.Class = 0
+ p.To.Class = 0
+
+ c := ctxt9{ctxt: ctxt, newprog: newprog}
+
+ // Rewrite BR/BL to symbol as TYPE_BRANCH.
+ switch p.As {
+ case ABR,
+ ABL,
+ obj.ARET,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ if p.To.Sym != nil {
+ p.To.Type = obj.TYPE_BRANCH
+ }
+ }
+
+ // Rewrite float constants to values stored in memory.
+ switch p.As {
+ case AFMOVS:
+ if p.From.Type == obj.TYPE_FCONST {
+ f32 := float32(p.From.Val.(float64))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Float32Sym(f32)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+
+ case AFMOVD:
+ if p.From.Type == obj.TYPE_FCONST {
+ f64 := p.From.Val.(float64)
+ // Constant not needed in memory for float +/- 0
+ if f64 != 0 {
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Float64Sym(f64)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+ }
+
+ case AMOVD:
+ // 32b constants (signed and unsigned) can be generated via 1 or 2 instructions.
+ // All others must be placed in memory and loaded.
+ isS32 := int64(int32(p.From.Offset)) == p.From.Offset
+ isU32 := uint64(uint32(p.From.Offset)) == uint64(p.From.Offset)
+ if p.From.Type == obj.TYPE_CONST && p.From.Name == obj.NAME_NONE && p.From.Reg == 0 && !isS32 && !isU32 {
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = ctxt.Int64Sym(p.From.Offset)
+ p.From.Name = obj.NAME_EXTERN
+ p.From.Offset = 0
+ }
+ }
+
+ switch p.As {
+ // Rewrite SUB constants into ADD.
+ case ASUBC:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDC
+ }
+
+ case ASUBCCC:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADDCCC
+ }
+
+ case ASUB:
+ if p.From.Type == obj.TYPE_CONST {
+ p.From.Offset = -p.From.Offset
+ p.As = AADD
+ }
+
+ // To maintain backwards compatibility, we accept some 4 argument usage of
+ // several opcodes which was likely not intended, but did work. These are not
+ // added to optab to avoid the chance this behavior might be used with newer
+ // instructions.
+ //
+ // Rewrite argument ordering like "ADDEX R3, $3, R4, R5" into
+ // "ADDEX R3, R4, $3, R5"
+ case AVSHASIGMAW, AVSHASIGMAD, AADDEX, AXXSLDWI, AXXPERMDI:
+ if len(p.RestArgs) == 2 && p.Reg == 0 && p.RestArgs[0].Addr.Type == obj.TYPE_CONST && p.RestArgs[1].Addr.Type == obj.TYPE_REG {
+ p.Reg = p.RestArgs[1].Addr.Reg
+ p.RestArgs = p.RestArgs[:1]
+ }
+ }
+
+ if c.ctxt.Headtype == objabi.Haix {
+ c.rewriteToUseTOC(p)
+ } else if c.ctxt.Flag_dynlink {
+ c.rewriteToUseGot(p)
+ }
+}
+
+// Rewrite p, if necessary, to access a symbol using its TOC anchor.
+// This code is for AIX only.
+func (c *ctxt9) rewriteToUseTOC(p *obj.Prog) {
+ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+ return
+ }
+
+ if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+		// ADUFFZERO/ADUFFCOPY is considered an ABL, except in dynamic
+		// linking mode, where it should be an indirect call.
+ if !c.ctxt.Flag_dynlink {
+ return
+ }
+ // ADUFFxxx $offset
+ // becomes
+ // MOVD runtime.duffxxx@TOC, R12
+ // ADD $offset, R12
+ // MOVD R12, LR
+ // BL (LR)
+ var sym *obj.LSym
+ if p.As == obj.ADUFFZERO {
+ sym = c.ctxt.Lookup("runtime.duffzero")
+ } else {
+ sym = c.ctxt.Lookup("runtime.duffcopy")
+ }
+ // Retrieve or create the TOC anchor.
+ symtoc := c.ctxt.LookupInit("TOC."+sym.Name, func(s *obj.LSym) {
+ s.Type = objabi.SDATA
+ s.Set(obj.AttrDuplicateOK, true)
+ s.Set(obj.AttrStatic, true)
+ c.ctxt.Data = append(c.ctxt.Data, s)
+ s.WriteAddr(c.ctxt, 0, 8, sym, 0)
+ })
+
+ offset := p.To.Offset
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_TOCREF
+ p.From.Sym = symtoc
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R12
+ p.To.Name = obj.NAME_NONE
+ p.To.Offset = 0
+ p.To.Sym = nil
+ p1 := obj.Appendp(p, c.newprog)
+ p1.As = AADD
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = offset
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = REG_R12
+ p2 := obj.Appendp(p1, c.newprog)
+ p2.As = AMOVD
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = REG_R12
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = REG_LR
+ p3 := obj.Appendp(p2, c.newprog)
+ p3.As = obj.ACALL
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = REG_LR
+ }
+
+ var source *obj.Addr
+ if p.From.Name == obj.NAME_EXTERN || p.From.Name == obj.NAME_STATIC {
+ if p.From.Type == obj.TYPE_ADDR {
+ if p.As == ADWORD {
+ // ADWORD $sym doesn't need TOC anchor
+ return
+ }
+ if p.As != AMOVD {
+ c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v", p)
+ return
+ }
+ if p.To.Type != obj.TYPE_REG {
+ c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v", p)
+ return
+ }
+ } else if p.From.Type != obj.TYPE_MEM {
+ c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p)
+ return
+ }
+ source = &p.From
+
+ } else if p.To.Name == obj.NAME_EXTERN || p.To.Name == obj.NAME_STATIC {
+ if p.To.Type != obj.TYPE_MEM {
+ c.ctxt.Diag("do not know how to handle %v without TYPE_MEM", p)
+ return
+ }
+ if source != nil {
+ c.ctxt.Diag("cannot handle symbols on both sides in %v", p)
+ return
+ }
+ source = &p.To
+ } else {
+		return
+	}
+
+ if source.Sym == nil {
+ c.ctxt.Diag("do not know how to handle nil symbol in %v", p)
+ return
+ }
+
+ if source.Sym.Type == objabi.STLSBSS {
+ return
+ }
+
+ // Retrieve or create the TOC anchor.
+ symtoc := c.ctxt.LookupInit("TOC."+source.Sym.Name, func(s *obj.LSym) {
+ s.Type = objabi.SDATA
+ s.Set(obj.AttrDuplicateOK, true)
+ s.Set(obj.AttrStatic, true)
+ c.ctxt.Data = append(c.ctxt.Data, s)
+ s.WriteAddr(c.ctxt, 0, 8, source.Sym, 0)
+ })
+
+ if source.Type == obj.TYPE_ADDR {
+ // MOVD $sym, Rx becomes MOVD symtoc, Rx
+ // MOVD $sym+<off>, Rx becomes MOVD symtoc, Rx; ADD <off>, Rx
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = symtoc
+ p.From.Name = obj.NAME_TOCREF
+
+ if p.From.Offset != 0 {
+ q := obj.Appendp(p, c.newprog)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = p.From.Offset
+ p.From.Offset = 0
+ q.To = p.To
+ }
+		return
+	}
+
+ // MOVx sym, Ry becomes MOVD symtoc, REGTMP; MOVx (REGTMP), Ry
+ // MOVx Ry, sym becomes MOVD symtoc, REGTMP; MOVx Ry, (REGTMP)
+ // An addition may be inserted between the two MOVs if there is an offset.
+
+ q := obj.Appendp(p, c.newprog)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_MEM
+ q.From.Sym = symtoc
+ q.From.Name = obj.NAME_TOCREF
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGTMP
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = p.As
+ q.From = p.From
+ q.To = p.To
+ if p.From.Name != obj.NAME_NONE {
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REGTMP
+ q.From.Name = obj.NAME_NONE
+ q.From.Sym = nil
+ } else if p.To.Name != obj.NAME_NONE {
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REGTMP
+ q.To.Name = obj.NAME_NONE
+ q.To.Sym = nil
+ } else {
+ c.ctxt.Diag("unreachable case in rewriteToUseTOC with %v", p)
+ }
+
+ obj.Nopout(p)
+}
+
+// Rewrite p, if necessary, to access global data via the global offset table.
+func (c *ctxt9) rewriteToUseGot(p *obj.Prog) {
+ if p.As == obj.ADUFFCOPY || p.As == obj.ADUFFZERO {
+ // ADUFFxxx $offset
+ // becomes
+ // MOVD runtime.duffxxx@GOT, R12
+ // ADD $offset, R12
+ // MOVD R12, LR
+ // BL (LR)
+ var sym *obj.LSym
+ if p.As == obj.ADUFFZERO {
+ sym = c.ctxt.LookupABI("runtime.duffzero", obj.ABIInternal)
+ } else {
+ sym = c.ctxt.LookupABI("runtime.duffcopy", obj.ABIInternal)
+ }
+ offset := p.To.Offset
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_GOTREF
+ p.From.Sym = sym
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R12
+ p.To.Name = obj.NAME_NONE
+ p.To.Offset = 0
+ p.To.Sym = nil
+ p1 := obj.Appendp(p, c.newprog)
+ p1.As = AADD
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = offset
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = REG_R12
+ p2 := obj.Appendp(p1, c.newprog)
+ p2.As = AMOVD
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = REG_R12
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = REG_LR
+ p3 := obj.Appendp(p2, c.newprog)
+ p3.As = obj.ACALL
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = REG_LR
+ }
+
+ // We only care about global data: NAME_EXTERN means a global
+ // symbol in the Go sense, and p.Sym.Local is true for a few
+ // internally defined symbols.
+ if p.From.Type == obj.TYPE_ADDR && p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
+ // MOVD $sym, Rx becomes MOVD sym@GOT, Rx
+ // MOVD $sym+<off>, Rx becomes MOVD sym@GOT, Rx; ADD <off>, Rx
+ if p.As != AMOVD {
+ c.ctxt.Diag("do not know how to handle TYPE_ADDR in %v with -dynlink", p)
+ }
+ if p.To.Type != obj.TYPE_REG {
+ c.ctxt.Diag("do not know how to handle LEAQ-type insn to non-register in %v with -dynlink", p)
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_GOTREF
+ if p.From.Offset != 0 {
+ q := obj.Appendp(p, c.newprog)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = p.From.Offset
+ q.To = p.To
+ p.From.Offset = 0
+ }
+ }
+ if p.GetFrom3() != nil && p.GetFrom3().Name == obj.NAME_EXTERN {
+ c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ var source *obj.Addr
+ // MOVx sym, Ry becomes MOVD sym@GOT, REGTMP; MOVx (REGTMP), Ry
+ // MOVx Ry, sym becomes MOVD sym@GOT, REGTMP; MOVx Ry, (REGTMP)
+ // An addition may be inserted between the two MOVs if there is an offset.
+ if p.From.Name == obj.NAME_EXTERN && !p.From.Sym.Local() {
+ if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
+ c.ctxt.Diag("cannot handle NAME_EXTERN on both sides in %v with -dynlink", p)
+ }
+ source = &p.From
+ } else if p.To.Name == obj.NAME_EXTERN && !p.To.Sym.Local() {
+ source = &p.To
+ } else {
+ return
+ }
+ if p.As == obj.ATEXT || p.As == obj.AFUNCDATA || p.As == obj.ACALL || p.As == obj.ARET || p.As == obj.AJMP {
+ return
+ }
+ if source.Sym.Type == objabi.STLSBSS {
+ return
+ }
+ if source.Type != obj.TYPE_MEM {
+ c.ctxt.Diag("don't know how to handle %v with -dynlink", p)
+ }
+ p1 := obj.Appendp(p, c.newprog)
+ p2 := obj.Appendp(p1, c.newprog)
+
+ p1.As = AMOVD
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Sym = source.Sym
+ p1.From.Name = obj.NAME_GOTREF
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = REGTMP
+
+ p2.As = p.As
+ p2.From = p.From
+ p2.To = p.To
+ if p.From.Name == obj.NAME_EXTERN {
+ p2.From.Reg = REGTMP
+ p2.From.Name = obj.NAME_NONE
+ p2.From.Sym = nil
+ } else if p.To.Name == obj.NAME_EXTERN {
+ p2.To.Reg = REGTMP
+ p2.To.Name = obj.NAME_NONE
+ p2.To.Sym = nil
+ } else {
+ return
+ }
+ obj.Nopout(p)
+}
+
+func preprocess(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) {
+ // TODO(minux): add morestack short-cuts with small fixed frame-size.
+ if cursym.Func().Text == nil || cursym.Func().Text.Link == nil {
+ return
+ }
+
+ c := ctxt9{ctxt: ctxt, cursym: cursym, newprog: newprog}
+
+ p := c.cursym.Func().Text
+ textstksiz := p.To.Offset
+ if textstksiz == -8 {
+ // Compatibility hack.
+ p.From.Sym.Set(obj.AttrNoFrame, true)
+ textstksiz = 0
+ }
+ if textstksiz%8 != 0 {
+ c.ctxt.Diag("frame size %d not a multiple of 8", textstksiz)
+ }
+ if p.From.Sym.NoFrame() {
+ if textstksiz != 0 {
+ c.ctxt.Diag("NOFRAME functions must have a frame size of 0, not %d", textstksiz)
+ }
+ }
+
+ c.cursym.Func().Args = p.To.Val.(int32)
+ c.cursym.Func().Locals = int32(textstksiz)
+
+ /*
+ * find leaf subroutines
+ * expand RET
+ * expand BECOME pseudo
+ */
+
+ var q *obj.Prog
+ var q1 *obj.Prog
+ for p := c.cursym.Func().Text; p != nil; p = p.Link {
+ switch p.As {
+ /* too hard, just leave alone */
+ case obj.ATEXT:
+ q = p
+
+ p.Mark |= LABEL | LEAF | SYNC
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+
+ case ANOR:
+ q = p
+ if p.To.Type == obj.TYPE_REG {
+ if p.To.Reg == REGZERO {
+ p.Mark |= LABEL | SYNC
+ }
+ }
+
+ case ALWAR,
+ ALBAR,
+ ASTBCCC,
+ ASTWCCC,
+ AEIEIO,
+ AICBI,
+ AISYNC,
+ ATLBIE,
+ ATLBIEL,
+ ASLBIA,
+ ASLBIE,
+ ASLBMFEE,
+ ASLBMFEV,
+ ASLBMTE,
+ ADCBF,
+ ADCBI,
+ ADCBST,
+ ADCBT,
+ ADCBTST,
+ ADCBZ,
+ ASYNC,
+ ATLBSYNC,
+ APTESYNC,
+ ALWSYNC,
+ ATW,
+ AWORD,
+ ARFI,
+ ARFCI,
+ ARFID,
+ AHRFID:
+ q = p
+ p.Mark |= LABEL | SYNC
+ continue
+
+ case AMOVW, AMOVWZ, AMOVD:
+ q = p
+ if p.From.Reg >= REG_SPECIAL || p.To.Reg >= REG_SPECIAL {
+ p.Mark |= LABEL | SYNC
+ }
+ continue
+
+ case AFABS,
+ AFABSCC,
+ AFADD,
+ AFADDCC,
+ AFCTIW,
+ AFCTIWCC,
+ AFCTIWZ,
+ AFCTIWZCC,
+ AFDIV,
+ AFDIVCC,
+ AFMADD,
+ AFMADDCC,
+ AFMOVD,
+ AFMOVDU,
+ /* case AFMOVDS: */
+ AFMOVS,
+ AFMOVSU,
+
+ /* case AFMOVSD: */
+ AFMSUB,
+ AFMSUBCC,
+ AFMUL,
+ AFMULCC,
+ AFNABS,
+ AFNABSCC,
+ AFNEG,
+ AFNEGCC,
+ AFNMADD,
+ AFNMADDCC,
+ AFNMSUB,
+ AFNMSUBCC,
+ AFRSP,
+ AFRSPCC,
+ AFSUB,
+ AFSUBCC:
+ q = p
+
+ p.Mark |= FLOAT
+ continue
+
+ case ABL,
+ ABCL,
+ obj.ADUFFZERO,
+ obj.ADUFFCOPY:
+ c.cursym.Func().Text.Mark &^= LEAF
+ fallthrough
+
+ case ABC,
+ ABEQ,
+ ABGE,
+ ABGT,
+ ABLE,
+ ABLT,
+ ABNE,
+ ABR,
+ ABVC,
+ ABVS:
+ p.Mark |= BRANCH
+ q = p
+ q1 = p.To.Target()
+ if q1 != nil {
+ // NOPs are not removed due to #40689.
+
+ if q1.Mark&LEAF == 0 {
+ q1.Mark |= LABEL
+ }
+ } else {
+ p.Mark |= LABEL
+ }
+ q1 = p.Link
+ if q1 != nil {
+ q1.Mark |= LABEL
+ }
+ continue
+
+ case AFCMPO, AFCMPU:
+ q = p
+ p.Mark |= FCMP | FLOAT
+ continue
+
+ case obj.ARET:
+ q = p
+ if p.Link != nil {
+ p.Link.Mark |= LABEL
+ }
+ continue
+
+ case obj.ANOP:
+ // NOPs are not removed due to
+ // #40689
+ continue
+
+ default:
+ q = p
+ continue
+ }
+ }
+
+ autosize := int32(0)
+ var p1 *obj.Prog
+ var p2 *obj.Prog
+ for p := c.cursym.Func().Text; p != nil; p = p.Link {
+ o := p.As
+ switch o {
+ case obj.ATEXT:
+ autosize = int32(textstksiz)
+
+ if p.Mark&LEAF != 0 && autosize == 0 {
+ // A leaf function with no locals has no frame.
+ p.From.Sym.Set(obj.AttrNoFrame, true)
+ }
+
+ if !p.From.Sym.NoFrame() {
+ // If there is a stack frame at all, it includes
+ // space to save the LR.
+ autosize += int32(c.ctxt.Arch.FixedFrameSize)
+ }
+
+ if p.Mark&LEAF != 0 && autosize < abi.StackSmall {
+ // A leaf function with a small stack can be marked
+ // NOSPLIT, avoiding a stack check.
+ p.From.Sym.Set(obj.AttrNoSplit, true)
+ }
+
+ p.To.Offset = int64(autosize)
+
+ q = p
+
+ if NeedTOCpointer(c.ctxt) && c.cursym.Name != "runtime.duffzero" && c.cursym.Name != "runtime.duffcopy" {
+ // When compiling Go into PIC, without PCrel support, all functions must start
+ // with instructions to load the TOC pointer into r2:
+ //
+ // addis r2, r12, .TOC.-func@ha
+ // addi r2, r2, .TOC.-func@l+4
+ //
+ // We could probably skip this prologue in some situations
+ // but it's a bit subtle. However, it is both safe and
+ // necessary to leave the prologue off duffzero and
+ // duffcopy as we rely on being able to jump to a specific
+ // instruction offset for them.
+ //
+ // These are AWORDS because there is no (afaict) way to
+ // generate the addis instruction except as part of the
+ // load of a large constant, and in that case there is no
+ // way to use r12 as the source.
+ //
+ // Note that the same condition is tested in
+ // putelfsym in cmd/link/internal/ld/symtab.go
+ // where we set the st_other field to indicate
+ // the presence of these instructions.
+ q = obj.Appendp(q, c.newprog)
+ q.As = AWORD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = 0x3c4c0000
+ q = obj.Appendp(q, c.newprog)
+ q.As = AWORD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = 0x38420000
+ rel := obj.Addrel(c.cursym)
+ rel.Off = 0
+ rel.Siz = 8
+ rel.Sym = c.ctxt.Lookup(".TOC.")
+ rel.Type = objabi.R_ADDRPOWER_PCREL
+ }
+
+ if !c.cursym.Func().Text.From.Sym.NoSplit() {
+ q = c.stacksplit(q, autosize) // emit split check
+ }
+
+ if autosize != 0 {
+ var prologueEnd *obj.Prog
+ // Save the link register and update the SP. MOVDU is used unless
+ // the frame size is too large. The link register must be saved
+ // even for non-empty leaf functions so that traceback works.
+ if autosize >= -BIG && autosize <= BIG {
+ // Use MOVDU to adjust R1 when saving R31, if autosize is small.
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_LR
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGTMP
+ prologueEnd = q
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVDU
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REGTMP
+ q.To.Type = obj.TYPE_MEM
+ q.To.Offset = int64(-autosize)
+ q.To.Reg = REGSP
+ q.Spadj = autosize
+ } else {
+ // Frame size is too large for a MOVDU instruction.
+ // Store link register before decrementing SP, so if a signal comes
+ // during the execution of the function prologue, the traceback
+ // code will not see a half-updated stack frame.
+ // This sequence is not async preemptible, as if we open a frame
+ // at the current SP, it will clobber the saved LR.
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_LR
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R29 // REGTMP may be used to synthesize large offset in the next instruction
+
+ q = c.ctxt.StartUnsafePoint(q, c.newprog)
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R29
+ q.To.Type = obj.TYPE_MEM
+ q.To.Offset = int64(-autosize)
+ q.To.Reg = REGSP
+
+ prologueEnd = q
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AADD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(-autosize)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = +autosize
+
+ q = c.ctxt.EndUnsafePoint(q, c.newprog, -1)
+ }
+ prologueEnd.Pos = prologueEnd.Pos.WithXlogue(src.PosPrologueEnd)
+ } else if c.cursym.Func().Text.Mark&LEAF == 0 {
+ // A very few functions that do not return to their caller
+ // (e.g. gogo) are not identified as leaves but still have
+ // no frame.
+ c.cursym.Func().Text.Mark |= LEAF
+ }
+
+ if c.cursym.Func().Text.Mark&LEAF != 0 {
+ c.cursym.Set(obj.AttrLeaf, true)
+ break
+ }
+
+ if NeedTOCpointer(c.ctxt) {
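+				// Save R2 (the TOC pointer) at 24(SP), the conventional
+				// ELFv2 TOC save slot, so it can be reloaded after calls
+				// that may clobber it.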
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R2
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REGSP
+ q.To.Offset = 24
+ }
+
+ if c.cursym.Func().Text.From.Sym.Wrapper() {
+ // if(g->panic != nil && g->panic->argp == FP) g->panic->argp = bottom-of-frame
+ //
+				//	MOVD g_panic(g), R22
+				//	CMP R0, R22
+				//	BEQ end
+				//	MOVD panic_argp(R22), R23
+				//	ADD $(autosize+FixedFrameSize), R1, R24
+				//	CMP R23, R24
+				//	BNE end
+				//	ADD $FixedFrameSize, R1, R25
+				//	MOVD R25, panic_argp(R22)
+ // end:
+ // NOP
+ //
+ // The NOP is needed to give the jumps somewhere to land.
+ // It is a liblink NOP, not a ppc64 NOP: it encodes to 0 instruction bytes.
+
+ q = obj.Appendp(q, c.newprog)
+
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REGG
+ q.From.Offset = 4 * int64(c.ctxt.Arch.PtrSize) // G.panic
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R22
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = ACMP
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R0
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R22
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = ABEQ
+ q.To.Type = obj.TYPE_BRANCH
+ p1 = q
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = REG_R22
+ q.From.Offset = 0 // Panic.argp
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R23
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize) + c.ctxt.Arch.FixedFrameSize
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R24
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = ACMP
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R23
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R24
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = ABNE
+ q.To.Type = obj.TYPE_BRANCH
+ p2 = q
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AADD
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = c.ctxt.Arch.FixedFrameSize
+ q.Reg = REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_R25
+
+ q = obj.Appendp(q, c.newprog)
+ q.As = AMOVD
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REG_R25
+ q.To.Type = obj.TYPE_MEM
+ q.To.Reg = REG_R22
+ q.To.Offset = 0 // Panic.argp
+
+ q = obj.Appendp(q, c.newprog)
+
+ q.As = obj.ANOP
+ p1.To.SetTarget(q)
+ p2.To.SetTarget(q)
+ }
+
+ case obj.ARET:
+ if p.From.Type == obj.TYPE_CONST {
+ c.ctxt.Diag("using BECOME (%v) is not supported!", p)
+ break
+ }
+
+ retTarget := p.To.Sym
+
+ if c.cursym.Func().Text.Mark&LEAF != 0 {
+ if autosize == 0 {
+ p.As = ABR
+ p.From = obj.Addr{}
+ if retTarget == nil {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+ } else {
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Sym = retTarget
+ }
+ p.Mark |= BRANCH
+ break
+ }
+
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(autosize)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = -autosize
+
+ q = c.newprog()
+ q.As = ABR
+ q.Pos = p.Pos
+ if retTarget == nil {
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+ } else {
+ q.To.Type = obj.TYPE_BRANCH
+ q.To.Sym = retTarget
+ }
+ q.Mark |= BRANCH
+ q.Spadj = +autosize
+
+ q.Link = p.Link
+ p.Link = q
+ break
+ }
+
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 0
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGTMP
+
+ q = c.newprog()
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = REGTMP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REG_LR
+
+ q.Link = p.Link
+ p.Link = q
+ p = q
+
+ if false {
+ // Debug bad returns
+ q = c.newprog()
+
+ q.As = AMOVD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_MEM
+ q.From.Offset = 0
+ q.From.Reg = REGTMP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGTMP
+
+ q.Link = p.Link
+ p.Link = q
+ p = q
+ }
+ prev := p
+ if autosize != 0 {
+ q = c.newprog()
+ q.As = AADD
+ q.Pos = p.Pos
+ q.From.Type = obj.TYPE_CONST
+ q.From.Offset = int64(autosize)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = REGSP
+ q.Spadj = -autosize
+
+ q.Link = p.Link
+ prev.Link = q
+ prev = q
+ }
+
+ q1 = c.newprog()
+ q1.As = ABR
+ q1.Pos = p.Pos
+ if retTarget == nil {
+ q1.To.Type = obj.TYPE_REG
+ q1.To.Reg = REG_LR
+ } else {
+ q1.To.Type = obj.TYPE_BRANCH
+ q1.To.Sym = retTarget
+ }
+ q1.Mark |= BRANCH
+ q1.Spadj = +autosize
+
+ q1.Link = q.Link
+ prev.Link = q1
+ case AADD:
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.From.Type == obj.TYPE_CONST {
+ p.Spadj = int32(-p.From.Offset)
+ }
+ case AMOVDU:
+ if p.To.Type == obj.TYPE_MEM && p.To.Reg == REGSP {
+ p.Spadj = int32(-p.To.Offset)
+ }
+ if p.From.Type == obj.TYPE_MEM && p.From.Reg == REGSP {
+ p.Spadj = int32(-p.From.Offset)
+ }
+ case obj.AGETCALLERPC:
+ if cursym.Leaf() {
+ /* MOVD LR, Rd */
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ } else {
+				/* MOVD 0(SP), Rd */
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGSP
+ }
+ }
+
+ if p.To.Type == obj.TYPE_REG && p.To.Reg == REGSP && p.Spadj == 0 && p.As != ACMPU {
+ f := c.cursym.Func()
+ if f.FuncFlag&abi.FuncFlagSPWrite == 0 {
+ c.cursym.Func().FuncFlag |= abi.FuncFlagSPWrite
+ if ctxt.Debugvlog || !ctxt.IsAsm {
+ ctxt.Logf("auto-SPWRITE: %s %v\n", c.cursym.Name, p)
+ if !ctxt.IsAsm {
+ ctxt.Diag("invalid auto-SPWRITE in non-assembly")
+ ctxt.DiagFlush()
+ log.Fatalf("bad SPWRITE")
+ }
+ }
+ }
+ }
+ }
+}
+
+/*
+// instruction scheduling
+
+ if(debug['Q'] == 0)
+ return;
+
+ curtext = nil;
+ q = nil; // p - 1
+ q1 = firstp; // top of block
+ o = 0; // count of instructions
+ for(p = firstp; p != nil; p = p1) {
+ p1 = p->link;
+ o++;
+ if(p->mark & NOSCHED){
+ if(q1 != p){
+ sched(q1, q);
+ }
+ for(; p != nil; p = p->link){
+ if(!(p->mark & NOSCHED))
+ break;
+ q = p;
+ }
+ p1 = p;
+ q1 = p;
+ o = 0;
+ continue;
+ }
+ if(p->mark & (LABEL|SYNC)) {
+ if(q1 != p)
+ sched(q1, q);
+ q1 = p;
+ o = 1;
+ }
+ if(p->mark & (BRANCH|SYNC)) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ if(o >= NSCHED) {
+ sched(q1, p);
+ q1 = p1;
+ o = 0;
+ }
+ q = p;
+ }
+*/
+
+func (c *ctxt9) stacksplit(p *obj.Prog, framesize int32) *obj.Prog {
+ if c.ctxt.Flag_maymorestack != "" {
+ if c.ctxt.Flag_shared || c.ctxt.Flag_dynlink {
+ // See the call to morestack for why these are
+ // complicated to support.
+ c.ctxt.Diag("maymorestack with -shared or -dynlink is not supported")
+ }
+
+ // Spill arguments. This has to happen before we open
+ // any more frame space.
+ p = c.cursym.Func().SpillRegisterArgs(p, c.newprog)
+
+ // Save LR and REGCTXT
+ frameSize := 8 + c.ctxt.Arch.FixedFrameSize
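+		// The MOVDU below stores LR (staged in REGTMP) at 0(SP) of
+		// the new frame; REGCTXT is then saved at 8(SP).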
+
+ // MOVD LR, REGTMP
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGTMP
+		// MOVDU REGTMP, -frameSize(SP)
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVDU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = -frameSize
+ p.To.Reg = REGSP
+ p.Spadj = int32(frameSize)
+
+ // MOVD REGCTXT, 8(SP)
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGCTXT
+ p.To.Type = obj.TYPE_MEM
+ p.To.Offset = 8
+ p.To.Reg = REGSP
+
+ // BL maymorestack
+ p = obj.Appendp(p, c.newprog)
+ p.As = ABL
+ p.To.Type = obj.TYPE_BRANCH
+ // See ../x86/obj6.go
+ p.To.Sym = c.ctxt.LookupABI(c.ctxt.Flag_maymorestack, c.cursym.ABI())
+
+ // Restore LR and REGCTXT
+
+ // MOVD 8(SP), REGCTXT
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 8
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGCTXT
+
+ // MOVD 0(SP), REGTMP
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = 0
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGTMP
+
+ // MOVD REGTMP, LR
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+
+		// ADD $frameSize, SP
+ p = obj.Appendp(p, c.newprog)
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = frameSize
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ p.Spadj = -int32(frameSize)
+
+ // Unspill arguments.
+ p = c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
+ }
+
+	// Save the entry point, skipping past the instructions above that
+	// set up R2 in shared mode and the maymorestack call.
+ startPred := p
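+	// startPred.Link, the first instruction of the stack check, is the
+	// target of the BR emitted after the call to morestack below.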
+
+ // MOVD g_stackguard(g), R22
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGG
+ p.From.Offset = 2 * int64(c.ctxt.Arch.PtrSize) // G.stackguard0
+ if c.cursym.CFunc() {
+ p.From.Offset = 3 * int64(c.ctxt.Arch.PtrSize) // G.stackguard1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R22
+
+ // Mark the stack bound check and morestack call async nonpreemptible.
+ // If we get preempted here, when resumed the preemption request is
+ // cleared, but we'll still call morestack, which will double the stack
+ // unnecessarily. See issue #35470.
+ p = c.ctxt.StartUnsafePoint(p, c.newprog)
+
+ var q *obj.Prog
+ if framesize <= abi.StackSmall {
+ // small stack: SP < stackguard
+		//	CMPU	stackguard, SP
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = ACMPU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R22
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REGSP
+ } else {
+ // large stack: SP-framesize < stackguard-StackSmall
+ offset := int64(framesize) - abi.StackSmall
+ if framesize > abi.StackBig {
+ // Such a large stack we need to protect against underflow.
+ // The runtime guarantees SP > objabi.StackBig, but
+ // framesize is large enough that SP-framesize may
+ // underflow, causing a direct comparison with the
+ // stack guard to incorrectly succeed. We explicitly
+ // guard against underflow.
+ //
+ // CMPU SP, $(framesize-StackSmall)
+ // BLT label-of-call-to-morestack
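+			//
+			// The compare is unsigned: if SP is below
+			// framesize-StackSmall, the ADD below would wrap around,
+			// so branch straight to the morestack call instead.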
+ if offset <= 0xffff {
+ p = obj.Appendp(p, c.newprog)
+ p.As = ACMPU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = offset
+ } else {
+ // Constant is too big for CMPU.
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R23
+
+ p = obj.Appendp(p, c.newprog)
+ p.As = ACMPU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R23
+ }
+
+ p = obj.Appendp(p, c.newprog)
+ q = p
+ p.As = ABLT
+ p.To.Type = obj.TYPE_BRANCH
+ }
+
+ // Check against the stack guard. We've ensured this won't underflow.
+		//	ADD	$-(framesize-StackSmall), SP, R23
+		//	CMPU	stackguard, R23
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = AADD
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -offset
+ p.Reg = REGSP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R23
+
+ p = obj.Appendp(p, c.newprog)
+ p.As = ACMPU
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R22
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R23
+ }
+
+ // q1: BLT done
+ p = obj.Appendp(p, c.newprog)
+ q1 := p
+
+ p.As = ABLT
+ p.To.Type = obj.TYPE_BRANCH
+
+ p = obj.Appendp(p, c.newprog)
+	p.As = obj.ANOP // zero-width placeholder
+
+ if q != nil {
+ q.To.SetTarget(p)
+ }
+
+ // Spill the register args that could be clobbered by the
+ // morestack code.
+
+ spill := c.cursym.Func().SpillRegisterArgs(p, c.newprog)
+
+ // MOVD LR, R5
+ p = obj.Appendp(spill, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_LR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R5
+
+ p = c.ctxt.EmitEntryStackMap(c.cursym, p, c.newprog)
+
+ var morestacksym *obj.LSym
+ if c.cursym.CFunc() {
+ morestacksym = c.ctxt.Lookup("runtime.morestackc")
+ } else if !c.cursym.Func().Text.From.Sym.NeedCtxt() {
+ morestacksym = c.ctxt.Lookup("runtime.morestack_noctxt")
+ } else {
+ morestacksym = c.ctxt.Lookup("runtime.morestack")
+ }
+
+ if NeedTOCpointer(c.ctxt) {
+ // In PPC64 PIC code, R2 is used as TOC pointer derived from R12
+ // which is the address of function entry point when entering
+ // the function. We need to preserve R2 across call to morestack.
+ // Fortunately, in shared mode, 8(SP) and 16(SP) are reserved in
+ // the caller's frame, but not used (0(SP) is caller's saved LR,
+ // 24(SP) is caller's saved R2). Use 8(SP) to save this function's R2.
+ // MOVD R2, 8(SP)
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R2
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = REGSP
+ p.To.Offset = 8
+ }
+
+ if c.ctxt.Flag_dynlink {
+ // Avoid calling morestack via a PLT when dynamically linking. The
+		// PLT stubs generated by the system linker on ppc64le include "std r2,
+		// 24(r1)" to save the TOC pointer in their caller's stack
+ // frame. Unfortunately (and necessarily) morestack is called before
+ // the function that calls it sets up its frame and so the PLT ends
+ // up smashing the saved TOC pointer for its caller's caller.
+ //
+ // According to the ABI documentation there is a mechanism to avoid
+ // the TOC save that the PLT stub does (put a R_PPC64_TOCSAVE
+ // relocation on the nop after the call to morestack) but at the time
+ // of writing it is not supported at all by gold and my attempt to
+ // use it with ld.bfd caused an internal linker error. So this hack
+ // seems preferable.
+
+ // MOVD $runtime.morestack(SB), R12
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Sym = morestacksym
+ p.From.Name = obj.NAME_GOTREF
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R12
+
+ // MOVD R12, LR
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = REG_R12
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+
+ // BL LR
+ p = obj.Appendp(p, c.newprog)
+ p.As = obj.ACALL
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_LR
+ } else {
+ // BL runtime.morestack(SB)
+ p = obj.Appendp(p, c.newprog)
+
+ p.As = ABL
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.Sym = morestacksym
+ }
+
+ if NeedTOCpointer(c.ctxt) {
+ // MOVD 8(SP), R2
+ p = obj.Appendp(p, c.newprog)
+ p.As = AMOVD
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = REGSP
+ p.From.Offset = 8
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = REG_R2
+ }
+
+ unspill := c.cursym.Func().UnspillRegisterArgs(p, c.newprog)
+ p = c.ctxt.EndUnsafePoint(unspill, c.newprog, -1)
+
+ // BR start
+ p = obj.Appendp(p, c.newprog)
+ p.As = ABR
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(startPred.Link)
+
+ // placeholder for q1's jump target
+ p = obj.Appendp(p, c.newprog)
+
+	p.As = obj.ANOP // zero-width placeholder
+ q1.To.SetTarget(p)
+
+ return p
+}
+
+// MMA accumulator to/from instructions are slightly ambiguous since
+// the argument represents both source and destination, specified as
+// an accumulator. It is treated as a unary destination to simplify
+// the code generation in ppc64map.
+var unaryDst = map[obj.As]bool{
+ AXXSETACCZ: true,
+ AXXMTACC: true,
+ AXXMFACC: true,
+}
+
+var Linkppc64 = obj.LinkArch{
+ Arch: sys.ArchPPC64,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span9,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ DWARFRegisters: PPC64DWARFRegisters,
+}
+
+var Linkppc64le = obj.LinkArch{
+ Arch: sys.ArchPPC64LE,
+ Init: buildop,
+ Preprocess: preprocess,
+ Assemble: span9,
+ Progedit: progedit,
+ UnaryDst: unaryDst,
+ DWARFRegisters: PPC64DWARFRegisters,
+}