Diffstat (limited to 'src/cmd/internal/obj/ppc64/asm9.go')
-rw-r--r-- | src/cmd/internal/obj/ppc64/asm9.go | 5342
1 file changed, 5342 insertions, 0 deletions
diff --git a/src/cmd/internal/obj/ppc64/asm9.go b/src/cmd/internal/obj/ppc64/asm9.go
new file mode 100644
index 0000000..c346043
--- /dev/null
+++ b/src/cmd/internal/obj/ppc64/asm9.go
@@ -0,0 +1,5342 @@
+// cmd/9l/optab.c, cmd/9l/asmout.c from Vita Nuova.
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2008 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2008 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package ppc64
+
+import (
+	"cmd/internal/obj"
+	"cmd/internal/objabi"
+	"encoding/binary"
+	"fmt"
+	"log"
+	"math"
+	"math/bits"
+	"sort"
+)
+
+// ctxt9 holds state while assembling a single function.
+// Each function gets a fresh ctxt9.
+// This allows for multiple functions to be safely concurrently assembled.
+type ctxt9 struct {
+	ctxt       *obj.Link
+	newprog    obj.ProgAlloc
+	cursym     *obj.LSym
+	autosize   int32
+	instoffset int64
+	pc         int64
+}
+
+// Instruction layout.
+
+const (
+	r0iszero = 1
+)
+
+type Optab struct {
+	as    obj.As // Opcode
+	a1    uint8  // p.From argument (obj.Addr). p is of type obj.Prog.
+	a2    uint8  // p.Reg argument (int16 Register)
+	a3    uint8  // p.RestArgs[0] (obj.AddrPos)
+	a4    uint8  // p.RestArgs[1]
+	a5    uint8  // p.RestArgs[2]
+	a6    uint8  // p.To (obj.Addr)
+	type_ int8   // cases in asmout below. E.g., 44 = st r,(ra+rb); 45 = ld (ra+rb), r
+	size  int8   // Text space in bytes to lay operation
+
+	// A prefixed instruction is generated by this opcode. This cannot be placed
+	// across a 64B PC address. Opcodes should not translate to more than one
+	// prefixed instruction. The prefixed instruction should be written first
+	// (e.g. when Optab.size > 8).
+	ispfx bool
+
+	asmout func(*ctxt9, *obj.Prog, *Optab, *[5]uint32)
+}
+
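Aside, as a reading aid for the operand table that follows (not part of the diff): each Optab entry pairs an opcode with the operand classes it accepts and the number of bytes of text it emits, and lookup picks the first matching entry for a given instruction. A minimal, self-contained sketch of that matching idea; the names miniOptab and lookup are invented for illustration, while the real work is done by oplook, cmp, and the xcmp table defined later in this file:

package main

import "fmt"

// miniOptab is a cut-down stand-in for an Optab row: an opcode name, the
// operand classes required for p.From, p.Reg, and p.To, and the text size.
type miniOptab struct {
	as         string
	a1, a2, a6 string
	size       int
}

// table lists a few ADD-like rows in the spirit of the entries below: a short
// constant fits a 4-byte form, a 64-bit constant needs a 12-byte sequence.
var table = []miniOptab{
	{as: "ADD", a1: "C_REG", a2: "C_REG", a6: "C_REG", size: 4},
	{as: "ADD", a1: "C_ADDCON", a2: "C_NONE", a6: "C_REG", size: 4},
	{as: "ADD", a1: "C_LCON", a2: "C_NONE", a6: "C_REG", size: 12},
}

// lookup scans the rows for an exact class match, mimicking how oplook scans
// the oprange slice for an opcode (the real code also allows compatible classes).
func lookup(as, a1, a2, a6 string) (miniOptab, bool) {
	for _, o := range table {
		if o.as == as && o.a1 == a1 && o.a2 == a2 && o.a6 == a6 {
			return o, true
		}
	}
	return miniOptab{}, false
}

func main() {
	// An ADD of a 16-bit constant into a register matches the 4-byte form.
	o, ok := lookup("ADD", "C_ADDCON", "C_NONE", "C_REG")
	fmt.Println(o.size, ok) // 4 true
}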
+// optab contains an array to be sliced of accepted operand combinations for an
+// instruction. Unused arguments and fields are not explicitly enumerated, and
+// should not be listed, for clarity. Unused arguments and values should always
+// assume the default value for the given type.
+//
+// optab does not list every valid ppc64 opcode; it enumerates representative
+// operand combinations for a class of instruction. The variable oprange indexes
+// all valid ppc64 opcodes.
+//
+// oprange is initialized to point to a slice within optab which contains the valid
+// operand combinations for a given instruction. This is initialized from buildop.
+//
+// Likewise, each slice of optab is dynamically sorted using the ocmp Sort interface
+// to arrange entries to minimize the text size of each opcode.
+var optab = []Optab{
+	{as: obj.ATEXT, a1: C_LOREG, a6: C_TEXTSIZE, type_: 0, size: 0},
+	{as: obj.ATEXT, a1: C_LOREG, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
+	{as: obj.ATEXT, a1: C_ADDR, a6: C_TEXTSIZE, type_: 0, size: 0},
+	{as: obj.ATEXT, a1: C_ADDR, a3: C_LCON, a6: C_TEXTSIZE, type_: 0, size: 0},
+	/* move register */
+	{as: AADD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AADD, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AADD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+	{as: AADD, a1: C_SCON, a6: C_REG, type_: 4, size: 4},
+	{as: AADD, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+	{as: AADD, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+	{as: AADD, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
+	{as: AADD, a1: C_UCON, a6: C_REG, type_: 20, size: 4},
+	{as: AADD, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 22, size: 8},
+	{as: AADD, a1: C_ANDCON, a6: C_REG, type_: 22, size: 8},
+	{as: AADD, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
+	{as: AADD, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
+	{as: AADDIS, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 20, size: 4},
+	{as: AADDIS, a1: C_ADDCON, a6: C_REG, type_: 20, size: 4},
+	{as: AADDC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AADDC, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AADDC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+	{as: AADDC, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+	{as: AADDC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
+	{as: AADDC, a1: C_LCON, a6: C_REG, type_: 22, size: 12},
+	{as: AAND, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, no literal */
+	{as: AAND, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+	{as: AANDCC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4},
+	{as: AANDCC, a1: C_REG, a6: C_REG, type_: 6, size: 4},
+	{as: AANDCC, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4},
+	{as: AANDCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4},
+	{as: AANDCC, a1: C_UCON, a6: C_REG, type_: 59, size: 4},
+	{as: AANDCC, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+	{as: AANDCC, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8},
+	{as: AANDCC, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8},
+	{as: AANDCC, a1: C_LCON, a6: C_REG, type_: 23, size: 12},
+	{as: AANDCC, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12},
+	{as: AANDISCC, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4},
+	{as: AANDISCC, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4},
+	{as: AMULLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AMULLW, a1: C_REG, a6: C_REG, type_: 2, size: 4},
+	{as: AMULLW, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+	{as: AMULLW, a1: C_ADDCON, a6: C_REG, type_: 4, size: 4},
+	{as: AMULLW, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 4, size: 4},
+	{as: AMULLW, a1: C_ANDCON, a6: C_REG, type_: 4, size: 4},
+	{as: AMULLW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 22, size: 12},
+	{as: AMULLW, a1: C_LCON, a6: C_REG, type_:
22, size: 12}, + {as: ASUBC, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, + {as: ASUBC, a1: C_REG, a6: C_REG, type_: 10, size: 4}, + {as: ASUBC, a1: C_REG, a3: C_ADDCON, a6: C_REG, type_: 27, size: 4}, + {as: ASUBC, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 28, size: 12}, + {as: AOR, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, /* logical, literal not cc (or/xor) */ + {as: AOR, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: AOR, a1: C_ANDCON, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 58, size: 4}, + {as: AOR, a1: C_UCON, a6: C_REG, type_: 59, size: 4}, + {as: AOR, a1: C_UCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: AOR, a1: C_ADDCON, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_ADDCON, a2: C_REG, a6: C_REG, type_: 23, size: 8}, + {as: AOR, a1: C_LCON, a6: C_REG, type_: 23, size: 12}, + {as: AOR, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 23, size: 12}, + {as: AORIS, a1: C_ANDCON, a6: C_REG, type_: 59, size: 4}, + {as: AORIS, a1: C_ANDCON, a2: C_REG, a6: C_REG, type_: 59, size: 4}, + {as: ADIVW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 2, size: 4}, /* op r1[,r2],r3 */ + {as: ADIVW, a1: C_REG, a6: C_REG, type_: 2, size: 4}, + {as: ASUB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 10, size: 4}, /* op r2[,r1],r3 */ + {as: ASUB, a1: C_REG, a6: C_REG, type_: 10, size: 4}, + {as: ASLW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASLD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLD, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_SCON, a6: C_REG, type_: 25, size: 4}, + {as: AEXTSWSLI, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 25, size: 4}, + {as: ASLW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 57, size: 4}, + {as: ASLW, a1: C_SCON, a6: C_REG, type_: 57, size: 4}, + {as: ASRAW, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAW, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAW, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAW, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, + {as: ASRAD, a1: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 6, size: 4}, + {as: ASRAD, a1: C_SCON, a2: C_REG, a6: C_REG, type_: 56, size: 4}, + {as: ASRAD, a1: C_SCON, a6: C_REG, type_: 56, size: 4}, + {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, + {as: ARLWMI, a1: C_SCON, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 102, size: 4}, + {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 63, size: 4}, + {as: ARLWMI, a1: C_REG, a2: C_REG, a3: C_SCON, a4: C_SCON, a6: C_REG, type_: 103, size: 4}, + {as: ACLRLSLWI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 62, size: 4}, + {as: ARLDMI, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 30, size: 4}, + {as: ARLDC, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, + {as: ARLDCL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 29, size: 4}, + {as: ARLDCL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_REG, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDICL, a1: C_SCON, a2: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: ARLDCL, a1: C_REG, a3: C_LCON, a6: C_REG, type_: 14, size: 4}, + {as: AFADD, a1: C_FREG, a6: C_FREG, type_: 2, size: 4}, + {as: AFADD, 
a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 2, size: 4}, + {as: AFABS, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, + {as: AFABS, a6: C_FREG, type_: 33, size: 4}, + {as: AFMADD, a1: C_FREG, a2: C_FREG, a3: C_FREG, a6: C_FREG, type_: 34, size: 4}, + {as: AFMUL, a1: C_FREG, a6: C_FREG, type_: 32, size: 4}, + {as: AFMUL, a1: C_FREG, a2: C_FREG, a6: C_FREG, type_: 32, size: 4}, + + {as: AMOVBU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVBU, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, + {as: AMOVBU, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, + + {as: AMOVBZU, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBZU, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVBZU, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVBZU, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, + + {as: AMOVHBR, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, + {as: AMOVHBR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, + + {as: AMOVB, a1: C_ADDR, a6: C_REG, type_: 75, size: 12}, + {as: AMOVB, a1: C_LOREG, a6: C_REG, type_: 36, size: 12}, + {as: AMOVB, a1: C_SOREG, a6: C_REG, type_: 8, size: 8}, + {as: AMOVB, a1: C_XOREG, a6: C_REG, type_: 109, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVB, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVB, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVB, a1: C_REG, a6: C_REG, type_: 13, size: 4}, + + {as: AMOVBZ, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVBZ, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVBZ, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVBZ, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, + {as: AMOVBZ, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVBZ, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVBZ, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVBZ, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVBZ, a1: C_REG, a6: C_REG, type_: 13, size: 4}, + + {as: AMOVD, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, + {as: AMOVD, a1: C_SACON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVD, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, + {as: AMOVD, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVD, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVD, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, + {as: AMOVD, a1: C_SOREG, a6: C_SPR, type_: 107, size: 8}, + {as: AMOVD, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVD, a1: C_TLS_LE, a6: C_REG, type_: 79, size: 8}, + {as: AMOVD, a1: C_TLS_IE, a6: C_REG, type_: 80, size: 12}, + {as: AMOVD, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVD, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVD, a1: C_SPR, a6: C_SOREG, type_: 106, size: 8}, + {as: AMOVD, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVD, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, + {as: AMOVD, a1: C_REG, a6: C_REG, type_: 13, size: 4}, + + {as: AMOVW, a1: C_ADDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_ANDCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_UCON, a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_LCON, a6: C_REG, type_: 19, size: 8}, + {as: AMOVW, a1: C_SACON, 
a6: C_REG, type_: 3, size: 4}, + {as: AMOVW, a1: C_LACON, a6: C_REG, type_: 26, size: 8}, + {as: AMOVW, a1: C_ADDR, a6: C_REG, type_: 75, size: 8}, + {as: AMOVW, a1: C_CREG, a6: C_REG, type_: 68, size: 4}, + {as: AMOVW, a1: C_SOREG, a6: C_REG, type_: 8, size: 4}, + {as: AMOVW, a1: C_LOREG, a6: C_REG, type_: 36, size: 8}, + {as: AMOVW, a1: C_XOREG, a6: C_REG, type_: 109, size: 4}, + {as: AMOVW, a1: C_SPR, a6: C_REG, type_: 66, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_ADDR, type_: 74, size: 8}, + {as: AMOVW, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_SOREG, type_: 7, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_LOREG, type_: 35, size: 8}, + {as: AMOVW, a1: C_REG, a6: C_XOREG, type_: 108, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_SPR, type_: 66, size: 4}, + {as: AMOVW, a1: C_REG, a6: C_REG, type_: 13, size: 4}, + + {as: AFMOVD, a1: C_ADDCON, a6: C_FREG, type_: 24, size: 8}, + {as: AFMOVD, a1: C_SOREG, a6: C_FREG, type_: 8, size: 4}, + {as: AFMOVD, a1: C_XOREG, a6: C_FREG, type_: 109, size: 4}, + {as: AFMOVD, a1: C_LOREG, a6: C_FREG, type_: 36, size: 8}, + {as: AFMOVD, a1: C_ZCON, a6: C_FREG, type_: 24, size: 4}, + {as: AFMOVD, a1: C_ADDR, a6: C_FREG, type_: 75, size: 8}, + {as: AFMOVD, a1: C_FREG, a6: C_FREG, type_: 33, size: 4}, + {as: AFMOVD, a1: C_FREG, a6: C_SOREG, type_: 7, size: 4}, + {as: AFMOVD, a1: C_FREG, a6: C_XOREG, type_: 108, size: 4}, + {as: AFMOVD, a1: C_FREG, a6: C_LOREG, type_: 35, size: 8}, + {as: AFMOVD, a1: C_FREG, a6: C_ADDR, type_: 74, size: 8}, + + {as: AFMOVSX, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, + {as: AFMOVSX, a1: C_FREG, a6: C_XOREG, type_: 44, size: 4}, + + {as: AFMOVSZ, a1: C_ZOREG, a6: C_FREG, type_: 45, size: 4}, + {as: AFMOVSZ, a1: C_XOREG, a6: C_FREG, type_: 45, size: 4}, + + {as: AMOVFL, a1: C_CREG, a6: C_CREG, type_: 67, size: 4}, + {as: AMOVFL, a1: C_FPSCR, a6: C_CREG, type_: 73, size: 4}, + {as: AMOVFL, a1: C_FPSCR, a6: C_FREG, type_: 53, size: 4}, + {as: AMOVFL, a1: C_FREG, a3: C_LCON, a6: C_FPSCR, type_: 64, size: 4}, + {as: AMOVFL, a1: C_FREG, a6: C_FPSCR, type_: 64, size: 4}, + {as: AMOVFL, a1: C_LCON, a6: C_FPSCR, type_: 65, size: 4}, + {as: AMOVFL, a1: C_REG, a6: C_CREG, type_: 69, size: 4}, + {as: AMOVFL, a1: C_REG, a6: C_LCON, type_: 69, size: 4}, + + {as: ASYSCALL, type_: 5, size: 4}, + {as: ASYSCALL, a1: C_REG, type_: 77, size: 12}, + {as: ASYSCALL, a1: C_SCON, type_: 77, size: 12}, + {as: ABEQ, a6: C_SBRA, type_: 16, size: 4}, + {as: ABEQ, a1: C_CREG, a6: C_SBRA, type_: 16, size: 4}, + {as: ABR, a6: C_LBRA, type_: 11, size: 4}, // b label + {as: ABR, a6: C_LBRAPIC, type_: 11, size: 8}, // b label; nop + {as: ABR, a6: C_LR, type_: 18, size: 4}, // blr + {as: ABR, a6: C_CTR, type_: 18, size: 4}, // bctr + {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_SBRA, type_: 16, size: 4}, // bc bo, bi, label + {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LBRA, type_: 17, size: 4}, // bc bo, bi, label + {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi + {as: ABC, a1: C_SCON, a2: C_CRBIT, a3: C_SCON, a6: C_LR, type_: 18, size: 4}, // bclr bo, bi, bh + {as: ABC, a1: C_SCON, a2: C_CRBIT, a6: C_CTR, type_: 18, size: 4}, // bcctr bo, bi + {as: ABDNZ, a6: C_SBRA, type_: 16, size: 4}, + {as: ASYNC, type_: 46, size: 4}, + {as: AWORD, a1: C_LCON, type_: 40, size: 4}, + {as: ADWORD, a1: C_64CON, type_: 31, size: 8}, + {as: ADWORD, a1: C_LACON, type_: 31, size: 8}, + {as: AADDME, a1: C_REG, a6: C_REG, type_: 47, size: 4}, + {as: AEXTSB, a1: C_REG, a6: C_REG, type_: 48, size: 4}, + {as: AEXTSB, a6: C_REG, type_: 
48, size: 4}, + {as: AISEL, a1: C_U5CON, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, + {as: AISEL, a1: C_CRBIT, a2: C_REG, a3: C_REG, a6: C_REG, type_: 84, size: 4}, + {as: ANEG, a1: C_REG, a6: C_REG, type_: 47, size: 4}, + {as: ANEG, a6: C_REG, type_: 47, size: 4}, + {as: AREM, a1: C_REG, a6: C_REG, type_: 50, size: 12}, + {as: AREM, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 12}, + {as: AREMU, a1: C_REG, a6: C_REG, type_: 50, size: 16}, + {as: AREMU, a1: C_REG, a2: C_REG, a6: C_REG, type_: 50, size: 16}, + {as: AREMD, a1: C_REG, a6: C_REG, type_: 51, size: 12}, + {as: AREMD, a1: C_REG, a2: C_REG, a6: C_REG, type_: 51, size: 12}, + {as: AMTFSB0, a1: C_SCON, type_: 52, size: 4}, + /* Other ISA 2.05+ instructions */ + {as: APOPCNTD, a1: C_REG, a6: C_REG, type_: 93, size: 4}, /* population count, x-form */ + {as: ACMPB, a1: C_REG, a2: C_REG, a6: C_REG, type_: 92, size: 4}, /* compare byte, x-form */ + {as: ACMPEQB, a1: C_REG, a2: C_REG, a6: C_CREG, type_: 92, size: 4}, /* compare equal byte, x-form, ISA 3.0 */ + {as: ACMPEQB, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: AFTDIV, a1: C_FREG, a2: C_FREG, a6: C_SCON, type_: 92, size: 4}, /* floating test for sw divide, x-form */ + {as: AFTSQRT, a1: C_FREG, a6: C_SCON, type_: 93, size: 4}, /* floating test for sw square root, x-form */ + {as: ACOPY, a1: C_REG, a6: C_REG, type_: 92, size: 4}, /* copy/paste facility, x-form */ + {as: ADARN, a1: C_SCON, a6: C_REG, type_: 92, size: 4}, /* deliver random number, x-form */ + {as: AMADDHD, a1: C_REG, a2: C_REG, a3: C_REG, a6: C_REG, type_: 83, size: 4}, /* multiply-add high/low doubleword, va-form */ + {as: AADDEX, a1: C_REG, a2: C_REG, a3: C_SCON, a6: C_REG, type_: 94, size: 4}, /* add extended using alternate carry, z23-form */ + {as: ACRAND, a1: C_CRBIT, a2: C_CRBIT, a6: C_CRBIT, type_: 2, size: 4}, /* logical ops for condition register bits xl-form */ + + /* Vector instructions */ + + /* Vector load */ + {as: ALVEBX, a1: C_XOREG, a6: C_VREG, type_: 45, size: 4}, /* vector load, x-form */ + + /* Vector store */ + {as: ASTVEBX, a1: C_VREG, a6: C_XOREG, type_: 44, size: 4}, /* vector store, x-form */ + + /* Vector logical */ + {as: AVAND, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector and, vx-form */ + {as: AVOR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector or, vx-form */ + + /* Vector add */ + {as: AVADDUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned modulo, vx-form */ + {as: AVADDCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add & write carry unsigned, vx-form */ + {as: AVADDUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add unsigned saturate, vx-form */ + {as: AVADDSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector add signed saturate, vx-form */ + {as: AVADDE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector add extended, va-form */ + + /* Vector subtract */ + {as: AVSUBUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned modulo, vx-form */ + {as: AVSUBCU, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract & write carry unsigned, vx-form */ + {as: AVSUBUS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract unsigned saturate, vx-form */ + {as: AVSUBSS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector subtract signed saturate, vx-form */ + {as: AVSUBE, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, 
type_: 83, size: 4}, /* vector subtract extended, va-form */ + + /* Vector multiply */ + {as: AVMULESB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector multiply, vx-form */ + {as: AVPMSUM, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector polynomial multiply & sum, vx-form */ + {as: AVMSUMUDM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector multiply-sum, va-form */ + + /* Vector rotate */ + {as: AVR, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector rotate, vx-form */ + + /* Vector shift */ + {as: AVS, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift, vx-form */ + {as: AVSA, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector shift algebraic, vx-form */ + {as: AVSOI, a1: C_ANDCON, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector shift by octet immediate, va-form */ + + /* Vector count */ + {as: AVCLZ, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector count leading zeros, vx-form */ + {as: AVPOPCNT, a1: C_VREG, a6: C_VREG, type_: 85, size: 4}, /* vector population count, vx-form */ + + /* Vector compare */ + {as: AVCMPEQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare equal, vc-form */ + {as: AVCMPGT, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare greater than, vc-form */ + {as: AVCMPNEZB, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector compare not equal, vx-form */ + + /* Vector merge */ + {as: AVMRGOW, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector merge odd word, vx-form */ + + /* Vector permute */ + {as: AVPERM, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector permute, va-form */ + + /* Vector bit permute */ + {as: AVBPERMQ, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector bit permute, vx-form */ + + /* Vector select */ + {as: AVSEL, a1: C_VREG, a2: C_VREG, a3: C_VREG, a6: C_VREG, type_: 83, size: 4}, /* vector select, va-form */ + + /* Vector splat */ + {as: AVSPLTB, a1: C_SCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector splat, vx-form */ + {as: AVSPLTB, a1: C_ADDCON, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, + {as: AVSPLTISB, a1: C_SCON, a6: C_VREG, type_: 82, size: 4}, /* vector splat immediate, vx-form */ + {as: AVSPLTISB, a1: C_ADDCON, a6: C_VREG, type_: 82, size: 4}, + + /* Vector AES */ + {as: AVCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES cipher, vx-form */ + {as: AVNCIPH, a1: C_VREG, a2: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES inverse cipher, vx-form */ + {as: AVSBOX, a1: C_VREG, a6: C_VREG, type_: 82, size: 4}, /* vector AES subbytes, vx-form */ + + /* Vector SHA */ + {as: AVSHASIGMA, a1: C_ANDCON, a2: C_VREG, a3: C_ANDCON, a6: C_VREG, type_: 82, size: 4}, /* vector SHA sigma, vx-form */ + + /* VSX vector load */ + {as: ALXVD2X, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx vector load, xx1-form */ + {as: ALXV, a1: C_SOREG, a6: C_VSREG, type_: 96, size: 4}, /* vsx vector load, dq-form */ + {as: ALXVL, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 98, size: 4}, /* vsx vector load length */ + + /* VSX vector store */ + {as: ASTXVD2X, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx vector store, xx1-form */ + {as: ASTXV, a1: C_VSREG, a6: C_SOREG, type_: 97, size: 4}, /* vsx vector store, dq-form */ + {as: ASTXVL, a1: C_VSREG, a2: C_REG, a6: C_REG, type_: 99, size: 4}, /* vsx vector store with length x-form */ + + /* VSX 
scalar load */ + {as: ALXSDX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar load, xx1-form */ + + /* VSX scalar store */ + {as: ASTXSDX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar store, xx1-form */ + + /* VSX scalar as integer load */ + {as: ALXSIWAX, a1: C_XOREG, a6: C_VSREG, type_: 87, size: 4}, /* vsx scalar as integer load, xx1-form */ + + /* VSX scalar store as integer */ + {as: ASTXSIWX, a1: C_VSREG, a6: C_XOREG, type_: 86, size: 4}, /* vsx scalar as integer store, xx1-form */ + + /* VSX move from VSR */ + {as: AMFVSRD, a1: C_VSREG, a6: C_REG, type_: 88, size: 4}, + {as: AMFVSRD, a1: C_FREG, a6: C_REG, type_: 88, size: 4}, + + /* VSX move to VSR */ + {as: AMTVSRD, a1: C_REG, a6: C_VSREG, type_: 104, size: 4}, + {as: AMTVSRD, a1: C_REG, a6: C_FREG, type_: 104, size: 4}, + {as: AMTVSRDD, a1: C_REG, a2: C_REG, a6: C_VSREG, type_: 104, size: 4}, + + /* VSX logical */ + {as: AXXLAND, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx and, xx3-form */ + {as: AXXLOR, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx or, xx3-form */ + + /* VSX select */ + {as: AXXSEL, a1: C_VSREG, a2: C_VSREG, a3: C_VSREG, a6: C_VSREG, type_: 91, size: 4}, /* vsx select, xx4-form */ + + /* VSX merge */ + {as: AXXMRGHW, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx merge, xx3-form */ + + /* VSX splat */ + {as: AXXSPLTW, a1: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 89, size: 4}, /* vsx splat, xx2-form */ + {as: AXXSPLTIB, a1: C_SCON, a6: C_VSREG, type_: 100, size: 4}, /* vsx splat, xx2-form */ + + /* VSX permute */ + {as: AXXPERM, a1: C_VSREG, a2: C_VSREG, a6: C_VSREG, type_: 90, size: 4}, /* vsx permute, xx3-form */ + + /* VSX shift */ + {as: AXXSLDWI, a1: C_VSREG, a2: C_VSREG, a3: C_SCON, a6: C_VSREG, type_: 90, size: 4}, /* vsx shift immediate, xx3-form */ + + /* VSX reverse bytes */ + {as: AXXBRQ, a1: C_VSREG, a6: C_VSREG, type_: 101, size: 4}, /* vsx reverse bytes */ + + /* VSX scalar FP-FP conversion */ + {as: AXSCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-fp conversion, xx2-form */ + + /* VSX vector FP-FP conversion */ + {as: AXVCVDPSP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-fp conversion, xx2-form */ + + /* VSX scalar FP-integer conversion */ + {as: AXSCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar fp-integer conversion, xx2-form */ + + /* VSX scalar integer-FP conversion */ + {as: AXSCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx scalar integer-fp conversion, xx2-form */ + + /* VSX vector FP-integer conversion */ + {as: AXVCVDPSXDS, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector fp-integer conversion, xx2-form */ + + /* VSX vector integer-FP conversion */ + {as: AXVCVSXDDP, a1: C_VSREG, a6: C_VSREG, type_: 89, size: 4}, /* vsx vector integer-fp conversion, xx2-form */ + + {as: ACMP, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, + {as: ACMP, a1: C_REG, a6: C_ADDCON, type_: 71, size: 4}, + {as: ACMP, a1: C_REG, a2: C_CREG, a6: C_ADDCON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a6: C_REG, type_: 70, size: 4}, + {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_REG, type_: 70, size: 4}, + {as: ACMPU, a1: C_REG, a6: C_ANDCON, type_: 71, size: 4}, + {as: ACMPU, a1: C_REG, a2: C_CREG, a6: C_ANDCON, type_: 71, size: 4}, + {as: AFCMPO, a1: C_FREG, a6: C_FREG, type_: 70, size: 4}, + {as: AFCMPO, a1: C_FREG, a2: C_CREG, a6: C_FREG, type_: 70, size: 4}, + {as: 
ATW, a1: C_LCON, a2: C_REG, a6: C_REG, type_: 60, size: 4}, + {as: ATW, a1: C_LCON, a2: C_REG, a6: C_ADDCON, type_: 61, size: 4}, + {as: ADCBF, a1: C_SOREG, type_: 43, size: 4}, + {as: ADCBF, a1: C_XOREG, type_: 43, size: 4}, + {as: ADCBF, a1: C_XOREG, a2: C_REG, a6: C_SCON, type_: 43, size: 4}, + {as: ADCBF, a1: C_SOREG, a6: C_SCON, type_: 43, size: 4}, + {as: ADCBF, a1: C_XOREG, a6: C_SCON, type_: 43, size: 4}, + {as: ASTDCCC, a1: C_REG, a2: C_REG, a6: C_XOREG, type_: 44, size: 4}, + {as: ASTDCCC, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, + {as: ALDAR, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, + {as: ALDAR, a1: C_XOREG, a3: C_ANDCON, a6: C_REG, type_: 45, size: 4}, + {as: AEIEIO, type_: 46, size: 4}, + {as: ATLBIE, a1: C_REG, type_: 49, size: 4}, + {as: ATLBIE, a1: C_SCON, a6: C_REG, type_: 49, size: 4}, + {as: ASLBMFEE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, + {as: ASLBMTE, a1: C_REG, a6: C_REG, type_: 55, size: 4}, + {as: ASTSW, a1: C_REG, a6: C_XOREG, type_: 44, size: 4}, + {as: ASTSW, a1: C_REG, a3: C_LCON, a6: C_ZOREG, type_: 41, size: 4}, + {as: ALSW, a1: C_XOREG, a6: C_REG, type_: 45, size: 4}, + {as: ALSW, a1: C_ZOREG, a3: C_LCON, a6: C_REG, type_: 42, size: 4}, + + {as: obj.AUNDEF, type_: 78, size: 4}, + {as: obj.APCDATA, a1: C_LCON, a6: C_LCON, type_: 0, size: 0}, + {as: obj.AFUNCDATA, a1: C_SCON, a6: C_ADDR, type_: 0, size: 0}, + {as: obj.ANOP, type_: 0, size: 0}, + {as: obj.ANOP, a1: C_LCON, type_: 0, size: 0}, // NOP operand variations added for #40689 + {as: obj.ANOP, a1: C_REG, type_: 0, size: 0}, // to preserve previous behavior + {as: obj.ANOP, a1: C_FREG, type_: 0, size: 0}, + {as: obj.ADUFFZERO, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.ADUFFCOPY, a6: C_LBRA, type_: 11, size: 4}, // same as ABR/ABL + {as: obj.APCALIGN, a1: C_LCON, type_: 0, size: 0}, // align code +} + +var oprange [ALAST & obj.AMask][]Optab + +var xcmp [C_NCLASS][C_NCLASS]bool + +// padding bytes to add to align code as requested. +func addpad(pc, a int64, ctxt *obj.Link, cursym *obj.LSym) int { + // For 16 and 32 byte alignment, there is a tradeoff + // between aligning the code and adding too many NOPs. + switch a { + case 8: + if pc&7 != 0 { + return 4 + } + case 16: + // Align to 16 bytes if possible but add at + // most 2 NOPs. + switch pc & 15 { + case 4, 12: + return 4 + case 8: + return 8 + } + case 32: + // Align to 32 bytes if possible but add at + // most 3 NOPs. + switch pc & 31 { + case 4, 20: + return 12 + case 8, 24: + return 8 + case 12, 28: + return 4 + } + // When 32 byte alignment is requested on Linux, + // promote the function's alignment to 32. On AIX + // the function alignment is not changed which might + // result in 16 byte alignment but that is still fine. + // TODO: alignment on AIX + if ctxt.Headtype != objabi.Haix && cursym.Func().Align < 32 { + cursym.Func().Align = 32 + } + default: + ctxt.Diag("Unexpected alignment: %d for PCALIGN directive\n", a) + } + return 0 +} + +// Get the implied register of a operand which doesn't specify one. These show up +// in handwritten asm like "MOVD R5, foosymbol" where a base register is not supplied, +// or "MOVD R5, foo+10(SP) or pseudo-register is used. The other common case is when +// generating constants in register like "MOVD $constant, Rx". 
+func (c *ctxt9) getimpliedreg(a *obj.Addr, p *obj.Prog) int { + class := oclass(a) + if class >= C_ZCON && class <= C_64CON { + return REGZERO + } + switch class { + case C_SACON, C_LACON: + return REGSP + case C_LOREG, C_SOREG, C_ZOREG, C_XOREG: + switch a.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC: + return REGSB + case obj.NAME_AUTO, obj.NAME_PARAM: + return REGSP + case obj.NAME_NONE: + return REGZERO + } + } + c.ctxt.Diag("failed to determine implied reg for class %v (%v)", DRconv(oclass(a)), p) + return 0 +} + +func span9(ctxt *obj.Link, cursym *obj.LSym, newprog obj.ProgAlloc) { + p := cursym.Func().Text + if p == nil || p.Link == nil { // handle external functions and ELF section symbols + return + } + + if oprange[AANDN&obj.AMask] == nil { + ctxt.Diag("ppc64 ops not initialized, call ppc64.buildop first") + } + + c := ctxt9{ctxt: ctxt, newprog: newprog, cursym: cursym, autosize: int32(p.To.Offset)} + + pc := int64(0) + p.Pc = pc + + var m int + var o *Optab + for p = p.Link; p != nil; p = p.Link { + p.Pc = pc + o = c.oplook(p) + m = int(o.size) + if m == 0 { + if p.As == obj.APCALIGN { + a := c.vregoff(&p.From) + m = addpad(pc, a, ctxt, cursym) + } else { + if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { + ctxt.Diag("zero-width instruction\n%v", p) + } + continue + } + } + pc += int64(m) + } + + c.cursym.Size = pc + + /* + * if any procedure is large enough to + * generate a large SBRA branch, then + * generate extra passes putting branches + * around jmps to fix. this is rare. + */ + bflag := 1 + + var otxt int64 + var q *obj.Prog + var out [5]uint32 + var falign int32 // Track increased alignment requirements for prefix. + for bflag != 0 { + bflag = 0 + pc = 0 + falign = 0 // Note, linker bumps function symbols to funcAlign. + for p = c.cursym.Func().Text.Link; p != nil; p = p.Link { + p.Pc = pc + o = c.oplook(p) + + // very large conditional branches + if (o.type_ == 16 || o.type_ == 17) && p.To.Target() != nil { + otxt = p.To.Target().Pc - pc + if otxt < -(1<<15)+10 || otxt >= (1<<15)-10 { + // Assemble the instruction with a target not too far to figure out BI and BO fields. + // If only the CTR or BI (the CR bit) are tested, the conditional branch can be inverted, + // and only one extra branch is needed to reach the target. + tgt := p.To.Target() + p.To.SetTarget(p.Link) + o.asmout(&c, p, o, &out) + p.To.SetTarget(tgt) + + bo := int64(out[0]>>21) & 31 + bi := int16((out[0] >> 16) & 31) + invertible := false + + if bo&0x14 == 0x14 { + // A conditional branch that is unconditionally taken. This cannot be inverted. + } else if bo&0x10 == 0x10 { + // A branch based on the value of CTR. Invert the CTR comparison against zero bit. + bo ^= 0x2 + invertible = true + } else if bo&0x04 == 0x04 { + // A branch based on CR bit. Invert the BI comparison bit. 
+ bo ^= 0x8 + invertible = true + } + + if invertible { + // Rewrite + // BC bo,...,far_away_target + // NEXT_INSN + // to: + // BC invert(bo),next_insn + // JMP far_away_target + // next_insn: + // NEXT_INSN + p.As = ABC + p.From = obj.Addr{Type: obj.TYPE_CONST, Name: obj.NAME_NONE, Offset: bo} + q = c.newprog() + q.As = ABR + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(p.To.Target()) + q.Link = p.Link + p.To.SetTarget(p.Link) + p.Link = q + p.Reg = REG_CRBIT0 + bi + } else { + // Rewrite + // BC ...,far_away_target + // NEXT_INSN + // to + // BC ...,tmp + // JMP next_insn + // tmp: + // JMP far_away_target + // next_insn: + // NEXT_INSN + q = c.newprog() + q.Link = p.Link + p.Link = q + q.As = ABR + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(p.To.Target()) + p.To.SetTarget(q) + q = c.newprog() + q.Link = p.Link + p.Link = q + q.As = ABR + q.To.Type = obj.TYPE_BRANCH + q.To.SetTarget(q.Link.Link) + } + bflag = 1 + } + } + + m = int(o.size) + if m == 0 { + if p.As == obj.APCALIGN { + a := c.vregoff(&p.From) + m = addpad(pc, a, ctxt, cursym) + } else { + if p.As != obj.ANOP && p.As != obj.AFUNCDATA && p.As != obj.APCDATA { + ctxt.Diag("zero-width instruction\n%v", p) + } + continue + } + } + + // Prefixed instructions cannot be placed across a 64B boundary. + // Mark and adjust the PC of those which do. A nop will be + // inserted during final assembly. + if o.ispfx { + mark := p.Mark &^ PFX_X64B + if pc&63 == 60 { + p.Pc += 4 + m += 4 + mark |= PFX_X64B + } + + // Marks may be adjusted if a too-far conditional branch is + // fixed up above. Likewise, inserting a NOP may cause a + // branch target to become too far away. We need to run + // another iteration and verify no additional changes + // are needed. + if mark != p.Mark { + bflag = 1 + p.Mark = mark + } + + // Check for 16 or 32B crossing of this prefixed insn. + // These do no require padding, but do require increasing + // the function alignment to prevent them from potentially + // crossing a 64B boundary when the linker assigns the final + // PC. + switch p.Pc & 31 { + case 28: // 32B crossing + falign = 64 + case 12: // 16B crossing + if falign < 64 { + falign = 32 + } + } + } + + pc += int64(m) + } + + c.cursym.Size = pc + } + + c.cursym.Size = pc + c.cursym.Func().Align = falign + c.cursym.Grow(c.cursym.Size) + + // lay out the code, emitting code and data relocations. 
+ + bp := c.cursym.P + nop := LOP_IRR(OP_ORI, REGZERO, REGZERO, 0) + var i int32 + for p := c.cursym.Func().Text.Link; p != nil; p = p.Link { + c.pc = p.Pc + o = c.oplook(p) + if int(o.size) > 4*len(out) { + log.Fatalf("out array in span9 is too small, need at least %d for %v", o.size/4, p) + } + // asmout is not set up to add large amounts of padding + if o.type_ == 0 && p.As == obj.APCALIGN { + aln := c.vregoff(&p.From) + v := addpad(p.Pc, aln, c.ctxt, c.cursym) + if v > 0 { + // Same padding instruction for all + for i = 0; i < int32(v/4); i++ { + c.ctxt.Arch.ByteOrder.PutUint32(bp, nop) + bp = bp[4:] + } + } + } else { + if p.Mark&PFX_X64B != 0 { + c.ctxt.Arch.ByteOrder.PutUint32(bp, nop) + bp = bp[4:] + } + o.asmout(&c, p, o, &out) + for i = 0; i < int32(o.size/4); i++ { + c.ctxt.Arch.ByteOrder.PutUint32(bp, out[i]) + bp = bp[4:] + } + } + } +} + +func isint32(v int64) bool { + return int64(int32(v)) == v +} + +func isuint32(v uint64) bool { + return uint64(uint32(v)) == v +} + +func (c *ctxt9) aclassreg(reg int16) int { + if REG_R0 <= reg && reg <= REG_R31 { + return C_REGP + int(reg&1) + } + if REG_F0 <= reg && reg <= REG_F31 { + return C_FREGP + int(reg&1) + } + if REG_V0 <= reg && reg <= REG_V31 { + return C_VREG + } + if REG_VS0 <= reg && reg <= REG_VS63 { + return C_VSREGP + int(reg&1) + } + if REG_CR0 <= reg && reg <= REG_CR7 || reg == REG_CR { + return C_CREG + } + if REG_CR0LT <= reg && reg <= REG_CR7SO { + return C_CRBIT + } + if REG_SPR0 <= reg && reg <= REG_SPR0+1023 { + switch reg { + case REG_LR: + return C_LR + + case REG_XER: + return C_XER + + case REG_CTR: + return C_CTR + } + + return C_SPR + } + if REG_A0 <= reg && reg <= REG_A7 { + return C_AREG + } + if reg == REG_FPSCR { + return C_FPSCR + } + return C_GOK +} + +func (c *ctxt9) aclass(a *obj.Addr) int { + switch a.Type { + case obj.TYPE_NONE: + return C_NONE + + case obj.TYPE_REG: + return c.aclassreg(a.Reg) + + case obj.TYPE_MEM: + if a.Index != 0 { + if a.Name != obj.NAME_NONE || a.Offset != 0 { + c.ctxt.Logf("Unexpected Instruction operand index %d offset %d class %d \n", a.Index, a.Offset, a.Class) + + } + return C_XOREG + } + switch a.Name { + case obj.NAME_GOTREF, obj.NAME_TOCREF: + return C_ADDR + + case obj.NAME_EXTERN, + obj.NAME_STATIC: + c.instoffset = a.Offset + if a.Sym == nil { + break + } else if a.Sym.Type == objabi.STLSBSS { + // For PIC builds, use 12 byte got initial-exec TLS accesses. + if c.ctxt.Flag_shared { + return C_TLS_IE + } + // Otherwise, use 8 byte local-exec TLS accesses. + return C_TLS_LE + } else { + return C_ADDR + } + + case obj.NAME_AUTO: + c.instoffset = int64(c.autosize) + a.Offset + + if c.instoffset >= -BIG && c.instoffset < BIG { + return C_SOREG + } + return C_LOREG + + case obj.NAME_PARAM: + c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize + if c.instoffset >= -BIG && c.instoffset < BIG { + return C_SOREG + } + return C_LOREG + + case obj.NAME_NONE: + c.instoffset = a.Offset + if a.Offset == 0 && a.Index == 0 { + return C_ZOREG + } else if c.instoffset >= -BIG && c.instoffset < BIG { + return C_SOREG + } else { + return C_LOREG + } + } + + return C_GOK + + case obj.TYPE_TEXTSIZE: + return C_TEXTSIZE + + case obj.TYPE_FCONST: + // The only cases where FCONST will occur are with float64 +/- 0. + // All other float constants are generated in memory. 
+ f64 := a.Val.(float64) + if f64 == 0 { + if math.Signbit(f64) { + return C_ADDCON + } + return C_ZCON + } + log.Fatalf("Unexpected nonzero FCONST operand %v", a) + + case obj.TYPE_CONST, + obj.TYPE_ADDR: + switch a.Name { + case obj.NAME_NONE: + c.instoffset = a.Offset + if a.Reg != 0 { + if -BIG <= c.instoffset && c.instoffset < BIG { + return C_SACON + } + if isint32(c.instoffset) { + return C_LACON + } + return C_DACON + } + + case obj.NAME_EXTERN, + obj.NAME_STATIC: + s := a.Sym + if s == nil { + return C_GOK + } + c.instoffset = a.Offset + return C_LACON + + case obj.NAME_AUTO: + c.instoffset = int64(c.autosize) + a.Offset + if c.instoffset >= -BIG && c.instoffset < BIG { + return C_SACON + } + return C_LACON + + case obj.NAME_PARAM: + c.instoffset = int64(c.autosize) + a.Offset + c.ctxt.Arch.FixedFrameSize + if c.instoffset >= -BIG && c.instoffset < BIG { + return C_SACON + } + return C_LACON + + default: + return C_GOK + } + + if c.instoffset >= 0 { + sbits := bits.Len64(uint64(c.instoffset)) + switch { + case sbits <= 5: + return C_ZCON + sbits + case sbits <= 8: + return C_U8CON + case sbits <= 15: + return C_U15CON + case sbits <= 16: + return C_U16CON + case sbits <= 31: + // Special case, a positive int32 value which is a multiple of 2^16 + if c.instoffset&0xFFFF == 0 { + return C_U3216CON + } + return C_U32CON + case sbits <= 32: + return C_U32CON + case sbits <= 33: + return C_S34CON + default: + return C_64CON + } + } else { + sbits := bits.Len64(uint64(^c.instoffset)) + switch { + case sbits <= 15: + return C_S16CON + case sbits <= 31: + // Special case, a negative int32 value which is a multiple of 2^16 + if c.instoffset&0xFFFF == 0 { + return C_S3216CON + } + return C_S32CON + case sbits <= 33: + return C_S34CON + default: + return C_64CON + } + } + + case obj.TYPE_BRANCH: + if a.Sym != nil && c.ctxt.Flag_dynlink { + return C_LBRAPIC + } + return C_SBRA + } + + return C_GOK +} + +func prasm(p *obj.Prog) { + fmt.Printf("%v\n", p) +} + +func (c *ctxt9) oplook(p *obj.Prog) *Optab { + a1 := int(p.Optab) + if a1 != 0 { + return &optab[a1-1] + } + a1 = int(p.From.Class) + if a1 == 0 { + a1 = c.aclass(&p.From) + 1 + p.From.Class = int8(a1) + } + a1-- + + argsv := [3]int{C_NONE + 1, C_NONE + 1, C_NONE + 1} + for i, ap := range p.RestArgs { + argsv[i] = int(ap.Addr.Class) + if argsv[i] == 0 { + argsv[i] = c.aclass(&ap.Addr) + 1 + ap.Addr.Class = int8(argsv[i]) + } + + } + a3 := argsv[0] - 1 + a4 := argsv[1] - 1 + a5 := argsv[2] - 1 + + a6 := int(p.To.Class) + if a6 == 0 { + a6 = c.aclass(&p.To) + 1 + p.To.Class = int8(a6) + } + a6-- + + a2 := C_NONE + if p.Reg != 0 { + a2 = c.aclassreg(p.Reg) + } + + // c.ctxt.Logf("oplook %v %d %d %d %d\n", p, a1, a2, a3, a4, a5, a6) + ops := oprange[p.As&obj.AMask] + c1 := &xcmp[a1] + c2 := &xcmp[a2] + c3 := &xcmp[a3] + c4 := &xcmp[a4] + c5 := &xcmp[a5] + c6 := &xcmp[a6] + for i := range ops { + op := &ops[i] + if c1[op.a1] && c2[op.a2] && c3[op.a3] && c4[op.a4] && c5[op.a5] && c6[op.a6] { + p.Optab = uint16(cap(optab) - cap(ops) + i + 1) + return op + } + } + + c.ctxt.Diag("illegal combination %v %v %v %v %v %v %v", p.As, DRconv(a1), DRconv(a2), DRconv(a3), DRconv(a4), DRconv(a5), DRconv(a6)) + prasm(p) + if ops == nil { + ops = optab + } + return &ops[0] +} + +// Compare two operand types (ex C_REG, or C_SCON) +// and return true if b is compatible with a. +// +// Argument comparison isn't reflexitive, so care must be taken. +// a is the argument type as found in optab, b is the argument as +// fitted by aclass. 
+func cmp(a int, b int) bool { + if a == b { + return true + } + switch a { + + case C_SPR: + if b == C_LR || b == C_XER || b == C_CTR { + return true + } + + case C_U1CON: + return cmp(C_ZCON, b) + case C_U2CON: + return cmp(C_U1CON, b) + case C_U3CON: + return cmp(C_U2CON, b) + case C_U4CON: + return cmp(C_U3CON, b) + case C_U5CON: + return cmp(C_U4CON, b) + case C_U8CON: + return cmp(C_U5CON, b) + case C_U15CON: + return cmp(C_U8CON, b) + case C_U16CON: + return cmp(C_U15CON, b) + + case C_S16CON: + return cmp(C_U15CON, b) + case C_32CON: + return cmp(C_S16CON, b) || cmp(C_U16CON, b) || cmp(C_32S16CON, b) + case C_S34CON: + return cmp(C_32CON, b) + case C_64CON: + return cmp(C_S34CON, b) + + case C_32S16CON: + return cmp(C_ZCON, b) + + case C_LACON: + return cmp(C_SACON, b) + + case C_LBRA: + return cmp(C_SBRA, b) + + case C_SOREG: + return cmp(C_ZOREG, b) + + case C_LOREG: + return cmp(C_SOREG, b) + + case C_XOREG: + return cmp(C_REG, b) || cmp(C_ZOREG, b) + + // An even/odd register input always matches the regular register types. + case C_REG: + return cmp(C_REGP, b) || (b == C_ZCON && r0iszero != 0) + case C_FREG: + return cmp(C_FREGP, b) + case C_VSREG: + /* Allow any VR argument as a VSR operand. */ + return cmp(C_VSREGP, b) || cmp(C_VREG, b) + + case C_ANY: + return true + } + + return false +} + +// Used when sorting the optab. Sorting is +// done in a way so that the best choice of +// opcode/operand combination is considered first. +func optabLess(i, j int) bool { + p1 := &optab[i] + p2 := &optab[j] + n := int(p1.as) - int(p2.as) + // same opcode + if n != 0 { + return n < 0 + } + // Consider those that generate fewer + // instructions first. + n = int(p1.size) - int(p2.size) + if n != 0 { + return n < 0 + } + // operand order should match + // better choices first + n = int(p1.a1) - int(p2.a1) + if n != 0 { + return n < 0 + } + n = int(p1.a2) - int(p2.a2) + if n != 0 { + return n < 0 + } + n = int(p1.a3) - int(p2.a3) + if n != 0 { + return n < 0 + } + n = int(p1.a4) - int(p2.a4) + if n != 0 { + return n < 0 + } + n = int(p1.a5) - int(p2.a5) + if n != 0 { + return n < 0 + } + n = int(p1.a6) - int(p2.a6) + if n != 0 { + return n < 0 + } + return false +} + +// Add an entry to the opcode table for +// a new opcode b0 with the same operand combinations +// as opcode a. +func opset(a, b0 obj.As) { + oprange[a&obj.AMask] = oprange[b0] +} + +// Build the opcode table +func buildop(ctxt *obj.Link) { + if oprange[AANDN&obj.AMask] != nil { + // Already initialized; stop now. + // This happens in the cmd/asm tests, + // each of which re-initializes the arch. + return + } + + for i := 0; i < C_NCLASS; i++ { + for n := 0; n < C_NCLASS; n++ { + if cmp(n, i) { + xcmp[i][n] = true + } + } + } + for i := range optab { + // Use the legacy assembler function if none provided. + if optab[i].asmout == nil { + optab[i].asmout = asmout + } + } + // Append the generated entries, sort, and fill out oprange. + optab = append(optab, optabGen...) 
+ sort.Slice(optab, optabLess) + for i := 0; i < len(optab); { + r := optab[i].as + r0 := r & obj.AMask + start := i + for i < len(optab) && optab[i].as == r { + i++ + } + oprange[r0] = optab[start:i] + + switch r { + default: + if !opsetGen(r) { + ctxt.Diag("unknown op in build: %v", r) + log.Fatalf("instruction missing from switch in asm9.go:buildop: %v", r) + } + + case ADCBF: /* unary indexed: op (b+a); op (b) */ + opset(ADCBI, r0) + + opset(ADCBST, r0) + opset(ADCBT, r0) + opset(ADCBTST, r0) + opset(ADCBZ, r0) + opset(AICBI, r0) + + case ASTDCCC: /* indexed store: op s,(b+a); op s,(b) */ + opset(ASTWCCC, r0) + opset(ASTHCCC, r0) + opset(ASTBCCC, r0) + + case AREM: /* macro */ + opset(AREM, r0) + + case AREMU: + opset(AREMU, r0) + + case AREMD: + opset(AREMDU, r0) + + case AMULLW: + opset(AMULLD, r0) + + case ADIVW: /* op Rb[,Ra],Rd */ + opset(AMULHW, r0) + + opset(AMULHWCC, r0) + opset(AMULHWU, r0) + opset(AMULHWUCC, r0) + opset(AMULLWCC, r0) + opset(AMULLWVCC, r0) + opset(AMULLWV, r0) + opset(ADIVWCC, r0) + opset(ADIVWV, r0) + opset(ADIVWVCC, r0) + opset(ADIVWU, r0) + opset(ADIVWUCC, r0) + opset(ADIVWUV, r0) + opset(ADIVWUVCC, r0) + opset(AMODUD, r0) + opset(AMODUW, r0) + opset(AMODSD, r0) + opset(AMODSW, r0) + opset(AADDCC, r0) + opset(AADDCV, r0) + opset(AADDCVCC, r0) + opset(AADDV, r0) + opset(AADDVCC, r0) + opset(AADDE, r0) + opset(AADDECC, r0) + opset(AADDEV, r0) + opset(AADDEVCC, r0) + opset(AMULHD, r0) + opset(AMULHDCC, r0) + opset(AMULHDU, r0) + opset(AMULHDUCC, r0) + opset(AMULLDCC, r0) + opset(AMULLDVCC, r0) + opset(AMULLDV, r0) + opset(ADIVD, r0) + opset(ADIVDCC, r0) + opset(ADIVDE, r0) + opset(ADIVDEU, r0) + opset(ADIVDECC, r0) + opset(ADIVDEUCC, r0) + opset(ADIVDVCC, r0) + opset(ADIVDV, r0) + opset(ADIVDU, r0) + opset(ADIVDUV, r0) + opset(ADIVDUVCC, r0) + opset(ADIVDUCC, r0) + + case ACRAND: + opset(ACRANDN, r0) + opset(ACREQV, r0) + opset(ACRNAND, r0) + opset(ACRNOR, r0) + opset(ACROR, r0) + opset(ACRORN, r0) + opset(ACRXOR, r0) + + case APOPCNTD: /* popcntd, popcntw, popcntb, cnttzw, cnttzd */ + opset(APOPCNTW, r0) + opset(APOPCNTB, r0) + opset(ACNTTZW, r0) + opset(ACNTTZWCC, r0) + opset(ACNTTZD, r0) + opset(ACNTTZDCC, r0) + + case ACOPY: /* copy, paste. 
*/ + opset(APASTECC, r0) + + case AMADDHD: /* maddhd, maddhdu, maddld */ + opset(AMADDHDU, r0) + opset(AMADDLD, r0) + + case AMOVBZ: /* lbz, stz, rlwm(r/r), lhz, lha, stz, and x variants */ + opset(AMOVH, r0) + opset(AMOVHZ, r0) + + case AMOVBZU: /* lbz[x]u, stb[x]u, lhz[x]u, lha[x]u, sth[u]x, ld[x]u, std[u]x */ + opset(AMOVHU, r0) + + opset(AMOVHZU, r0) + opset(AMOVWU, r0) + opset(AMOVWZU, r0) + opset(AMOVDU, r0) + opset(AMOVMW, r0) + + case ALVEBX: /* lvebx, lvehx, lvewx, lvx, lvxl, lvsl, lvsr */ + opset(ALVEHX, r0) + opset(ALVEWX, r0) + opset(ALVX, r0) + opset(ALVXL, r0) + opset(ALVSL, r0) + opset(ALVSR, r0) + + case ASTVEBX: /* stvebx, stvehx, stvewx, stvx, stvxl */ + opset(ASTVEHX, r0) + opset(ASTVEWX, r0) + opset(ASTVX, r0) + opset(ASTVXL, r0) + + case AVAND: /* vand, vandc, vnand */ + opset(AVAND, r0) + opset(AVANDC, r0) + opset(AVNAND, r0) + + case AVMRGOW: /* vmrgew, vmrgow */ + opset(AVMRGEW, r0) + + case AVOR: /* vor, vorc, vxor, vnor, veqv */ + opset(AVOR, r0) + opset(AVORC, r0) + opset(AVXOR, r0) + opset(AVNOR, r0) + opset(AVEQV, r0) + + case AVADDUM: /* vaddubm, vadduhm, vadduwm, vaddudm, vadduqm */ + opset(AVADDUBM, r0) + opset(AVADDUHM, r0) + opset(AVADDUWM, r0) + opset(AVADDUDM, r0) + opset(AVADDUQM, r0) + + case AVADDCU: /* vaddcuq, vaddcuw */ + opset(AVADDCUQ, r0) + opset(AVADDCUW, r0) + + case AVADDUS: /* vaddubs, vadduhs, vadduws */ + opset(AVADDUBS, r0) + opset(AVADDUHS, r0) + opset(AVADDUWS, r0) + + case AVADDSS: /* vaddsbs, vaddshs, vaddsws */ + opset(AVADDSBS, r0) + opset(AVADDSHS, r0) + opset(AVADDSWS, r0) + + case AVADDE: /* vaddeuqm, vaddecuq */ + opset(AVADDEUQM, r0) + opset(AVADDECUQ, r0) + + case AVSUBUM: /* vsububm, vsubuhm, vsubuwm, vsubudm, vsubuqm */ + opset(AVSUBUBM, r0) + opset(AVSUBUHM, r0) + opset(AVSUBUWM, r0) + opset(AVSUBUDM, r0) + opset(AVSUBUQM, r0) + + case AVSUBCU: /* vsubcuq, vsubcuw */ + opset(AVSUBCUQ, r0) + opset(AVSUBCUW, r0) + + case AVSUBUS: /* vsububs, vsubuhs, vsubuws */ + opset(AVSUBUBS, r0) + opset(AVSUBUHS, r0) + opset(AVSUBUWS, r0) + + case AVSUBSS: /* vsubsbs, vsubshs, vsubsws */ + opset(AVSUBSBS, r0) + opset(AVSUBSHS, r0) + opset(AVSUBSWS, r0) + + case AVSUBE: /* vsubeuqm, vsubecuq */ + opset(AVSUBEUQM, r0) + opset(AVSUBECUQ, r0) + + case AVMULESB: /* vmulesb, vmulosb, vmuleub, vmuloub, vmulosh, vmulouh, vmulesw, vmulosw, vmuleuw, vmulouw, vmuluwm */ + opset(AVMULOSB, r0) + opset(AVMULEUB, r0) + opset(AVMULOUB, r0) + opset(AVMULESH, r0) + opset(AVMULOSH, r0) + opset(AVMULEUH, r0) + opset(AVMULOUH, r0) + opset(AVMULESW, r0) + opset(AVMULOSW, r0) + opset(AVMULEUW, r0) + opset(AVMULOUW, r0) + opset(AVMULUWM, r0) + case AVPMSUM: /* vpmsumb, vpmsumh, vpmsumw, vpmsumd */ + opset(AVPMSUMB, r0) + opset(AVPMSUMH, r0) + opset(AVPMSUMW, r0) + opset(AVPMSUMD, r0) + + case AVR: /* vrlb, vrlh, vrlw, vrld */ + opset(AVRLB, r0) + opset(AVRLH, r0) + opset(AVRLW, r0) + opset(AVRLD, r0) + + case AVS: /* vs[l,r], vs[l,r]o, vs[l,r]b, vs[l,r]h, vs[l,r]w, vs[l,r]d */ + opset(AVSLB, r0) + opset(AVSLH, r0) + opset(AVSLW, r0) + opset(AVSL, r0) + opset(AVSLO, r0) + opset(AVSRB, r0) + opset(AVSRH, r0) + opset(AVSRW, r0) + opset(AVSR, r0) + opset(AVSRO, r0) + opset(AVSLD, r0) + opset(AVSRD, r0) + + case AVSA: /* vsrab, vsrah, vsraw, vsrad */ + opset(AVSRAB, r0) + opset(AVSRAH, r0) + opset(AVSRAW, r0) + opset(AVSRAD, r0) + + case AVSOI: /* vsldoi */ + opset(AVSLDOI, r0) + + case AVCLZ: /* vclzb, vclzh, vclzw, vclzd */ + opset(AVCLZB, r0) + opset(AVCLZH, r0) + opset(AVCLZW, r0) + opset(AVCLZD, r0) + + case AVPOPCNT: /* vpopcntb, vpopcnth, vpopcntw, vpopcntd */ 
+ opset(AVPOPCNTB, r0) + opset(AVPOPCNTH, r0) + opset(AVPOPCNTW, r0) + opset(AVPOPCNTD, r0) + + case AVCMPEQ: /* vcmpequb[.], vcmpequh[.], vcmpequw[.], vcmpequd[.] */ + opset(AVCMPEQUB, r0) + opset(AVCMPEQUBCC, r0) + opset(AVCMPEQUH, r0) + opset(AVCMPEQUHCC, r0) + opset(AVCMPEQUW, r0) + opset(AVCMPEQUWCC, r0) + opset(AVCMPEQUD, r0) + opset(AVCMPEQUDCC, r0) + + case AVCMPGT: /* vcmpgt[u,s]b[.], vcmpgt[u,s]h[.], vcmpgt[u,s]w[.], vcmpgt[u,s]d[.] */ + opset(AVCMPGTUB, r0) + opset(AVCMPGTUBCC, r0) + opset(AVCMPGTUH, r0) + opset(AVCMPGTUHCC, r0) + opset(AVCMPGTUW, r0) + opset(AVCMPGTUWCC, r0) + opset(AVCMPGTUD, r0) + opset(AVCMPGTUDCC, r0) + opset(AVCMPGTSB, r0) + opset(AVCMPGTSBCC, r0) + opset(AVCMPGTSH, r0) + opset(AVCMPGTSHCC, r0) + opset(AVCMPGTSW, r0) + opset(AVCMPGTSWCC, r0) + opset(AVCMPGTSD, r0) + opset(AVCMPGTSDCC, r0) + + case AVCMPNEZB: /* vcmpnezb[.] */ + opset(AVCMPNEZBCC, r0) + opset(AVCMPNEB, r0) + opset(AVCMPNEBCC, r0) + opset(AVCMPNEH, r0) + opset(AVCMPNEHCC, r0) + opset(AVCMPNEW, r0) + opset(AVCMPNEWCC, r0) + + case AVPERM: /* vperm */ + opset(AVPERMXOR, r0) + opset(AVPERMR, r0) + + case AVBPERMQ: /* vbpermq, vbpermd */ + opset(AVBPERMD, r0) + + case AVSEL: /* vsel */ + opset(AVSEL, r0) + + case AVSPLTB: /* vspltb, vsplth, vspltw */ + opset(AVSPLTH, r0) + opset(AVSPLTW, r0) + + case AVSPLTISB: /* vspltisb, vspltish, vspltisw */ + opset(AVSPLTISH, r0) + opset(AVSPLTISW, r0) + + case AVCIPH: /* vcipher, vcipherlast */ + opset(AVCIPHER, r0) + opset(AVCIPHERLAST, r0) + + case AVNCIPH: /* vncipher, vncipherlast */ + opset(AVNCIPHER, r0) + opset(AVNCIPHERLAST, r0) + + case AVSBOX: /* vsbox */ + opset(AVSBOX, r0) + + case AVSHASIGMA: /* vshasigmaw, vshasigmad */ + opset(AVSHASIGMAW, r0) + opset(AVSHASIGMAD, r0) + + case ALXVD2X: /* lxvd2x, lxvdsx, lxvw4x, lxvh8x, lxvb16x */ + opset(ALXVDSX, r0) + opset(ALXVW4X, r0) + opset(ALXVH8X, r0) + opset(ALXVB16X, r0) + + case ALXV: /* lxv */ + opset(ALXV, r0) + + case ALXVL: /* lxvl, lxvll, lxvx */ + opset(ALXVLL, r0) + opset(ALXVX, r0) + + case ASTXVD2X: /* stxvd2x, stxvdsx, stxvw4x, stxvh8x, stxvb16x */ + opset(ASTXVW4X, r0) + opset(ASTXVH8X, r0) + opset(ASTXVB16X, r0) + + case ASTXV: /* stxv */ + opset(ASTXV, r0) + + case ASTXVL: /* stxvl, stxvll, stvx */ + opset(ASTXVLL, r0) + opset(ASTXVX, r0) + + case ALXSDX: /* lxsdx */ + opset(ALXSDX, r0) + + case ASTXSDX: /* stxsdx */ + opset(ASTXSDX, r0) + + case ALXSIWAX: /* lxsiwax, lxsiwzx */ + opset(ALXSIWZX, r0) + + case ASTXSIWX: /* stxsiwx */ + opset(ASTXSIWX, r0) + + case AMFVSRD: /* mfvsrd, mfvsrwz (and extended mnemonics), mfvsrld */ + opset(AMFFPRD, r0) + opset(AMFVRD, r0) + opset(AMFVSRWZ, r0) + opset(AMFVSRLD, r0) + + case AMTVSRD: /* mtvsrd, mtvsrwa, mtvsrwz (and extended mnemonics), mtvsrdd, mtvsrws */ + opset(AMTFPRD, r0) + opset(AMTVRD, r0) + opset(AMTVSRWA, r0) + opset(AMTVSRWZ, r0) + opset(AMTVSRWS, r0) + + case AXXLAND: /* xxland, xxlandc, xxleqv, xxlnand */ + opset(AXXLANDC, r0) + opset(AXXLEQV, r0) + opset(AXXLNAND, r0) + + case AXXLOR: /* xxlorc, xxlnor, xxlor, xxlxor */ + opset(AXXLORC, r0) + opset(AXXLNOR, r0) + opset(AXXLORQ, r0) + opset(AXXLXOR, r0) + + case AXXSEL: /* xxsel */ + opset(AXXSEL, r0) + + case AXXMRGHW: /* xxmrghw, xxmrglw */ + opset(AXXMRGLW, r0) + + case AXXSPLTW: /* xxspltw */ + opset(AXXSPLTW, r0) + + case AXXSPLTIB: /* xxspltib */ + opset(AXXSPLTIB, r0) + + case AXXPERM: /* xxpermdi */ + opset(AXXPERM, r0) + + case AXXSLDWI: /* xxsldwi */ + opset(AXXPERMDI, r0) + opset(AXXSLDWI, r0) + + case AXXBRQ: /* xxbrq, xxbrd, xxbrw, xxbrh */ + opset(AXXBRD, r0) + 
opset(AXXBRW, r0) + opset(AXXBRH, r0) + + case AXSCVDPSP: /* xscvdpsp, xscvspdp, xscvdpspn, xscvspdpn */ + opset(AXSCVSPDP, r0) + opset(AXSCVDPSPN, r0) + opset(AXSCVSPDPN, r0) + + case AXVCVDPSP: /* xvcvdpsp, xvcvspdp */ + opset(AXVCVSPDP, r0) + + case AXSCVDPSXDS: /* xscvdpsxds, xscvdpsxws, xscvdpuxds, xscvdpuxws */ + opset(AXSCVDPSXWS, r0) + opset(AXSCVDPUXDS, r0) + opset(AXSCVDPUXWS, r0) + + case AXSCVSXDDP: /* xscvsxddp, xscvuxddp, xscvsxdsp, xscvuxdsp */ + opset(AXSCVUXDDP, r0) + opset(AXSCVSXDSP, r0) + opset(AXSCVUXDSP, r0) + + case AXVCVDPSXDS: /* xvcvdpsxds, xvcvdpsxws, xvcvdpuxds, xvcvdpuxws, xvcvspsxds, xvcvspsxws, xvcvspuxds, xvcvspuxws */ + opset(AXVCVDPSXDS, r0) + opset(AXVCVDPSXWS, r0) + opset(AXVCVDPUXDS, r0) + opset(AXVCVDPUXWS, r0) + opset(AXVCVSPSXDS, r0) + opset(AXVCVSPSXWS, r0) + opset(AXVCVSPUXDS, r0) + opset(AXVCVSPUXWS, r0) + + case AXVCVSXDDP: /* xvcvsxddp, xvcvsxwdp, xvcvuxddp, xvcvuxwdp, xvcvsxdsp, xvcvsxwsp, xvcvuxdsp, xvcvuxwsp */ + opset(AXVCVSXWDP, r0) + opset(AXVCVUXDDP, r0) + opset(AXVCVUXWDP, r0) + opset(AXVCVSXDSP, r0) + opset(AXVCVSXWSP, r0) + opset(AXVCVUXDSP, r0) + opset(AXVCVUXWSP, r0) + + case AAND: /* logical op Rb,Rs,Ra; no literal */ + opset(AANDN, r0) + opset(AANDNCC, r0) + opset(AEQV, r0) + opset(AEQVCC, r0) + opset(ANAND, r0) + opset(ANANDCC, r0) + opset(ANOR, r0) + opset(ANORCC, r0) + opset(AORCC, r0) + opset(AORN, r0) + opset(AORNCC, r0) + opset(AXORCC, r0) + + case AADDME: /* op Ra, Rd */ + opset(AADDMECC, r0) + + opset(AADDMEV, r0) + opset(AADDMEVCC, r0) + opset(AADDZE, r0) + opset(AADDZECC, r0) + opset(AADDZEV, r0) + opset(AADDZEVCC, r0) + opset(ASUBME, r0) + opset(ASUBMECC, r0) + opset(ASUBMEV, r0) + opset(ASUBMEVCC, r0) + opset(ASUBZE, r0) + opset(ASUBZECC, r0) + opset(ASUBZEV, r0) + opset(ASUBZEVCC, r0) + + case AADDC: + opset(AADDCCC, r0) + + case ABEQ: + opset(ABGE, r0) + opset(ABGT, r0) + opset(ABLE, r0) + opset(ABLT, r0) + opset(ABNE, r0) + opset(ABVC, r0) + opset(ABVS, r0) + + case ABR: + opset(ABL, r0) + + case ABC: + opset(ABCL, r0) + + case ABDNZ: + opset(ABDZ, r0) + + case AEXTSB: /* op Rs, Ra */ + opset(AEXTSBCC, r0) + + opset(AEXTSH, r0) + opset(AEXTSHCC, r0) + opset(ACNTLZW, r0) + opset(ACNTLZWCC, r0) + opset(ACNTLZD, r0) + opset(AEXTSW, r0) + opset(AEXTSWCC, r0) + opset(ACNTLZDCC, r0) + + case AFABS: /* fop [s,]d */ + opset(AFABSCC, r0) + + opset(AFNABS, r0) + opset(AFNABSCC, r0) + opset(AFNEG, r0) + opset(AFNEGCC, r0) + opset(AFRSP, r0) + opset(AFRSPCC, r0) + opset(AFCTIW, r0) + opset(AFCTIWCC, r0) + opset(AFCTIWZ, r0) + opset(AFCTIWZCC, r0) + opset(AFCTID, r0) + opset(AFCTIDCC, r0) + opset(AFCTIDZ, r0) + opset(AFCTIDZCC, r0) + opset(AFCFID, r0) + opset(AFCFIDCC, r0) + opset(AFCFIDU, r0) + opset(AFCFIDUCC, r0) + opset(AFCFIDS, r0) + opset(AFCFIDSCC, r0) + opset(AFRES, r0) + opset(AFRESCC, r0) + opset(AFRIM, r0) + opset(AFRIMCC, r0) + opset(AFRIP, r0) + opset(AFRIPCC, r0) + opset(AFRIZ, r0) + opset(AFRIZCC, r0) + opset(AFRIN, r0) + opset(AFRINCC, r0) + opset(AFRSQRTE, r0) + opset(AFRSQRTECC, r0) + opset(AFSQRT, r0) + opset(AFSQRTCC, r0) + opset(AFSQRTS, r0) + opset(AFSQRTSCC, r0) + + case AFADD: + opset(AFADDS, r0) + opset(AFADDCC, r0) + opset(AFADDSCC, r0) + opset(AFCPSGN, r0) + opset(AFCPSGNCC, r0) + opset(AFDIV, r0) + opset(AFDIVS, r0) + opset(AFDIVCC, r0) + opset(AFDIVSCC, r0) + opset(AFSUB, r0) + opset(AFSUBS, r0) + opset(AFSUBCC, r0) + opset(AFSUBSCC, r0) + + case AFMADD: + opset(AFMADDCC, r0) + opset(AFMADDS, r0) + opset(AFMADDSCC, r0) + opset(AFMSUB, r0) + opset(AFMSUBCC, r0) + opset(AFMSUBS, r0) + opset(AFMSUBSCC, 
r0) + opset(AFNMADD, r0) + opset(AFNMADDCC, r0) + opset(AFNMADDS, r0) + opset(AFNMADDSCC, r0) + opset(AFNMSUB, r0) + opset(AFNMSUBCC, r0) + opset(AFNMSUBS, r0) + opset(AFNMSUBSCC, r0) + opset(AFSEL, r0) + opset(AFSELCC, r0) + + case AFMUL: + opset(AFMULS, r0) + opset(AFMULCC, r0) + opset(AFMULSCC, r0) + + case AFCMPO: + opset(AFCMPU, r0) + + case AMTFSB0: + opset(AMTFSB0CC, r0) + opset(AMTFSB1, r0) + opset(AMTFSB1CC, r0) + + case ANEG: /* op [Ra,] Rd */ + opset(ANEGCC, r0) + + opset(ANEGV, r0) + opset(ANEGVCC, r0) + + case AOR: /* or/xor Rb,Rs,Ra; ori/xori $uimm,Rs,R */ + opset(AXOR, r0) + + case AORIS: /* oris/xoris $uimm,Rs,Ra */ + opset(AXORIS, r0) + + case ASLW: + opset(ASLWCC, r0) + opset(ASRW, r0) + opset(ASRWCC, r0) + opset(AROTLW, r0) + + case ASLD: + opset(ASLDCC, r0) + opset(ASRD, r0) + opset(ASRDCC, r0) + opset(AROTL, r0) + + case ASRAW: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ + opset(ASRAWCC, r0) + + case AEXTSWSLI: + opset(AEXTSWSLICC, r0) + + case ASRAD: /* sraw Rb,Rs,Ra; srawi sh,Rs,Ra */ + opset(ASRADCC, r0) + + case ASUB: /* SUB Ra,Rb,Rd => subf Rd,ra,rb */ + opset(ASUB, r0) + + opset(ASUBCC, r0) + opset(ASUBV, r0) + opset(ASUBVCC, r0) + opset(ASUBCCC, r0) + opset(ASUBCV, r0) + opset(ASUBCVCC, r0) + opset(ASUBE, r0) + opset(ASUBECC, r0) + opset(ASUBEV, r0) + opset(ASUBEVCC, r0) + + case ASYNC: + opset(AISYNC, r0) + opset(ALWSYNC, r0) + opset(APTESYNC, r0) + opset(ATLBSYNC, r0) + + case ARLWMI: + opset(ARLWMICC, r0) + opset(ARLWNM, r0) + opset(ARLWNMCC, r0) + + case ARLDMI: + opset(ARLDMICC, r0) + opset(ARLDIMI, r0) + opset(ARLDIMICC, r0) + + case ARLDC: + opset(ARLDCCC, r0) + + case ARLDCL: + opset(ARLDCR, r0) + opset(ARLDCLCC, r0) + opset(ARLDCRCC, r0) + + case ARLDICL: + opset(ARLDICLCC, r0) + opset(ARLDICR, r0) + opset(ARLDICRCC, r0) + opset(ARLDIC, r0) + opset(ARLDICCC, r0) + opset(ACLRLSLDI, r0) + + case AFMOVD: + opset(AFMOVDCC, r0) + opset(AFMOVDU, r0) + opset(AFMOVS, r0) + opset(AFMOVSU, r0) + + case ALDAR: + opset(ALBAR, r0) + opset(ALHAR, r0) + opset(ALWAR, r0) + + case ASYSCALL: /* just the op; flow of control */ + opset(ARFI, r0) + + opset(ARFCI, r0) + opset(ARFID, r0) + opset(AHRFID, r0) + + case AMOVHBR: + opset(AMOVWBR, r0) + opset(AMOVDBR, r0) + + case ASLBMFEE: + opset(ASLBMFEV, r0) + + case ATW: + opset(ATD, r0) + + case ATLBIE: + opset(ASLBIE, r0) + opset(ATLBIEL, r0) + + case AEIEIO: + opset(ASLBIA, r0) + + case ACMP: + opset(ACMPW, r0) + + case ACMPU: + opset(ACMPWU, r0) + + case ACMPB: + opset(ACMPB, r0) + + case AFTDIV: + opset(AFTDIV, r0) + + case AFTSQRT: + opset(AFTSQRT, r0) + + case AMOVW: /* load/store/move word with sign extension; move 32-bit literals */ + opset(AMOVWZ, r0) /* Same as above, but zero extended */ + + case AADD, + AADDIS, + AANDCC, /* and. Rb,Rs,Ra; andi. 
$uimm,Rs,Ra */ + AANDISCC, + AFMOVSX, + AFMOVSZ, + ALSW, + AMOVD, /* load/store/move 64-bit values, including 32-bit literals with/without sign-extension */ + AMOVB, /* macro: move byte with sign extension */ + AMOVBU, /* macro: move byte with sign extension & update */ + AMOVFL, + /* op $s[,r2],r3; op r1[,r2],r3; no cc/v */ + ASUBC, /* op r1,$s,r3; op r1[,r2],r3 */ + ASTSW, + ASLBMTE, + AWORD, + ADWORD, + ADARN, + AVMSUMUDM, + AADDEX, + ACMPEQB, + ACLRLSLWI, + AMTVSRDD, + APNOP, + AISEL, + obj.ANOP, + obj.ATEXT, + obj.AUNDEF, + obj.AFUNCDATA, + obj.APCALIGN, + obj.APCDATA, + obj.ADUFFZERO, + obj.ADUFFCOPY: + break + } + } +} + +func OPVXX1(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo<<1 | oe<<11 +} + +func OPVXX2(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo<<2 | oe<<11 +} + +func OPVXX2VA(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo<<2 | oe<<16 +} + +func OPVXX3(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo<<3 | oe<<11 +} + +func OPVXX4(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo<<4 | oe<<11 +} + +func OPDQ(o uint32, xo uint32, oe uint32) uint32 { + return o<<26 | xo | oe<<4 +} + +func OPVX(o uint32, xo uint32, oe uint32, rc uint32) uint32 { + return o<<26 | xo | oe<<11 | rc&1 +} + +func OPVC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { + return o<<26 | xo | oe<<11 | (rc&1)<<10 +} + +func OPVCC(o uint32, xo uint32, oe uint32, rc uint32) uint32 { + return o<<26 | xo<<1 | oe<<10 | rc&1 +} + +func OPCC(o uint32, xo uint32, rc uint32) uint32 { + return OPVCC(o, xo, 0, rc) +} + +/* Generate MD-form opcode */ +func OPMD(o, xo, rc uint32) uint32 { + return o<<26 | xo<<2 | rc&1 +} + +/* the order is dest, a/s, b/imm for both arithmetic and logical operations. */ +func AOP_RRR(op uint32, d uint32, a uint32, b uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 +} + +/* VX-form 2-register operands, r/none/r */ +func AOP_RR(op uint32, d uint32, a uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<11 +} + +/* VA-form 4-register operands */ +func AOP_RRRR(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&31)<<6 +} + +func AOP_IRR(op uint32, d uint32, a uint32, simm uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | simm&0xFFFF +} + +/* VX-form 2-register + UIM operands */ +func AOP_VIRR(op uint32, d uint32, a uint32, simm uint32) uint32 { + return op | (d&31)<<21 | (simm&0xFFFF)<<16 | (a&31)<<11 +} + +/* VX-form 2-register + ST + SIX operands */ +func AOP_IIRR(op uint32, d uint32, a uint32, sbit uint32, simm uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (sbit&1)<<15 | (simm&0xF)<<11 +} + +/* VA-form 3-register + SHB operands */ +func AOP_IRRR(op uint32, d uint32, a uint32, b uint32, simm uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (simm&0xF)<<6 +} + +/* VX-form 1-register + SIM operands */ +func AOP_IR(op uint32, d uint32, simm uint32) uint32 { + return op | (d&31)<<21 | (simm&31)<<16 +} + +/* XX1-form 3-register operands, 1 VSR operand */ +func AOP_XX1(op uint32, r uint32, a uint32, b uint32) uint32 { + return op | (r&31)<<21 | (a&31)<<16 | (b&31)<<11 | (r&32)>>5 +} + +/* XX2-form 3-register operands, 2 VSR operands */ +func AOP_XX2(op uint32, xt uint32, a uint32, xb uint32) uint32 { + return op | (xt&31)<<21 | (a&3)<<16 | (xb&31)<<11 | (xb&32)>>4 | (xt&32)>>5 +} + +/* XX3-form 3 VSR operands */ +func AOP_XX3(op uint32, xt uint32, xa uint32, xb uint32) uint32 { + return op | (xt&31)<<21 | 
(xa&31)<<16 | (xb&31)<<11 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 +} + +/* XX3-form 3 VSR operands + immediate */ +func AOP_XX3I(op uint32, xt uint32, xa uint32, xb uint32, c uint32) uint32 { + return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (c&3)<<8 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 +} + +/* XX4-form, 4 VSR operands */ +func AOP_XX4(op uint32, xt uint32, xa uint32, xb uint32, xc uint32) uint32 { + return op | (xt&31)<<21 | (xa&31)<<16 | (xb&31)<<11 | (xc&31)<<6 | (xc&32)>>2 | (xa&32)>>3 | (xb&32)>>4 | (xt&32)>>5 +} + +/* DQ-form, VSR register, register + offset operands */ +func AOP_DQ(op uint32, xt uint32, a uint32, b uint32) uint32 { + /* The EA for this instruction form is (RA) + DQ << 4, where DQ is a 12-bit signed integer. */ + /* In order to match the output of the GNU objdump (and make the usage in Go asm easier), the */ + /* instruction is called using the sign extended value (i.e. a valid offset would be -32752 or 32752, */ + /* not -2047 or 2047), so 'b' needs to be adjusted to the expected 12-bit DQ value. Bear in mind that */ + /* bits 0 to 3 in 'dq' need to be zero, otherwise this will generate an illegal instruction. */ + /* If in doubt how this instruction form is encoded, refer to ISA 3.0b, pages 492 and 507. */ + dq := b >> 4 + return op | (xt&31)<<21 | (a&31)<<16 | (dq&4095)<<4 | (xt&32)>>2 +} + +/* Z23-form, 3-register operands + CY field */ +func AOP_Z23I(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c&3)<<9 +} + +/* X-form, 3-register operands + EH field */ +func AOP_RRRI(op uint32, d uint32, a uint32, b uint32, c uint32) uint32 { + return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11 | (c & 1) +} + +func LOP_RRR(op uint32, a uint32, s uint32, b uint32) uint32 { + return op | (s&31)<<21 | (a&31)<<16 | (b&31)<<11 +} + +func LOP_IRR(op uint32, a uint32, s uint32, uimm uint32) uint32 { + return op | (s&31)<<21 | (a&31)<<16 | uimm&0xFFFF +} + +func OP_BR(op uint32, li uint32, aa uint32) uint32 { + return op | li&0x03FFFFFC | aa<<1 +} + +func OP_BC(op uint32, bo uint32, bi uint32, bd uint32, aa uint32) uint32 { + return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 | bd&0xFFFC | aa<<1 +} + +func OP_BCR(op uint32, bo uint32, bi uint32) uint32 { + return op | (bo&0x1F)<<21 | (bi&0x1F)<<16 +} + +func OP_RLW(op uint32, a uint32, s uint32, sh uint32, mb uint32, me uint32) uint32 { + return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | (mb&31)<<6 | (me&31)<<1 +} + +func AOP_RLDIC(op uint32, a uint32, s uint32, sh uint32, m uint32) uint32 { + return op | (s&31)<<21 | (a&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 | (m&31)<<6 | ((m&32)>>5)<<5 +} + +func AOP_EXTSWSLI(op uint32, a uint32, s uint32, sh uint32) uint32 { + return op | (a&31)<<21 | (s&31)<<16 | (sh&31)<<11 | ((sh&32)>>5)<<1 +} + +func AOP_ISEL(op uint32, t uint32, a uint32, b uint32, bc uint32) uint32 { + return op | (t&31)<<21 | (a&31)<<16 | (b&31)<<11 | (bc&0x1F)<<6 +} + +const ( + /* each rhs is OPVCC(_, _, _, _) */ + OP_ADD = 31<<26 | 266<<1 | 0<<10 | 0 + OP_ADDI = 14<<26 | 0<<1 | 0<<10 | 0 + OP_ADDIS = 15<<26 | 0<<1 | 0<<10 | 0 + OP_ANDI = 28<<26 | 0<<1 | 0<<10 | 0 + OP_EXTSB = 31<<26 | 954<<1 | 0<<10 | 0 + OP_EXTSH = 31<<26 | 922<<1 | 0<<10 | 0 + OP_EXTSW = 31<<26 | 986<<1 | 0<<10 | 0 + OP_ISEL = 31<<26 | 15<<1 | 0<<10 | 0 + OP_MCRF = 19<<26 | 0<<1 | 0<<10 | 0 + OP_MCRFS = 63<<26 | 64<<1 | 0<<10 | 0 + OP_MCRXR = 31<<26 | 512<<1 | 0<<10 | 0 + OP_MFCR = 31<<26 | 19<<1 | 0<<10 | 0 + OP_MFFS = 63<<26 | 583<<1 | 0<<10 | 0 + OP_MFSPR = 31<<26 | 339<<1 | 0<<10 | 0 
+ OP_MFSR = 31<<26 | 595<<1 | 0<<10 | 0 + OP_MFSRIN = 31<<26 | 659<<1 | 0<<10 | 0 + OP_MTCRF = 31<<26 | 144<<1 | 0<<10 | 0 + OP_MTFSF = 63<<26 | 711<<1 | 0<<10 | 0 + OP_MTFSFI = 63<<26 | 134<<1 | 0<<10 | 0 + OP_MTSPR = 31<<26 | 467<<1 | 0<<10 | 0 + OP_MTSR = 31<<26 | 210<<1 | 0<<10 | 0 + OP_MTSRIN = 31<<26 | 242<<1 | 0<<10 | 0 + OP_MULLW = 31<<26 | 235<<1 | 0<<10 | 0 + OP_MULLD = 31<<26 | 233<<1 | 0<<10 | 0 + OP_OR = 31<<26 | 444<<1 | 0<<10 | 0 + OP_ORI = 24<<26 | 0<<1 | 0<<10 | 0 + OP_ORIS = 25<<26 | 0<<1 | 0<<10 | 0 + OP_RLWINM = 21<<26 | 0<<1 | 0<<10 | 0 + OP_RLWNM = 23<<26 | 0<<1 | 0<<10 | 0 + OP_SUBF = 31<<26 | 40<<1 | 0<<10 | 0 + OP_RLDIC = 30<<26 | 4<<1 | 0<<10 | 0 + OP_RLDICR = 30<<26 | 2<<1 | 0<<10 | 0 + OP_RLDICL = 30<<26 | 0<<1 | 0<<10 | 0 + OP_RLDCL = 30<<26 | 8<<1 | 0<<10 | 0 + OP_EXTSWSLI = 31<<26 | 445<<2 +) + +func oclass(a *obj.Addr) int { + return int(a.Class) - 1 +} + +const ( + D_FORM = iota + DS_FORM +) + +// This function determines when a non-indexed load or store is D or +// DS form for use in finding the size of the offset field in the instruction. +// The size is needed when setting the offset value in the instruction +// and when generating relocation for that field. +// DS form instructions include: ld, ldu, lwa, std, stdu. All other +// loads and stores with an offset field are D form. This function should +// only be called with the same opcodes as are handled by opstore and opload. +func (c *ctxt9) opform(insn uint32) int { + switch insn { + default: + c.ctxt.Diag("bad insn in loadform: %x", insn) + case OPVCC(58, 0, 0, 0), // ld + OPVCC(58, 0, 0, 1), // ldu + OPVCC(58, 0, 0, 0) | 1<<1, // lwa + OPVCC(62, 0, 0, 0), // std + OPVCC(62, 0, 0, 1): //stdu + return DS_FORM + case OP_ADDI, // add + OPVCC(32, 0, 0, 0), // lwz + OPVCC(33, 0, 0, 0), // lwzu + OPVCC(34, 0, 0, 0), // lbz + OPVCC(35, 0, 0, 0), // lbzu + OPVCC(40, 0, 0, 0), // lhz + OPVCC(41, 0, 0, 0), // lhzu + OPVCC(42, 0, 0, 0), // lha + OPVCC(43, 0, 0, 0), // lhau + OPVCC(46, 0, 0, 0), // lmw + OPVCC(48, 0, 0, 0), // lfs + OPVCC(49, 0, 0, 0), // lfsu + OPVCC(50, 0, 0, 0), // lfd + OPVCC(51, 0, 0, 0), // lfdu + OPVCC(36, 0, 0, 0), // stw + OPVCC(37, 0, 0, 0), // stwu + OPVCC(38, 0, 0, 0), // stb + OPVCC(39, 0, 0, 0), // stbu + OPVCC(44, 0, 0, 0), // sth + OPVCC(45, 0, 0, 0), // sthu + OPVCC(47, 0, 0, 0), // stmw + OPVCC(52, 0, 0, 0), // stfs + OPVCC(53, 0, 0, 0), // stfsu + OPVCC(54, 0, 0, 0), // stfd + OPVCC(55, 0, 0, 0): // stfdu + return D_FORM + } + return 0 +} + +// Encode instructions and create relocation for accessing s+d according to the +// instruction op with source or destination (as appropriate) register reg. +func (c *ctxt9) symbolAccess(s *obj.LSym, d int64, reg int16, op uint32, reuse bool) (o1, o2 uint32) { + if c.ctxt.Headtype == objabi.Haix { + // Every symbol access must be made via a TOC anchor. + c.ctxt.Diag("symbolAccess called for %s", s.Name) + } + var base uint32 + form := c.opform(op) + if c.ctxt.Flag_shared { + base = REG_R2 + } else { + base = REG_R0 + } + // If reg can be reused when computing the symbol address, + // use it instead of REGTMP. 
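+ // Both instructions are emitted with zero displacements here; the 8 byte
+ // relocation recorded below lets the linker patch the high-adjusted and low
+ // 16 bit halves of the symbol address into the addis and the following op.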
+ if !reuse { + o1 = AOP_IRR(OP_ADDIS, REGTMP, base, 0) + o2 = AOP_IRR(op, uint32(reg), REGTMP, 0) + } else { + o1 = AOP_IRR(OP_ADDIS, uint32(reg), base, 0) + o2 = AOP_IRR(op, uint32(reg), uint32(reg), 0) + } + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = s + rel.Add = d + if c.ctxt.Flag_shared { + switch form { + case D_FORM: + rel.Type = objabi.R_ADDRPOWER_TOCREL + case DS_FORM: + rel.Type = objabi.R_ADDRPOWER_TOCREL_DS + } + + } else { + switch form { + case D_FORM: + rel.Type = objabi.R_ADDRPOWER + case DS_FORM: + rel.Type = objabi.R_ADDRPOWER_DS + } + } + return +} + +/* + * 32-bit masks + */ +func getmask(m []byte, v uint32) bool { + m[1] = 0 + m[0] = m[1] + if v != ^uint32(0) && v&(1<<31) != 0 && v&1 != 0 { /* MB > ME */ + if getmask(m, ^v) { + i := int(m[0]) + m[0] = m[1] + 1 + m[1] = byte(i - 1) + return true + } + + return false + } + + for i := 0; i < 32; i++ { + if v&(1<<uint(31-i)) != 0 { + m[0] = byte(i) + for { + m[1] = byte(i) + i++ + if i >= 32 || v&(1<<uint(31-i)) == 0 { + break + } + } + + for ; i < 32; i++ { + if v&(1<<uint(31-i)) != 0 { + return false + } + } + return true + } + } + + return false +} + +func (c *ctxt9) maskgen(p *obj.Prog, m []byte, v uint32) { + if !getmask(m, v) { + c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) + } +} + +/* + * 64-bit masks (rldic etc) + */ +func getmask64(m []byte, v uint64) bool { + m[1] = 0 + m[0] = m[1] + for i := 0; i < 64; i++ { + if v&(uint64(1)<<uint(63-i)) != 0 { + m[0] = byte(i) + for { + m[1] = byte(i) + i++ + if i >= 64 || v&(uint64(1)<<uint(63-i)) == 0 { + break + } + } + + for ; i < 64; i++ { + if v&(uint64(1)<<uint(63-i)) != 0 { + return false + } + } + return true + } + } + + return false +} + +func (c *ctxt9) maskgen64(p *obj.Prog, m []byte, v uint64) { + if !getmask64(m, v) { + c.ctxt.Diag("cannot generate mask #%x\n%v", v, p) + } +} + +func loadu32(r int, d int64) uint32 { + v := int32(d >> 16) + if isuint32(uint64(d)) { + return LOP_IRR(OP_ORIS, uint32(r), REGZERO, uint32(v)) + } + return AOP_IRR(OP_ADDIS, uint32(r), REGZERO, uint32(v)) +} + +func high16adjusted(d int32) uint16 { + if d&0x8000 != 0 { + return uint16((d >> 16) + 1) + } + return uint16(d >> 16) +} + +func asmout(c *ctxt9, p *obj.Prog, o *Optab, out *[5]uint32) { + o1 := uint32(0) + o2 := uint32(0) + o3 := uint32(0) + o4 := uint32(0) + o5 := uint32(0) + + //print("%v => case %d\n", p, o->type); + switch o.type_ { + default: + c.ctxt.Diag("unknown type %d", o.type_) + prasm(p) + + case 0: /* pseudo ops */ + break + + case 2: /* int/cr/fp op Rb,[Ra],Rd */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) + + case 3: /* mov $soreg/addcon/andcon/ucon, r ==> addis/oris/addi/ori $i,reg',r */ + d := c.vregoff(&p.From) + + v := int32(d) + r := int(p.From.Reg) + if r == 0 { + r = c.getimpliedreg(&p.From, p) + } + if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 && (r != 0 || v != 0) { + c.ctxt.Diag("literal operation on R0\n%v", p) + } + a := OP_ADDI + if o.a1 == C_UCON { + if d&0xffff != 0 { + log.Fatalf("invalid handling of %v", p) + } + // For UCON operands the value is right shifted 16, using ADDIS if the + // value should be signed, ORIS if unsigned. 
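+ // For example, a C_UCON operand such as $0x30000 is emitted as addis Rt, R0, 3
+ // (implied R0 base); when the base is R0 and the value also fits in 32 unsigned
+ // bits, e.g. $0x80000000, the oris path below is taken: oris Rt, R0, 0x8000.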
+ v >>= 16 + if r == REGZERO && isuint32(uint64(d)) { + o1 = LOP_IRR(OP_ORIS, uint32(p.To.Reg), REGZERO, uint32(v)) + break + } + + a = OP_ADDIS + } else if int64(int16(d)) != d { + // Operand is 16 bit value with sign bit set + if o.a1 == C_ANDCON { + // Needs unsigned 16 bit so use ORI + if r == 0 || r == REGZERO { + o1 = LOP_IRR(uint32(OP_ORI), uint32(p.To.Reg), uint32(0), uint32(v)) + break + } + // With ADDCON, needs signed 16 bit value, fall through to use ADDI + } else if o.a1 != C_ADDCON { + log.Fatalf("invalid handling of %v", p) + } + } + + o1 = AOP_IRR(uint32(a), uint32(p.To.Reg), uint32(r), uint32(v)) + + case 4: /* add/mul $scon,[r1],r2 */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + if r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0 { + c.ctxt.Diag("literal operation on R0\n%v", p) + } + if int32(int16(v)) != v { + log.Fatalf("mishandled instruction %v", p) + } + o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + + case 5: /* syscall */ + o1 = c.oprrr(p.As) + + case 6: /* logical op Rb,[Rs,]Ra; no literal */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + // AROTL and AROTLW are extended mnemonics, which map to RLDCL and RLWNM. + switch p.As { + case AROTL: + o1 = AOP_RLDIC(OP_RLDCL, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), uint32(0)) + case AROTLW: + o1 = OP_RLW(OP_RLWNM, uint32(p.To.Reg), uint32(r), uint32(p.From.Reg), 0, 31) + default: + if p.As == AOR && p.From.Type == obj.TYPE_CONST && p.From.Offset == 0 { + // Compile "OR $0, Rx, Ry" into ori. If Rx == Ry == 0, this is the preferred + // hardware no-op. This happens because $0 matches C_REG before C_ZCON. + o1 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(r), 0) + } else { + o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) + } + } + + case 7: /* mov r, soreg ==> stw o(r) */ + r := int(p.To.Reg) + + if r == 0 { + r = c.getimpliedreg(&p.To, p) + } + v := c.regoff(&p.To) + if int32(int16(v)) != v { + log.Fatalf("mishandled instruction %v", p) + } + // Offsets in DS form stores must be a multiple of 4 + inst := c.opstore(p.As) + if c.opform(inst) == DS_FORM && v&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + o1 = AOP_IRR(inst, uint32(p.From.Reg), uint32(r), uint32(v)) + + case 8: /* mov soreg, r ==> lbz/lhz/lwz o(r), lbz o(r) + extsb r,r */ + r := int(p.From.Reg) + + if r == 0 { + r = c.getimpliedreg(&p.From, p) + } + v := c.regoff(&p.From) + if int32(int16(v)) != v { + log.Fatalf("mishandled instruction %v", p) + } + // Offsets in DS form loads must be a multiple of 4 + inst := c.opload(p.As) + if c.opform(inst) == DS_FORM && v&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + o1 = AOP_IRR(inst, uint32(p.To.Reg), uint32(r), uint32(v)) + + // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). 
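+ // There is no sign-extending byte load on ppc64, so MOVB is always a
+ // zero-extending load followed by extsb.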
+ o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) + + case 10: /* sub Ra,[Rb],Rd => subf Rd,Ra,Rb */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(r)) + + case 11: /* br/bl lbra */ + v := int32(0) + + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) + if v&03 != 0 { + c.ctxt.Diag("odd branch target address\n%v", p) + v &^= 03 + } + + if v < -(1<<25) || v >= 1<<24 { + c.ctxt.Diag("branch too far\n%v", p) + } + } + + o1 = OP_BR(c.opirr(p.As), uint32(v), 0) + if p.To.Sym != nil { + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 4 + rel.Sym = p.To.Sym + v += int32(p.To.Offset) + if v&03 != 0 { + c.ctxt.Diag("odd branch target address\n%v", p) + v &^= 03 + } + + rel.Add = int64(v) + rel.Type = objabi.R_CALLPOWER + } + o2 = 0x60000000 // nop, sometimes overwritten by ld r2, 24(r1) when dynamic linking + + case 13: /* mov[bhwd]{z,} r,r */ + // This needs to handle "MOV* $0, Rx". This shows up because $0 also + // matches C_REG if r0iszero. This happens because C_REG sorts before C_ANDCON + // TODO: fix the above behavior and cleanup this exception. + if p.From.Type == obj.TYPE_CONST { + o1 = LOP_IRR(OP_ADDI, REGZERO, uint32(p.To.Reg), 0) + break + } + if p.To.Type == obj.TYPE_CONST { + c.ctxt.Diag("cannot move into constant 0\n%v", p) + } + + switch p.As { + case AMOVB: + o1 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.From.Reg), 0) + case AMOVBZ: + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 24, 31) + case AMOVH: + o1 = LOP_RRR(OP_EXTSH, uint32(p.To.Reg), uint32(p.From.Reg), 0) + case AMOVHZ: + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.From.Reg), 0, 16, 31) + case AMOVW: + o1 = LOP_RRR(OP_EXTSW, uint32(p.To.Reg), uint32(p.From.Reg), 0) + case AMOVWZ: + o1 = OP_RLW(OP_RLDIC, uint32(p.To.Reg), uint32(p.From.Reg), 0, 0, 0) | 1<<5 /* MB=32 */ + case AMOVD: + o1 = LOP_RRR(OP_OR, uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.From.Reg)) + default: + c.ctxt.Diag("internal: bad register move/truncation\n%v", p) + } + + case 14: /* rldc[lr] Rb,Rs,$mask,Ra -- left, right give different masks */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + d := c.vregoff(p.GetFrom3()) + var a int + switch p.As { + + // These opcodes expect a mask operand that has to be converted into the + // appropriate operand. The way these were defined, not all valid masks are possible. + // Left here for compatibility in case they were used or generated. 
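+ // For example, a mask of 0x00FFFFFFFFFFFFFF converts to MB=8 for the CL forms,
+ // while 0xFFFFFFFFFFFFFF00 converts to ME=55 for the CR forms.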
+ case ARLDCL, ARLDCLCC: + var mask [2]uint8 + c.maskgen64(p, mask[:], uint64(d)) + + a = int(mask[0]) /* MB */ + if mask[1] != 63 { + c.ctxt.Diag("invalid mask for rotate: %x (end != bit 63)\n%v", uint64(d), p) + } + o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) + o1 |= (uint32(a) & 31) << 6 + if a&0x20 != 0 { + o1 |= 1 << 5 /* mb[5] is top bit */ + } + + case ARLDCR, ARLDCRCC: + var mask [2]uint8 + c.maskgen64(p, mask[:], uint64(d)) + + a = int(mask[1]) /* ME */ + if mask[0] != 0 { + c.ctxt.Diag("invalid mask for rotate: %x %x (start != 0)\n%v", uint64(d), mask[0], p) + } + o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(p.From.Reg)) + o1 |= (uint32(a) & 31) << 6 + if a&0x20 != 0 { + o1 |= 1 << 5 /* mb[5] is top bit */ + } + + // These opcodes use a shift count like the ppc64 asm, no mask conversion done + case ARLDICR, ARLDICRCC: + me := int(d) + sh := c.regoff(&p.From) + if me < 0 || me > 63 || sh > 63 { + c.ctxt.Diag("Invalid me or sh for RLDICR: %x %x\n%v", int(d), sh, p) + } + o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(me)) + + case ARLDICL, ARLDICLCC, ARLDIC, ARLDICCC: + mb := int(d) + sh := c.regoff(&p.From) + if mb < 0 || mb > 63 || sh > 63 { + c.ctxt.Diag("Invalid mb or sh for RLDIC, RLDICL: %x %x\n%v", mb, sh, p) + } + o1 = AOP_RLDIC(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), uint32(sh), uint32(mb)) + + case ACLRLSLDI: + // This is an extended mnemonic defined in the ISA section C.8.1 + // clrlsldi ra,rs,b,n --> rldic ra,rs,n,b-n + // It maps onto RLDIC so is directly generated here based on the operands from + // the clrlsldi. + n := int32(d) + b := c.regoff(&p.From) + if n > b || b > 63 { + c.ctxt.Diag("Invalid n or b for CLRLSLDI: %x %x\n%v", n, b, p) + } + o1 = AOP_RLDIC(OP_RLDIC, uint32(p.To.Reg), uint32(r), uint32(n), uint32(b)-uint32(n)) + + default: + c.ctxt.Diag("unexpected op in rldc case\n%v", p) + a = 0 + } + + case 17, /* bc bo,bi,lbra (same for now) */ + 16: /* bc bo,bi,sbra */ + a := 0 + + r := int(p.Reg) + + if p.From.Type == obj.TYPE_CONST { + a = int(c.regoff(&p.From)) + } else if p.From.Type == obj.TYPE_REG { + if r != 0 { + c.ctxt.Diag("unexpected register setting for branch with CR: %d\n", r) + } + // BI values for the CR + switch p.From.Reg { + case REG_CR0: + r = BI_CR0 + case REG_CR1: + r = BI_CR1 + case REG_CR2: + r = BI_CR2 + case REG_CR3: + r = BI_CR3 + case REG_CR4: + r = BI_CR4 + case REG_CR5: + r = BI_CR5 + case REG_CR6: + r = BI_CR6 + case REG_CR7: + r = BI_CR7 + default: + c.ctxt.Diag("unrecognized register: expecting CR\n") + } + } + v := int32(0) + if p.To.Target() != nil { + v = int32(p.To.Target().Pc - p.Pc) + } + if v&03 != 0 { + c.ctxt.Diag("odd branch target address\n%v", p) + v &^= 03 + } + + if v < -(1<<16) || v >= 1<<15 { + c.ctxt.Diag("branch too far\n%v", p) + } + o1 = OP_BC(c.opirr(p.As), uint32(a), uint32(r), uint32(v), 0) + + case 18: /* br/bl (lr/ctr); bc/bcl bo,bi,(lr/ctr) */ + var v int32 + var bh uint32 = 0 + if p.As == ABC || p.As == ABCL { + v = c.regoff(&p.From) & 31 + } else { + v = 20 /* unconditional */ + } + r := int(p.Reg) + if r == 0 { + r = 0 + } + switch oclass(&p.To) { + case C_CTR: + o1 = OPVCC(19, 528, 0, 0) + + case C_LR: + o1 = OPVCC(19, 16, 0, 0) + + default: + c.ctxt.Diag("bad optab entry (18): %d\n%v", p.To.Class, p) + v = 0 + } + + // Insert optional branch hint for bclr[l]/bcctr[l] + if p.From3Type() != obj.TYPE_NONE { + bh = uint32(p.GetFrom3().Offset) + if bh == 2 || bh > 3 { + log.Fatalf("BH must be 0,1,3 for %v", p) + } + 
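+ // BH occupies the two bits at shift 11 of the XL-form bclr/bcctr encoding
+ // (ISA bits 19:20).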
o1 |= bh << 11 + } + + if p.As == ABL || p.As == ABCL { + o1 |= 1 + } + o1 = OP_BCR(o1, uint32(v), uint32(r)) + + case 19: /* mov $lcon,r ==> cau+or */ + d := c.vregoff(&p.From) + o1 = loadu32(int(p.To.Reg), d) + o2 = LOP_IRR(OP_ORI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(int32(d))) + + case 20: /* add $ucon,,r | addis $addcon,r,r */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + if p.As == AADD && (r0iszero == 0 /*TypeKind(100016)*/ && p.Reg == 0 || r0iszero != 0 /*TypeKind(100016)*/ && p.To.Reg == 0) { + c.ctxt.Diag("literal operation on R0\n%v", p) + } + if p.As == AADDIS { + o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + } else { + o1 = AOP_IRR(c.opirr(AADDIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) + } + + case 22: /* add $lcon/$andcon,r1,r2 ==> oris+ori+add/ori+add */ + if p.To.Reg == REGTMP || p.Reg == REGTMP { + c.ctxt.Diag("can't synthesize large constant\n%v", p) + } + d := c.vregoff(&p.From) + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + if p.From.Sym != nil { + c.ctxt.Diag("%v is not supported", p) + } + // If operand is ANDCON, generate 2 instructions using + // ORI for unsigned value; with LCON 3 instructions. + if o.size == 8 { + o1 = LOP_IRR(OP_ORI, REGTMP, REGZERO, uint32(int32(d))) + o2 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) + } else { + o1 = loadu32(REGTMP, d) + o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) + o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) + } + + case 23: /* and $lcon/$addcon,r1,r2 ==> oris+ori+and/addi+and */ + if p.To.Reg == REGTMP || p.Reg == REGTMP { + c.ctxt.Diag("can't synthesize large constant\n%v", p) + } + d := c.vregoff(&p.From) + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + + // With ADDCON operand, generate 2 instructions using ADDI for signed value, + // with LCON operand generate 3 instructions. + if o.size == 8 { + o1 = LOP_IRR(OP_ADDI, REGZERO, REGTMP, uint32(int32(d))) + o2 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) + } else { + o1 = loadu32(REGTMP, d) + o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(int32(d))) + o3 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), REGTMP, uint32(r)) + } + if p.From.Sym != nil { + c.ctxt.Diag("%v is not supported", p) + } + + case 24: /* lfd fA,float64(0) -> xxlxor xsA,xsaA,xsaA + fneg for -0 */ + o1 = AOP_XX3I(c.oprrr(AXXLXOR), uint32(p.To.Reg), uint32(p.To.Reg), uint32(p.To.Reg), uint32(0)) + // This is needed for -0. + if o.size == 8 { + o2 = AOP_RRR(c.oprrr(AFNEG), uint32(p.To.Reg), 0, uint32(p.To.Reg)) + } + + case 25: + /* sld[.] $sh,rS,rA -> rldicr[.] $sh,rS,mask(0,63-sh),rA; srd[.] 
-> rldicl */ + v := c.regoff(&p.From) + + if v < 0 { + v = 0 + } else if v > 63 { + v = 63 + } + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + var a int + op := uint32(0) + switch p.As { + case ASLD, ASLDCC: + a = int(63 - v) + op = OP_RLDICR + + case ASRD, ASRDCC: + a = int(v) + v = 64 - v + op = OP_RLDICL + case AROTL: + a = int(0) + op = OP_RLDICL + case AEXTSWSLI, AEXTSWSLICC: + a = int(v) + default: + c.ctxt.Diag("unexpected op in sldi case\n%v", p) + a = 0 + o1 = 0 + } + + if p.As == AEXTSWSLI || p.As == AEXTSWSLICC { + o1 = AOP_EXTSWSLI(OP_EXTSWSLI, uint32(r), uint32(p.To.Reg), uint32(v)) + + } else { + o1 = AOP_RLDIC(op, uint32(p.To.Reg), uint32(r), uint32(v), uint32(a)) + } + if p.As == ASLDCC || p.As == ASRDCC || p.As == AEXTSWSLICC { + o1 |= 1 // Set the condition code bit + } + + case 26: /* mov $lsext/auto/oreg,,r2 ==> addis+addi */ + v := c.vregoff(&p.From) + r := int(p.From.Reg) + + switch p.From.Name { + case obj.NAME_EXTERN, obj.NAME_STATIC: + // Load a 32 bit constant, or relocation depending on if a symbol is attached + o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, OP_ADDI, true) + default: + if r == 0 { + r = c.getimpliedreg(&p.From, p) + } + // Add a 32 bit offset to a register. + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(int32(v)))) + o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) + } + + case 27: /* subc ra,$simm,rd => subfic rd,ra,$simm */ + v := c.regoff(p.GetFrom3()) + + r := int(p.From.Reg) + o1 = AOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + + case 28: /* subc r1,$lcon,r2 ==> cau+or+subfc */ + if p.To.Reg == REGTMP || p.From.Reg == REGTMP { + c.ctxt.Diag("can't synthesize large constant\n%v", p) + } + v := c.regoff(p.GetFrom3()) + o1 = AOP_IRR(OP_ADDIS, REGTMP, REGZERO, uint32(v)>>16) + o2 = LOP_IRR(OP_ORI, REGTMP, REGTMP, uint32(v)) + o3 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), REGTMP) + if p.From.Sym != nil { + c.ctxt.Diag("%v is not supported", p) + } + + case 29: /* rldic[lr]? $sh,s,$mask,a -- left, right, plain give different masks */ + v := c.regoff(&p.From) + + d := c.vregoff(p.GetFrom3()) + var mask [2]uint8 + c.maskgen64(p, mask[:], uint64(d)) + var a int + switch p.As { + case ARLDC, ARLDCCC: + a = int(mask[0]) /* MB */ + if int32(mask[1]) != (63 - v) { + c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p) + } + + case ARLDCL, ARLDCLCC: + a = int(mask[0]) /* MB */ + if mask[1] != 63 { + c.ctxt.Diag("invalid mask for shift: %x %s (shift %d)\n%v", uint64(d), mask[1], v, p) + } + + case ARLDCR, ARLDCRCC: + a = int(mask[1]) /* ME */ + if mask[0] != 0 { + c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[0], v, p) + } + + default: + c.ctxt.Diag("unexpected op in rldic case\n%v", p) + a = 0 + } + + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) + o1 |= (uint32(a) & 31) << 6 + if v&0x20 != 0 { + o1 |= 1 << 1 + } + if a&0x20 != 0 { + o1 |= 1 << 5 /* mb[5] is top bit */ + } + + case 30: /* rldimi $sh,s,$mask,a */ + v := c.regoff(&p.From) + + d := c.vregoff(p.GetFrom3()) + + // Original opcodes had mask operands which had to be converted to a shift count as expected by + // the ppc64 asm. 
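+ // For example, a shift of 16 with the contiguous mask 0x00000000FFFF0000
+ // converts to MB=32; the mask must end at bit 63-sh, which is checked below.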
+ switch p.As { + case ARLDMI, ARLDMICC: + var mask [2]uint8 + c.maskgen64(p, mask[:], uint64(d)) + if int32(mask[1]) != (63 - v) { + c.ctxt.Diag("invalid mask for shift: %x %x (shift %d)\n%v", uint64(d), mask[1], v, p) + } + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) + o1 |= (uint32(mask[0]) & 31) << 6 + if v&0x20 != 0 { + o1 |= 1 << 1 + } + if mask[0]&0x20 != 0 { + o1 |= 1 << 5 /* mb[5] is top bit */ + } + + // Opcodes with shift count operands. + case ARLDIMI, ARLDIMICC: + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), (uint32(v) & 0x1F)) + o1 |= (uint32(d) & 31) << 6 + if d&0x20 != 0 { + o1 |= 1 << 5 + } + if v&0x20 != 0 { + o1 |= 1 << 1 + } + } + + case 31: /* dword */ + d := c.vregoff(&p.From) + + if c.ctxt.Arch.ByteOrder == binary.BigEndian { + o1 = uint32(d >> 32) + o2 = uint32(d) + } else { + o1 = uint32(d) + o2 = uint32(d >> 32) + } + + if p.From.Sym != nil { + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Add = p.From.Offset + rel.Type = objabi.R_ADDR + o2 = 0 + o1 = o2 + } + + case 32: /* fmul frc,fra,frd */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) | (uint32(p.From.Reg)&31)<<6 + + case 33: /* fabs [frb,]frd; fmr. frb,frd */ + r := int(p.From.Reg) + + if oclass(&p.From) == C_NONE { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(r)) + + case 34: /* FMADDx fra,frb,frc,frt (t=a*c±b) */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) | (uint32(p.GetFrom3().Reg)&31)<<6 + + case 35: /* mov r,lext/lauto/loreg ==> cau $(v>>16),sb,r'; store o(r') */ + v := c.regoff(&p.To) + + r := int(p.To.Reg) + if r == 0 { + r = c.getimpliedreg(&p.To, p) + } + // Offsets in DS form stores must be a multiple of 4 + inst := c.opstore(p.As) + if c.opform(inst) == DS_FORM && v&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + o1 = AOP_IRR(OP_ADDIS, REGTMP, uint32(r), uint32(high16adjusted(v))) + o2 = AOP_IRR(inst, uint32(p.From.Reg), REGTMP, uint32(v)) + + case 36: /* mov b/bz/h/hz lext/lauto/lreg,r ==> lbz+extsb/lbz/lha/lhz etc */ + v := c.regoff(&p.From) + + r := int(p.From.Reg) + if r == 0 { + r = c.getimpliedreg(&p.From, p) + } + if o.a6 == C_REG { + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), uint32(r), uint32(high16adjusted(v))) + o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(p.To.Reg), uint32(v)) + } else { + o1 = AOP_IRR(OP_ADDIS, uint32(REGTMP), uint32(r), uint32(high16adjusted(v))) + o2 = AOP_IRR(c.opload(p.As), uint32(p.To.Reg), uint32(REGTMP), uint32(v)) + } + + // Sign extend MOVB if needed + o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) + + case 40: /* word */ + o1 = uint32(c.regoff(&p.From)) + + case 41: /* stswi */ + if p.To.Type == obj.TYPE_MEM && p.To.Index == 0 && p.To.Offset != 0 { + c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) + } + + o1 = AOP_RRR(c.opirr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 + + case 42: /* lswi */ + if p.From.Type == obj.TYPE_MEM && p.From.Index == 0 && p.From.Offset != 0 { + c.ctxt.Diag("Invalid addressing mode used in index type instruction: %v", p.As) + } + o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), 0) | (uint32(c.regoff(p.GetFrom3()))&0x7F)<<11 + + case 43: /* data cache instructions: op (Ra+[Rb]), [th|l] */ + /* TH field for dcbt/dcbtst: */ + /* 0 = Block 
access - program will soon access EA. */ + /* 8-15 = Stream access - sequence of access (data stream). See section 4.3.2 of the ISA for details. */ + /* 16 = Block access - program will soon make a transient access to EA. */ + /* 17 = Block access - program will not access EA for a long time. */ + + /* L field for dcbf: */ + /* 0 = invalidates the block containing EA in all processors. */ + /* 1 = same as 0, but with limited scope (i.e. block in the current processor will not be reused soon). */ + /* 3 = same as 1, but with even more limited scope (i.e. block in the current processor primary cache will not be reused soon). */ + if p.To.Type == obj.TYPE_NONE { + o1 = AOP_RRR(c.oprrr(p.As), 0, uint32(p.From.Index), uint32(p.From.Reg)) + } else { + th := c.regoff(&p.To) + o1 = AOP_RRR(c.oprrr(p.As), uint32(th), uint32(p.From.Index), uint32(p.From.Reg)) + } + + case 44: /* indexed store */ + o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) + + case 45: /* indexed load */ + switch p.As { + /* The assembler accepts a 4-operand l*arx instruction. The fourth operand is an Exclusive Access Hint (EH) */ + /* The EH field can be used as a lock acquire/release hint as follows: */ + /* 0 = Atomic Update (fetch-and-operate or similar algorithm) */ + /* 1 = Exclusive Access (lock acquire and release) */ + case ALBAR, ALHAR, ALWAR, ALDAR: + if p.From3Type() != obj.TYPE_NONE { + eh := int(c.regoff(p.GetFrom3())) + if eh > 1 { + c.ctxt.Diag("illegal EH field\n%v", p) + } + o1 = AOP_RRRI(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg), uint32(eh)) + } else { + o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) + } + default: + o1 = AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) + } + case 46: /* plain op */ + o1 = c.oprrr(p.As) + + case 47: /* op Ra, Rd; also op [Ra,] Rd */ + r := int(p.From.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) + + case 48: /* op Rs, Ra */ + r := int(p.From.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + o1 = LOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(r), 0) + + case 49: /* op Rb; op $n, Rb */ + if p.From.Type != obj.TYPE_REG { /* tlbie $L, rB */ + v := c.regoff(&p.From) & 1 + o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.To.Reg)) | uint32(v)<<21 + } else { + o1 = AOP_RRR(c.oprrr(p.As), 0, 0, uint32(p.From.Reg)) + } + + case 50: /* rem[u] r1[,r2],r3 */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + v := c.oprrr(p.As) + t := v & (1<<10 | 1) /* OE|Rc */ + o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) + o2 = AOP_RRR(OP_MULLW, REGTMP, REGTMP, uint32(p.From.Reg)) + o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) + if p.As == AREMU { + o4 = o3 + + /* Clear top 32 bits */ + o3 = OP_RLW(OP_RLDIC, REGTMP, REGTMP, 0, 0, 0) | 1<<5 + } + + case 51: /* remd[u] r1[,r2],r3 */ + r := int(p.Reg) + + if r == 0 { + r = int(p.To.Reg) + } + v := c.oprrr(p.As) + t := v & (1<<10 | 1) /* OE|Rc */ + o1 = AOP_RRR(v&^t, REGTMP, uint32(r), uint32(p.From.Reg)) + o2 = AOP_RRR(OP_MULLD, REGTMP, REGTMP, uint32(p.From.Reg)) + o3 = AOP_RRR(OP_SUBF|t, uint32(p.To.Reg), REGTMP, uint32(r)) + /* cases 50,51: removed; can be reused. */ + + /* cases 50,51: removed; can be reused. 
*/ + + case 52: /* mtfsbNx cr(n) */ + v := c.regoff(&p.From) & 31 + + o1 = AOP_RRR(c.oprrr(p.As), uint32(v), 0, 0) + + case 53: /* mffsX ,fr1 */ + o1 = AOP_RRR(OP_MFFS, uint32(p.To.Reg), 0, 0) + + case 55: /* op Rb, Rd */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), 0, uint32(p.From.Reg)) + + case 56: /* sra $sh,[s,]a; srd $sh,[s,]a */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.To.Reg), uint32(v)&31) + if (p.As == ASRAD || p.As == ASRADCC) && (v&0x20 != 0) { + o1 |= 1 << 1 /* mb[5] */ + } + + case 57: /* slw $sh,[s,]a -> rlwinm ... */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + + /* + * Let user (gs) shoot himself in the foot. + * qc has already complained. + * + if(v < 0 || v > 31) + ctxt->diag("illegal shift %ld\n%v", v, p); + */ + if v < 0 { + v = 0 + } else if v > 32 { + v = 32 + } + var mask [2]uint8 + switch p.As { + case AROTLW: + mask[0], mask[1] = 0, 31 + case ASRW, ASRWCC: + mask[0], mask[1] = uint8(v), 31 + v = 32 - v + default: + mask[0], mask[1] = 0, uint8(31-v) + } + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(r), uint32(v), uint32(mask[0]), uint32(mask[1])) + if p.As == ASLWCC || p.As == ASRWCC { + o1 |= 1 // set the condition code + } + + case 58: /* logical $andcon,[s],a */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + + case 59: /* or/xor/and $ucon,,r | oris/xoris/andis $addcon,r,r */ + v := c.regoff(&p.From) + + r := int(p.Reg) + if r == 0 { + r = int(p.To.Reg) + } + switch p.As { + case AOR: + o1 = LOP_IRR(c.opirr(AORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) /* oris, xoris, andis. */ + case AXOR: + o1 = LOP_IRR(c.opirr(AXORIS), uint32(p.To.Reg), uint32(r), uint32(v)>>16) + case AANDCC: + o1 = LOP_IRR(c.opirr(AANDISCC), uint32(p.To.Reg), uint32(r), uint32(v)>>16) + default: + o1 = LOP_IRR(c.opirr(p.As), uint32(p.To.Reg), uint32(r), uint32(v)) + } + + case 60: /* tw to,a,b */ + r := int(c.regoff(&p.From) & 31) + + o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.Reg), uint32(p.To.Reg)) + + case 61: /* tw to,a,$simm */ + r := int(c.regoff(&p.From) & 31) + + v := c.regoff(&p.To) + o1 = AOP_IRR(c.opirr(p.As), uint32(r), uint32(p.Reg), uint32(v)) + + case 62: /* rlwmi $sh,s,$mask,a */ + v := c.regoff(&p.From) + switch p.As { + case ACLRLSLWI: + n := c.regoff(p.GetFrom3()) + // This is an extended mnemonic described in the ISA C.8.2 + // clrlslwi ra,rs,b,n -> rlwinm ra,rs,n,b-n,31-n + // It maps onto rlwinm which is directly generated here. 
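+ // For example, b=16 and n=2 yield rlwinm ra,rs,2,14,29.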
+ if n > v || v >= 32 { + c.ctxt.Diag("Invalid n or b for CLRLSLWI: %x %x\n%v", v, n, p) + } + + o1 = OP_RLW(OP_RLWINM, uint32(p.To.Reg), uint32(p.Reg), uint32(n), uint32(v-n), uint32(31-n)) + default: + var mask [2]uint8 + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(v)) + o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + } + + case 63: /* rlwmi b,s,$mask,a */ + var mask [2]uint8 + c.maskgen(p, mask[:], uint32(c.regoff(p.GetFrom3()))) + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.Reg), uint32(p.To.Reg), uint32(p.From.Reg)) + o1 |= (uint32(mask[0])&31)<<6 | (uint32(mask[1])&31)<<1 + + case 64: /* mtfsf fr[, $m] {,fpcsr} */ + var v int32 + if p.From3Type() != obj.TYPE_NONE { + v = c.regoff(p.GetFrom3()) & 255 + } else { + v = 255 + } + o1 = OP_MTFSF | uint32(v)<<17 | uint32(p.From.Reg)<<11 + + case 65: /* MOVFL $imm,FPSCR(n) => mtfsfi crfd,imm */ + if p.To.Reg == 0 { + c.ctxt.Diag("must specify FPSCR(n)\n%v", p) + } + o1 = OP_MTFSFI | (uint32(p.To.Reg)&15)<<23 | (uint32(c.regoff(&p.From))&31)<<12 + + case 66: /* mov spr,r1; mov r1,spr */ + var r int + var v int32 + if REG_R0 <= p.From.Reg && p.From.Reg <= REG_R31 { + r = int(p.From.Reg) + v = int32(p.To.Reg) + o1 = OPVCC(31, 467, 0, 0) /* mtspr */ + } else { + r = int(p.To.Reg) + v = int32(p.From.Reg) + o1 = OPVCC(31, 339, 0, 0) /* mfspr */ + } + + o1 = AOP_RRR(o1, uint32(r), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 + + case 67: /* mcrf crfD,crfS */ + if p.From.Reg == REG_CR || p.To.Reg == REG_CR { + c.ctxt.Diag("CR argument must be a conditional register field (CR0-CR7)\n%v", p) + } + o1 = AOP_RRR(OP_MCRF, ((uint32(p.To.Reg) & 7) << 2), ((uint32(p.From.Reg) & 7) << 2), 0) + + case 68: /* mfcr rD; mfocrf CRM,rD */ + o1 = AOP_RRR(OP_MFCR, uint32(p.To.Reg), 0, 0) /* form, whole register */ + if p.From.Reg != REG_CR { + v := uint32(1) << uint(7-(p.From.Reg&7)) /* CR(n) */ + o1 |= 1<<20 | v<<12 /* new form, mfocrf */ + } + + case 69: /* mtcrf CRM,rS, mtocrf CRx,rS */ + var v uint32 + if p.To.Reg == REG_CR { + v = 0xff + } else if p.To.Offset != 0 { // MOVFL gpr, constant + v = uint32(p.To.Offset) + } else { // p.To.Reg == REG_CRx + v = 1 << uint(7-(p.To.Reg&7)) + } + // Use mtocrf form if only one CR field moved. 
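+ // Setting the bit just above the 8 bit FXM mask (before the <<12 shift below)
+ // selects the single-field mtocrf form; multi-field moves keep plain mtcrf.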
+ if bits.OnesCount32(v) == 1 { + v |= 1 << 8 + } + + o1 = AOP_RRR(OP_MTCRF, uint32(p.From.Reg), 0, 0) | uint32(v)<<12 + + case 70: /* [f]cmp r,r,cr*/ + var r int + if p.Reg == 0 { + r = 0 + } else { + r = (int(p.Reg) & 7) << 2 + } + o1 = AOP_RRR(c.oprrr(p.As), uint32(r), uint32(p.From.Reg), uint32(p.To.Reg)) + + case 71: /* cmp[l] r,i,cr*/ + var r int + if p.Reg == 0 { + r = 0 + } else { + r = (int(p.Reg) & 7) << 2 + } + o1 = AOP_RRR(c.opirr(p.As), uint32(r), uint32(p.From.Reg), 0) | uint32(c.regoff(&p.To))&0xffff + + case 72: /* slbmte (Rb+Rs -> slb[Rb]) -> Rs, Rb */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), 0, uint32(p.To.Reg)) + + case 73: /* mcrfs crfD,crfS */ + if p.From.Type != obj.TYPE_REG || p.From.Reg != REG_FPSCR || p.To.Type != obj.TYPE_REG || p.To.Reg < REG_CR0 || REG_CR7 < p.To.Reg { + c.ctxt.Diag("illegal FPSCR/CR field number\n%v", p) + } + o1 = AOP_RRR(OP_MCRFS, ((uint32(p.To.Reg) & 7) << 2), ((0 & 7) << 2), 0) + + case 77: /* syscall $scon, syscall Rx */ + if p.From.Type == obj.TYPE_CONST { + if p.From.Offset > BIG || p.From.Offset < -BIG { + c.ctxt.Diag("illegal syscall, sysnum too large: %v", p) + } + o1 = AOP_IRR(OP_ADDI, REGZERO, REGZERO, uint32(p.From.Offset)) + } else if p.From.Type == obj.TYPE_REG { + o1 = LOP_RRR(OP_OR, REGZERO, uint32(p.From.Reg), uint32(p.From.Reg)) + } else { + c.ctxt.Diag("illegal syscall: %v", p) + o1 = 0x7fe00008 // trap always + } + + o2 = c.oprrr(p.As) + o3 = AOP_RRR(c.oprrr(AXOR), REGZERO, REGZERO, REGZERO) // XOR R0, R0 + + case 78: /* undef */ + o1 = 0 /* "An instruction consisting entirely of binary 0s is guaranteed + always to be an illegal instruction." */ + + /* relocation operations */ + case 74: + v := c.vregoff(&p.To) + // Offsets in DS form stores must be a multiple of 4 + inst := c.opstore(p.As) + if c.opform(inst) == DS_FORM && v&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + // Can't reuse base for store instructions. + o1, o2 = c.symbolAccess(p.To.Sym, v, p.From.Reg, inst, false) + + case 75: // 32 bit offset symbol loads (got/toc/addr) + v := p.From.Offset + + // Offsets in DS form loads must be a multiple of 4 + inst := c.opload(p.As) + if c.opform(inst) == DS_FORM && v&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + switch p.From.Name { + case obj.NAME_GOTREF, obj.NAME_TOCREF: + if v != 0 { + c.ctxt.Diag("invalid offset for GOT/TOC access %v", p) + } + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) + o2 = AOP_IRR(inst, uint32(p.To.Reg), uint32(p.To.Reg), 0) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + switch p.From.Name { + case obj.NAME_GOTREF: + rel.Type = objabi.R_ADDRPOWER_GOT + case obj.NAME_TOCREF: + rel.Type = objabi.R_ADDRPOWER_TOCREL_DS + } + default: + reuseBaseReg := o.a6 == C_REG + // Reuse To.Reg as base register if it is a GPR. 
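+ // Only a GPR destination can double as the addis base; FPR/VR loads pass
+ // reuse=false and materialize the high part in REGTMP inside symbolAccess.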
+ o1, o2 = c.symbolAccess(p.From.Sym, v, p.To.Reg, inst, reuseBaseReg) + } + + o3 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) + + case 79: + if p.From.Offset != 0 { + c.ctxt.Diag("invalid offset against tls var %v", p) + } + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R13, 0) + o2 = AOP_IRR(OP_ADDI, uint32(p.To.Reg), uint32(p.To.Reg), 0) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Type = objabi.R_POWER_TLS_LE + + case 80: + if p.From.Offset != 0 { + c.ctxt.Diag("invalid offset against tls var %v", p) + } + o1 = AOP_IRR(OP_ADDIS, uint32(p.To.Reg), REG_R2, 0) + o2 = AOP_IRR(c.opload(AMOVD), uint32(p.To.Reg), uint32(p.To.Reg), 0) + o3 = AOP_RRR(OP_ADD, uint32(p.To.Reg), uint32(p.To.Reg), REG_R13) + rel := obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + rel.Siz = 8 + rel.Sym = p.From.Sym + rel.Type = objabi.R_POWER_TLS_IE + rel = obj.Addrel(c.cursym) + rel.Off = int32(c.pc) + 8 + rel.Siz = 4 + rel.Sym = p.From.Sym + rel.Type = objabi.R_POWER_TLS + + case 82: /* vector instructions, VX-form and VC-form */ + if p.From.Type == obj.TYPE_REG { + /* reg reg none OR reg reg reg */ + /* 3-register operand order: VRA, VRB, VRT */ + /* 2-register operand order: VRA, VRT */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) + } else if p.From3Type() == obj.TYPE_CONST { + /* imm imm reg reg */ + /* operand order: SIX, VRA, ST, VRT */ + six := int(c.regoff(&p.From)) + st := int(c.regoff(p.GetFrom3())) + o1 = AOP_IIRR(c.opiirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(st), uint32(six)) + } else if p.From3Type() == obj.TYPE_NONE && p.Reg != 0 { + /* imm reg reg */ + /* operand order: UIM, VRB, VRT */ + uim := int(c.regoff(&p.From)) + o1 = AOP_VIRR(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(uim)) + } else { + /* imm reg */ + /* operand order: SIM, VRT */ + sim := int(c.regoff(&p.From)) + o1 = AOP_IR(c.opirr(p.As), uint32(p.To.Reg), uint32(sim)) + } + + case 83: /* vector instructions, VA-form */ + if p.From.Type == obj.TYPE_REG { + /* reg reg reg reg */ + /* 4-register operand order: VRA, VRB, VRC, VRT */ + o1 = AOP_RRRR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) + } else if p.From.Type == obj.TYPE_CONST { + /* imm reg reg reg */ + /* operand order: SHB, VRA, VRB, VRT */ + shb := int(c.regoff(&p.From)) + o1 = AOP_IRRR(c.opirrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(shb)) + } + + case 84: // ISEL BC,RA,RB,RT -> isel rt,ra,rb,bc + bc := c.vregoff(&p.From) + if o.a1 == C_CRBIT { + // CR bit is encoded as a register, not a constant. 
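+ // The CR bit arrives as a register operand; AOP_ISEL below masks the value
+ // to 5 bits to form the BC field.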
+ bc = int64(p.From.Reg) + } + + // rt = To.Reg, ra = p.Reg, rb = p.From3.Reg + o1 = AOP_ISEL(OP_ISEL, uint32(p.To.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg), uint32(bc)) + + case 85: /* vector instructions, VX-form */ + /* reg none reg */ + /* 2-register operand order: VRB, VRT */ + o1 = AOP_RR(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg)) + + case 86: /* VSX indexed store, XX1-form */ + /* reg reg reg */ + /* 3-register operand order: XT, (RB)(RA*1) */ + o1 = AOP_XX1(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(p.To.Reg)) + + case 87: /* VSX indexed load, XX1-form */ + /* reg reg reg */ + /* 3-register operand order: (RB)(RA*1), XT */ + o1 = AOP_XX1(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(p.From.Reg)) + + case 88: /* VSX mfvsr* instructions, XX1-form XS,RA */ + o1 = AOP_XX1(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) + + case 89: /* VSX instructions, XX2-form */ + /* reg none reg OR reg imm reg */ + /* 2-register operand order: XB, XT or XB, UIM, XT*/ + uim := int(c.regoff(p.GetFrom3())) + o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(uim), uint32(p.From.Reg)) + + case 90: /* VSX instructions, XX3-form */ + if p.From3Type() == obj.TYPE_NONE { + /* reg reg reg */ + /* 3-register operand order: XA, XB, XT */ + o1 = AOP_XX3(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) + } else if p.From3Type() == obj.TYPE_CONST { + /* reg reg reg imm */ + /* operand order: XA, XB, DM, XT */ + dm := int(c.regoff(p.GetFrom3())) + o1 = AOP_XX3I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(dm)) + } + + case 91: /* VSX instructions, XX4-form */ + /* reg reg reg reg */ + /* 3-register operand order: XA, XB, XC, XT */ + o1 = AOP_XX4(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(p.GetFrom3().Reg)) + + case 92: /* X-form instructions, 3-operands */ + if p.To.Type == obj.TYPE_CONST { + /* imm reg reg */ + xf := int32(p.From.Reg) + if REG_F0 <= xf && xf <= REG_F31 { + /* operand order: FRA, FRB, BF */ + bf := int(c.regoff(&p.To)) << 2 + o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) + } else { + /* operand order: RA, RB, L */ + l := int(c.regoff(&p.To)) + o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.From.Reg), uint32(p.Reg)) + } + } else if p.From3Type() == obj.TYPE_CONST { + /* reg reg imm */ + /* operand order: RB, L, RA */ + l := int(c.regoff(p.GetFrom3())) + o1 = AOP_RRR(c.opirr(p.As), uint32(l), uint32(p.To.Reg), uint32(p.From.Reg)) + } else if p.To.Type == obj.TYPE_REG { + cr := int32(p.To.Reg) + if REG_CR0 <= cr && cr <= REG_CR7 { + /* cr reg reg */ + /* operand order: RA, RB, BF */ + bf := (int(p.To.Reg) & 7) << 2 + o1 = AOP_RRR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg), uint32(p.Reg)) + } else if p.From.Type == obj.TYPE_CONST { + /* reg imm */ + /* operand order: L, RT */ + l := int(c.regoff(&p.From)) + o1 = AOP_RRR(c.opirr(p.As), uint32(p.To.Reg), uint32(l), uint32(p.Reg)) + } else { + switch p.As { + case ACOPY, APASTECC: + o1 = AOP_RRR(c.opirr(p.As), uint32(1), uint32(p.From.Reg), uint32(p.To.Reg)) + default: + /* reg reg reg */ + /* operand order: RS, RB, RA */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) + } + } + } + + case 93: /* X-form instructions, 2-operands */ + if p.To.Type == obj.TYPE_CONST { + /* imm reg */ + /* operand order: FRB, BF */ + bf := int(c.regoff(&p.To)) << 2 + o1 = AOP_RR(c.opirr(p.As), uint32(bf), uint32(p.From.Reg)) + } else if p.Reg == 0 { + /* 
popcnt* r,r, X-form */ + /* operand order: RS, RA */ + o1 = AOP_RRR(c.oprrr(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(p.Reg)) + } + + case 94: /* Z23-form instructions, 4-operands */ + /* reg reg reg imm */ + /* operand order: RA, RB, CY, RT */ + cy := int(c.regoff(p.GetFrom3())) + o1 = AOP_Z23I(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg), uint32(cy)) + + case 96: /* VSX load, DQ-form */ + /* reg imm reg */ + /* operand order: (RA)(DQ), XT */ + dq := int16(c.regoff(&p.From)) + if (dq & 15) != 0 { + c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) + } + o1 = AOP_DQ(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(dq)) + + case 97: /* VSX store, DQ-form */ + /* reg imm reg */ + /* operand order: XT, (RA)(DQ) */ + dq := int16(c.regoff(&p.To)) + if (dq & 15) != 0 { + c.ctxt.Diag("invalid offset for DQ form load/store %v", dq) + } + o1 = AOP_DQ(c.opstore(p.As), uint32(p.From.Reg), uint32(p.To.Reg), uint32(dq)) + case 98: /* VSX indexed load or load with length (also left-justified), x-form */ + /* vsreg, reg, reg */ + o1 = AOP_XX1(c.opload(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) + case 99: /* VSX store with length (also left-justified) x-form */ + /* reg, reg, vsreg */ + o1 = AOP_XX1(c.opstore(p.As), uint32(p.From.Reg), uint32(p.Reg), uint32(p.To.Reg)) + case 100: /* VSX X-form XXSPLTIB */ + if p.From.Type == obj.TYPE_CONST { + /* imm reg */ + uim := int(c.regoff(&p.From)) + /* imm reg */ + /* Use AOP_XX1 form with 0 for one of the registers. */ + o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(uim)) + } else { + c.ctxt.Diag("invalid ops for %v", p.As) + } + case 101: + o1 = AOP_XX2(c.oprrr(p.As), uint32(p.To.Reg), uint32(0), uint32(p.From.Reg)) + + case 102: /* RLWMI $sh,rs,$mb,$me,rt (M-form opcode)*/ + mb := uint32(c.regoff(&p.RestArgs[0].Addr)) + me := uint32(c.regoff(&p.RestArgs[1].Addr)) + sh := uint32(c.regoff(&p.From)) + o1 = OP_RLW(c.opirr(p.As), uint32(p.To.Reg), uint32(p.Reg), sh, mb, me) + + case 103: /* RLWMI rb,rs,$mb,$me,rt (M-form opcode)*/ + mb := uint32(c.regoff(&p.RestArgs[0].Addr)) + me := uint32(c.regoff(&p.RestArgs[1].Addr)) + o1 = OP_RLW(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.Reg), uint32(p.From.Reg), mb, me) + + case 104: /* VSX mtvsr* instructions, XX1-form RA,RB,XT */ + o1 = AOP_XX1(c.oprrr(p.As), uint32(p.To.Reg), uint32(p.From.Reg), uint32(p.Reg)) + + case 106: /* MOVD spr, soreg */ + v := int32(p.From.Reg) + o1 = OPVCC(31, 339, 0, 0) /* mfspr */ + o1 = AOP_RRR(o1, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 + so := c.regoff(&p.To) + o2 = AOP_IRR(c.opstore(AMOVD), uint32(REGTMP), uint32(p.To.Reg), uint32(so)) + if so&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + if p.To.Reg == REGTMP { + log.Fatalf("SPR move to memory will clobber R31 %v", p) + } + + case 107: /* MOVD soreg, spr */ + v := int32(p.From.Reg) + so := c.regoff(&p.From) + o1 = AOP_IRR(c.opload(AMOVD), uint32(REGTMP), uint32(v), uint32(so)) + o2 = OPVCC(31, 467, 0, 0) /* mtspr */ + v = int32(p.To.Reg) + o2 = AOP_RRR(o2, uint32(REGTMP), 0, 0) | (uint32(v)&0x1f)<<16 | ((uint32(v)>>5)&0x1f)<<11 + if so&0x3 != 0 { + log.Fatalf("invalid offset for DS form load/store %v", p) + } + + case 108: /* mov r, xoreg ==> stwx rx,ry */ + r := int(p.To.Reg) + o1 = AOP_RRR(c.opstorex(p.As), uint32(p.From.Reg), uint32(p.To.Index), uint32(r)) + + case 109: /* mov xoreg, r ==> lbzx/lhzx/lwzx rx,ry, lbzx rx,ry + extsb r,r */ + r := int(p.From.Reg) + + o1 = 
AOP_RRR(c.oploadx(p.As), uint32(p.To.Reg), uint32(p.From.Index), uint32(r)) + // Sign extend MOVB operations. This is ignored for other cases (o.size == 4). + o2 = LOP_RRR(OP_EXTSB, uint32(p.To.Reg), uint32(p.To.Reg), 0) + } + + out[0] = o1 + out[1] = o2 + out[2] = o3 + out[3] = o4 + out[4] = o5 +} + +func (c *ctxt9) vregoff(a *obj.Addr) int64 { + c.instoffset = 0 + if a != nil { + c.aclass(a) + } + return c.instoffset +} + +func (c *ctxt9) regoff(a *obj.Addr) int32 { + return int32(c.vregoff(a)) +} + +func (c *ctxt9) oprrr(a obj.As) uint32 { + switch a { + case AADD: + return OPVCC(31, 266, 0, 0) + case AADDCC: + return OPVCC(31, 266, 0, 1) + case AADDV: + return OPVCC(31, 266, 1, 0) + case AADDVCC: + return OPVCC(31, 266, 1, 1) + case AADDC: + return OPVCC(31, 10, 0, 0) + case AADDCCC: + return OPVCC(31, 10, 0, 1) + case AADDCV: + return OPVCC(31, 10, 1, 0) + case AADDCVCC: + return OPVCC(31, 10, 1, 1) + case AADDE: + return OPVCC(31, 138, 0, 0) + case AADDECC: + return OPVCC(31, 138, 0, 1) + case AADDEV: + return OPVCC(31, 138, 1, 0) + case AADDEVCC: + return OPVCC(31, 138, 1, 1) + case AADDME: + return OPVCC(31, 234, 0, 0) + case AADDMECC: + return OPVCC(31, 234, 0, 1) + case AADDMEV: + return OPVCC(31, 234, 1, 0) + case AADDMEVCC: + return OPVCC(31, 234, 1, 1) + case AADDZE: + return OPVCC(31, 202, 0, 0) + case AADDZECC: + return OPVCC(31, 202, 0, 1) + case AADDZEV: + return OPVCC(31, 202, 1, 0) + case AADDZEVCC: + return OPVCC(31, 202, 1, 1) + case AADDEX: + return OPVCC(31, 170, 0, 0) /* addex - v3.0b */ + + case AAND: + return OPVCC(31, 28, 0, 0) + case AANDCC: + return OPVCC(31, 28, 0, 1) + case AANDN: + return OPVCC(31, 60, 0, 0) + case AANDNCC: + return OPVCC(31, 60, 0, 1) + + case ACMP: + return OPVCC(31, 0, 0, 0) | 1<<21 /* L=1 */ + case ACMPU: + return OPVCC(31, 32, 0, 0) | 1<<21 + case ACMPW: + return OPVCC(31, 0, 0, 0) /* L=0 */ + case ACMPWU: + return OPVCC(31, 32, 0, 0) + case ACMPB: + return OPVCC(31, 508, 0, 0) /* cmpb - v2.05 */ + case ACMPEQB: + return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ + + case ACNTLZW: + return OPVCC(31, 26, 0, 0) + case ACNTLZWCC: + return OPVCC(31, 26, 0, 1) + case ACNTLZD: + return OPVCC(31, 58, 0, 0) + case ACNTLZDCC: + return OPVCC(31, 58, 0, 1) + + case ACRAND: + return OPVCC(19, 257, 0, 0) + case ACRANDN: + return OPVCC(19, 129, 0, 0) + case ACREQV: + return OPVCC(19, 289, 0, 0) + case ACRNAND: + return OPVCC(19, 225, 0, 0) + case ACRNOR: + return OPVCC(19, 33, 0, 0) + case ACROR: + return OPVCC(19, 449, 0, 0) + case ACRORN: + return OPVCC(19, 417, 0, 0) + case ACRXOR: + return OPVCC(19, 193, 0, 0) + + case ADCBF: + return OPVCC(31, 86, 0, 0) + case ADCBI: + return OPVCC(31, 470, 0, 0) + case ADCBST: + return OPVCC(31, 54, 0, 0) + case ADCBT: + return OPVCC(31, 278, 0, 0) + case ADCBTST: + return OPVCC(31, 246, 0, 0) + case ADCBZ: + return OPVCC(31, 1014, 0, 0) + + case AMODUD: + return OPVCC(31, 265, 0, 0) /* modud - v3.0 */ + case AMODUW: + return OPVCC(31, 267, 0, 0) /* moduw - v3.0 */ + case AMODSD: + return OPVCC(31, 777, 0, 0) /* modsd - v3.0 */ + case AMODSW: + return OPVCC(31, 779, 0, 0) /* modsw - v3.0 */ + + case ADIVW, AREM: + return OPVCC(31, 491, 0, 0) + + case ADIVWCC: + return OPVCC(31, 491, 0, 1) + + case ADIVWV: + return OPVCC(31, 491, 1, 0) + + case ADIVWVCC: + return OPVCC(31, 491, 1, 1) + + case ADIVWU, AREMU: + return OPVCC(31, 459, 0, 0) + + case ADIVWUCC: + return OPVCC(31, 459, 0, 1) + + case ADIVWUV: + return OPVCC(31, 459, 1, 0) + + case ADIVWUVCC: + return OPVCC(31, 459, 1, 1) + + case ADIVD, AREMD: + return 
OPVCC(31, 489, 0, 0) + + case ADIVDCC: + return OPVCC(31, 489, 0, 1) + + case ADIVDE: + return OPVCC(31, 425, 0, 0) + + case ADIVDECC: + return OPVCC(31, 425, 0, 1) + + case ADIVDEU: + return OPVCC(31, 393, 0, 0) + + case ADIVDEUCC: + return OPVCC(31, 393, 0, 1) + + case ADIVDV: + return OPVCC(31, 489, 1, 0) + + case ADIVDVCC: + return OPVCC(31, 489, 1, 1) + + case ADIVDU, AREMDU: + return OPVCC(31, 457, 0, 0) + + case ADIVDUCC: + return OPVCC(31, 457, 0, 1) + + case ADIVDUV: + return OPVCC(31, 457, 1, 0) + + case ADIVDUVCC: + return OPVCC(31, 457, 1, 1) + + case AEIEIO: + return OPVCC(31, 854, 0, 0) + + case AEQV: + return OPVCC(31, 284, 0, 0) + case AEQVCC: + return OPVCC(31, 284, 0, 1) + + case AEXTSB: + return OPVCC(31, 954, 0, 0) + case AEXTSBCC: + return OPVCC(31, 954, 0, 1) + case AEXTSH: + return OPVCC(31, 922, 0, 0) + case AEXTSHCC: + return OPVCC(31, 922, 0, 1) + case AEXTSW: + return OPVCC(31, 986, 0, 0) + case AEXTSWCC: + return OPVCC(31, 986, 0, 1) + + case AFABS: + return OPVCC(63, 264, 0, 0) + case AFABSCC: + return OPVCC(63, 264, 0, 1) + case AFADD: + return OPVCC(63, 21, 0, 0) + case AFADDCC: + return OPVCC(63, 21, 0, 1) + case AFADDS: + return OPVCC(59, 21, 0, 0) + case AFADDSCC: + return OPVCC(59, 21, 0, 1) + case AFCMPO: + return OPVCC(63, 32, 0, 0) + case AFCMPU: + return OPVCC(63, 0, 0, 0) + case AFCFID: + return OPVCC(63, 846, 0, 0) + case AFCFIDCC: + return OPVCC(63, 846, 0, 1) + case AFCFIDU: + return OPVCC(63, 974, 0, 0) + case AFCFIDUCC: + return OPVCC(63, 974, 0, 1) + case AFCFIDS: + return OPVCC(59, 846, 0, 0) + case AFCFIDSCC: + return OPVCC(59, 846, 0, 1) + case AFCTIW: + return OPVCC(63, 14, 0, 0) + case AFCTIWCC: + return OPVCC(63, 14, 0, 1) + case AFCTIWZ: + return OPVCC(63, 15, 0, 0) + case AFCTIWZCC: + return OPVCC(63, 15, 0, 1) + case AFCTID: + return OPVCC(63, 814, 0, 0) + case AFCTIDCC: + return OPVCC(63, 814, 0, 1) + case AFCTIDZ: + return OPVCC(63, 815, 0, 0) + case AFCTIDZCC: + return OPVCC(63, 815, 0, 1) + case AFDIV: + return OPVCC(63, 18, 0, 0) + case AFDIVCC: + return OPVCC(63, 18, 0, 1) + case AFDIVS: + return OPVCC(59, 18, 0, 0) + case AFDIVSCC: + return OPVCC(59, 18, 0, 1) + case AFMADD: + return OPVCC(63, 29, 0, 0) + case AFMADDCC: + return OPVCC(63, 29, 0, 1) + case AFMADDS: + return OPVCC(59, 29, 0, 0) + case AFMADDSCC: + return OPVCC(59, 29, 0, 1) + + case AFMOVS, AFMOVD: + return OPVCC(63, 72, 0, 0) /* load */ + case AFMOVDCC: + return OPVCC(63, 72, 0, 1) + case AFMSUB: + return OPVCC(63, 28, 0, 0) + case AFMSUBCC: + return OPVCC(63, 28, 0, 1) + case AFMSUBS: + return OPVCC(59, 28, 0, 0) + case AFMSUBSCC: + return OPVCC(59, 28, 0, 1) + case AFMUL: + return OPVCC(63, 25, 0, 0) + case AFMULCC: + return OPVCC(63, 25, 0, 1) + case AFMULS: + return OPVCC(59, 25, 0, 0) + case AFMULSCC: + return OPVCC(59, 25, 0, 1) + case AFNABS: + return OPVCC(63, 136, 0, 0) + case AFNABSCC: + return OPVCC(63, 136, 0, 1) + case AFNEG: + return OPVCC(63, 40, 0, 0) + case AFNEGCC: + return OPVCC(63, 40, 0, 1) + case AFNMADD: + return OPVCC(63, 31, 0, 0) + case AFNMADDCC: + return OPVCC(63, 31, 0, 1) + case AFNMADDS: + return OPVCC(59, 31, 0, 0) + case AFNMADDSCC: + return OPVCC(59, 31, 0, 1) + case AFNMSUB: + return OPVCC(63, 30, 0, 0) + case AFNMSUBCC: + return OPVCC(63, 30, 0, 1) + case AFNMSUBS: + return OPVCC(59, 30, 0, 0) + case AFNMSUBSCC: + return OPVCC(59, 30, 0, 1) + case AFCPSGN: + return OPVCC(63, 8, 0, 0) + case AFCPSGNCC: + return OPVCC(63, 8, 0, 1) + case AFRES: + return OPVCC(59, 24, 0, 0) + case AFRESCC: + return OPVCC(59, 24, 0, 1) + case 
AFRIM: + return OPVCC(63, 488, 0, 0) + case AFRIMCC: + return OPVCC(63, 488, 0, 1) + case AFRIP: + return OPVCC(63, 456, 0, 0) + case AFRIPCC: + return OPVCC(63, 456, 0, 1) + case AFRIZ: + return OPVCC(63, 424, 0, 0) + case AFRIZCC: + return OPVCC(63, 424, 0, 1) + case AFRIN: + return OPVCC(63, 392, 0, 0) + case AFRINCC: + return OPVCC(63, 392, 0, 1) + case AFRSP: + return OPVCC(63, 12, 0, 0) + case AFRSPCC: + return OPVCC(63, 12, 0, 1) + case AFRSQRTE: + return OPVCC(63, 26, 0, 0) + case AFRSQRTECC: + return OPVCC(63, 26, 0, 1) + case AFSEL: + return OPVCC(63, 23, 0, 0) + case AFSELCC: + return OPVCC(63, 23, 0, 1) + case AFSQRT: + return OPVCC(63, 22, 0, 0) + case AFSQRTCC: + return OPVCC(63, 22, 0, 1) + case AFSQRTS: + return OPVCC(59, 22, 0, 0) + case AFSQRTSCC: + return OPVCC(59, 22, 0, 1) + case AFSUB: + return OPVCC(63, 20, 0, 0) + case AFSUBCC: + return OPVCC(63, 20, 0, 1) + case AFSUBS: + return OPVCC(59, 20, 0, 0) + case AFSUBSCC: + return OPVCC(59, 20, 0, 1) + + case AICBI: + return OPVCC(31, 982, 0, 0) + case AISYNC: + return OPVCC(19, 150, 0, 0) + + case AMTFSB0: + return OPVCC(63, 70, 0, 0) + case AMTFSB0CC: + return OPVCC(63, 70, 0, 1) + case AMTFSB1: + return OPVCC(63, 38, 0, 0) + case AMTFSB1CC: + return OPVCC(63, 38, 0, 1) + + case AMULHW: + return OPVCC(31, 75, 0, 0) + case AMULHWCC: + return OPVCC(31, 75, 0, 1) + case AMULHWU: + return OPVCC(31, 11, 0, 0) + case AMULHWUCC: + return OPVCC(31, 11, 0, 1) + case AMULLW: + return OPVCC(31, 235, 0, 0) + case AMULLWCC: + return OPVCC(31, 235, 0, 1) + case AMULLWV: + return OPVCC(31, 235, 1, 0) + case AMULLWVCC: + return OPVCC(31, 235, 1, 1) + + case AMULHD: + return OPVCC(31, 73, 0, 0) + case AMULHDCC: + return OPVCC(31, 73, 0, 1) + case AMULHDU: + return OPVCC(31, 9, 0, 0) + case AMULHDUCC: + return OPVCC(31, 9, 0, 1) + case AMULLD: + return OPVCC(31, 233, 0, 0) + case AMULLDCC: + return OPVCC(31, 233, 0, 1) + case AMULLDV: + return OPVCC(31, 233, 1, 0) + case AMULLDVCC: + return OPVCC(31, 233, 1, 1) + + case ANAND: + return OPVCC(31, 476, 0, 0) + case ANANDCC: + return OPVCC(31, 476, 0, 1) + case ANEG: + return OPVCC(31, 104, 0, 0) + case ANEGCC: + return OPVCC(31, 104, 0, 1) + case ANEGV: + return OPVCC(31, 104, 1, 0) + case ANEGVCC: + return OPVCC(31, 104, 1, 1) + case ANOR: + return OPVCC(31, 124, 0, 0) + case ANORCC: + return OPVCC(31, 124, 0, 1) + case AOR: + return OPVCC(31, 444, 0, 0) + case AORCC: + return OPVCC(31, 444, 0, 1) + case AORN: + return OPVCC(31, 412, 0, 0) + case AORNCC: + return OPVCC(31, 412, 0, 1) + + case APOPCNTD: + return OPVCC(31, 506, 0, 0) /* popcntd - v2.06 */ + case APOPCNTW: + return OPVCC(31, 378, 0, 0) /* popcntw - v2.06 */ + case APOPCNTB: + return OPVCC(31, 122, 0, 0) /* popcntb - v2.02 */ + case ACNTTZW: + return OPVCC(31, 538, 0, 0) /* cnttzw - v3.00 */ + case ACNTTZWCC: + return OPVCC(31, 538, 0, 1) /* cnttzw. - v3.00 */ + case ACNTTZD: + return OPVCC(31, 570, 0, 0) /* cnttzd - v3.00 */ + case ACNTTZDCC: + return OPVCC(31, 570, 0, 1) /* cnttzd. 
- v3.00 */ + + case ARFI: + return OPVCC(19, 50, 0, 0) + case ARFCI: + return OPVCC(19, 51, 0, 0) + case ARFID: + return OPVCC(19, 18, 0, 0) + case AHRFID: + return OPVCC(19, 274, 0, 0) + + case ARLWMI: + return OPVCC(20, 0, 0, 0) + case ARLWMICC: + return OPVCC(20, 0, 0, 1) + case ARLWNM: + return OPVCC(23, 0, 0, 0) + case ARLWNMCC: + return OPVCC(23, 0, 0, 1) + + case ARLDCL: + return OPVCC(30, 8, 0, 0) + case ARLDCLCC: + return OPVCC(30, 0, 0, 1) + + case ARLDCR: + return OPVCC(30, 9, 0, 0) + case ARLDCRCC: + return OPVCC(30, 9, 0, 1) + + case ARLDICL: + return OPVCC(30, 0, 0, 0) + case ARLDICLCC: + return OPVCC(30, 0, 0, 1) + case ARLDICR: + return OPMD(30, 1, 0) // rldicr + case ARLDICRCC: + return OPMD(30, 1, 1) // rldicr. + + case ARLDIC: + return OPMD(30, 2, 0) // rldic + case ARLDICCC: + return OPMD(30, 2, 1) // rldic. + + case ASYSCALL: + return OPVCC(17, 1, 0, 0) + + case ASLW: + return OPVCC(31, 24, 0, 0) + case ASLWCC: + return OPVCC(31, 24, 0, 1) + case ASLD: + return OPVCC(31, 27, 0, 0) + case ASLDCC: + return OPVCC(31, 27, 0, 1) + + case ASRAW: + return OPVCC(31, 792, 0, 0) + case ASRAWCC: + return OPVCC(31, 792, 0, 1) + case ASRAD: + return OPVCC(31, 794, 0, 0) + case ASRADCC: + return OPVCC(31, 794, 0, 1) + + case AEXTSWSLI: + return OPVCC(31, 445, 0, 0) + case AEXTSWSLICC: + return OPVCC(31, 445, 0, 1) + + case ASRW: + return OPVCC(31, 536, 0, 0) + case ASRWCC: + return OPVCC(31, 536, 0, 1) + case ASRD: + return OPVCC(31, 539, 0, 0) + case ASRDCC: + return OPVCC(31, 539, 0, 1) + + case ASUB: + return OPVCC(31, 40, 0, 0) + case ASUBCC: + return OPVCC(31, 40, 0, 1) + case ASUBV: + return OPVCC(31, 40, 1, 0) + case ASUBVCC: + return OPVCC(31, 40, 1, 1) + case ASUBC: + return OPVCC(31, 8, 0, 0) + case ASUBCCC: + return OPVCC(31, 8, 0, 1) + case ASUBCV: + return OPVCC(31, 8, 1, 0) + case ASUBCVCC: + return OPVCC(31, 8, 1, 1) + case ASUBE: + return OPVCC(31, 136, 0, 0) + case ASUBECC: + return OPVCC(31, 136, 0, 1) + case ASUBEV: + return OPVCC(31, 136, 1, 0) + case ASUBEVCC: + return OPVCC(31, 136, 1, 1) + case ASUBME: + return OPVCC(31, 232, 0, 0) + case ASUBMECC: + return OPVCC(31, 232, 0, 1) + case ASUBMEV: + return OPVCC(31, 232, 1, 0) + case ASUBMEVCC: + return OPVCC(31, 232, 1, 1) + case ASUBZE: + return OPVCC(31, 200, 0, 0) + case ASUBZECC: + return OPVCC(31, 200, 0, 1) + case ASUBZEV: + return OPVCC(31, 200, 1, 0) + case ASUBZEVCC: + return OPVCC(31, 200, 1, 1) + + case ASYNC: + return OPVCC(31, 598, 0, 0) + case ALWSYNC: + return OPVCC(31, 598, 0, 0) | 1<<21 + + case APTESYNC: + return OPVCC(31, 598, 0, 0) | 2<<21 + + case ATLBIE: + return OPVCC(31, 306, 0, 0) + case ATLBIEL: + return OPVCC(31, 274, 0, 0) + case ATLBSYNC: + return OPVCC(31, 566, 0, 0) + case ASLBIA: + return OPVCC(31, 498, 0, 0) + case ASLBIE: + return OPVCC(31, 434, 0, 0) + case ASLBMFEE: + return OPVCC(31, 915, 0, 0) + case ASLBMFEV: + return OPVCC(31, 851, 0, 0) + case ASLBMTE: + return OPVCC(31, 402, 0, 0) + + case ATW: + return OPVCC(31, 4, 0, 0) + case ATD: + return OPVCC(31, 68, 0, 0) + + /* Vector (VMX/Altivec) instructions */ + /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ + /* are enabled starting at POWER6 (ISA 2.05). 
*/ + case AVAND: + return OPVX(4, 1028, 0, 0) /* vand - v2.03 */ + case AVANDC: + return OPVX(4, 1092, 0, 0) /* vandc - v2.03 */ + case AVNAND: + return OPVX(4, 1412, 0, 0) /* vnand - v2.07 */ + + case AVOR: + return OPVX(4, 1156, 0, 0) /* vor - v2.03 */ + case AVORC: + return OPVX(4, 1348, 0, 0) /* vorc - v2.07 */ + case AVNOR: + return OPVX(4, 1284, 0, 0) /* vnor - v2.03 */ + case AVXOR: + return OPVX(4, 1220, 0, 0) /* vxor - v2.03 */ + case AVEQV: + return OPVX(4, 1668, 0, 0) /* veqv - v2.07 */ + + case AVADDUBM: + return OPVX(4, 0, 0, 0) /* vaddubm - v2.03 */ + case AVADDUHM: + return OPVX(4, 64, 0, 0) /* vadduhm - v2.03 */ + case AVADDUWM: + return OPVX(4, 128, 0, 0) /* vadduwm - v2.03 */ + case AVADDUDM: + return OPVX(4, 192, 0, 0) /* vaddudm - v2.07 */ + case AVADDUQM: + return OPVX(4, 256, 0, 0) /* vadduqm - v2.07 */ + + case AVADDCUQ: + return OPVX(4, 320, 0, 0) /* vaddcuq - v2.07 */ + case AVADDCUW: + return OPVX(4, 384, 0, 0) /* vaddcuw - v2.03 */ + + case AVADDUBS: + return OPVX(4, 512, 0, 0) /* vaddubs - v2.03 */ + case AVADDUHS: + return OPVX(4, 576, 0, 0) /* vadduhs - v2.03 */ + case AVADDUWS: + return OPVX(4, 640, 0, 0) /* vadduws - v2.03 */ + + case AVADDSBS: + return OPVX(4, 768, 0, 0) /* vaddsbs - v2.03 */ + case AVADDSHS: + return OPVX(4, 832, 0, 0) /* vaddshs - v2.03 */ + case AVADDSWS: + return OPVX(4, 896, 0, 0) /* vaddsws - v2.03 */ + + case AVADDEUQM: + return OPVX(4, 60, 0, 0) /* vaddeuqm - v2.07 */ + case AVADDECUQ: + return OPVX(4, 61, 0, 0) /* vaddecuq - v2.07 */ + + case AVMULESB: + return OPVX(4, 776, 0, 0) /* vmulesb - v2.03 */ + case AVMULOSB: + return OPVX(4, 264, 0, 0) /* vmulosb - v2.03 */ + case AVMULEUB: + return OPVX(4, 520, 0, 0) /* vmuleub - v2.03 */ + case AVMULOUB: + return OPVX(4, 8, 0, 0) /* vmuloub - v2.03 */ + case AVMULESH: + return OPVX(4, 840, 0, 0) /* vmulesh - v2.03 */ + case AVMULOSH: + return OPVX(4, 328, 0, 0) /* vmulosh - v2.03 */ + case AVMULEUH: + return OPVX(4, 584, 0, 0) /* vmuleuh - v2.03 */ + case AVMULOUH: + return OPVX(4, 72, 0, 0) /* vmulouh - v2.03 */ + case AVMULESW: + return OPVX(4, 904, 0, 0) /* vmulesw - v2.07 */ + case AVMULOSW: + return OPVX(4, 392, 0, 0) /* vmulosw - v2.07 */ + case AVMULEUW: + return OPVX(4, 648, 0, 0) /* vmuleuw - v2.07 */ + case AVMULOUW: + return OPVX(4, 136, 0, 0) /* vmulouw - v2.07 */ + case AVMULUWM: + return OPVX(4, 137, 0, 0) /* vmuluwm - v2.07 */ + + case AVPMSUMB: + return OPVX(4, 1032, 0, 0) /* vpmsumb - v2.07 */ + case AVPMSUMH: + return OPVX(4, 1096, 0, 0) /* vpmsumh - v2.07 */ + case AVPMSUMW: + return OPVX(4, 1160, 0, 0) /* vpmsumw - v2.07 */ + case AVPMSUMD: + return OPVX(4, 1224, 0, 0) /* vpmsumd - v2.07 */ + + case AVMSUMUDM: + return OPVX(4, 35, 0, 0) /* vmsumudm - v3.00b */ + + case AVSUBUBM: + return OPVX(4, 1024, 0, 0) /* vsububm - v2.03 */ + case AVSUBUHM: + return OPVX(4, 1088, 0, 0) /* vsubuhm - v2.03 */ + case AVSUBUWM: + return OPVX(4, 1152, 0, 0) /* vsubuwm - v2.03 */ + case AVSUBUDM: + return OPVX(4, 1216, 0, 0) /* vsubudm - v2.07 */ + case AVSUBUQM: + return OPVX(4, 1280, 0, 0) /* vsubuqm - v2.07 */ + + case AVSUBCUQ: + return OPVX(4, 1344, 0, 0) /* vsubcuq - v2.07 */ + case AVSUBCUW: + return OPVX(4, 1408, 0, 0) /* vsubcuw - v2.03 */ + + case AVSUBUBS: + return OPVX(4, 1536, 0, 0) /* vsububs - v2.03 */ + case AVSUBUHS: + return OPVX(4, 1600, 0, 0) /* vsubuhs - v2.03 */ + case AVSUBUWS: + return OPVX(4, 1664, 0, 0) /* vsubuws - v2.03 */ + + case AVSUBSBS: + return OPVX(4, 1792, 0, 0) /* vsubsbs - v2.03 */ + case AVSUBSHS: + return OPVX(4, 1856, 0, 0) /* vsubshs - v2.03 
*/ + case AVSUBSWS: + return OPVX(4, 1920, 0, 0) /* vsubsws - v2.03 */ + + case AVSUBEUQM: + return OPVX(4, 62, 0, 0) /* vsubeuqm - v2.07 */ + case AVSUBECUQ: + return OPVX(4, 63, 0, 0) /* vsubecuq - v2.07 */ + + case AVRLB: + return OPVX(4, 4, 0, 0) /* vrlb - v2.03 */ + case AVRLH: + return OPVX(4, 68, 0, 0) /* vrlh - v2.03 */ + case AVRLW: + return OPVX(4, 132, 0, 0) /* vrlw - v2.03 */ + case AVRLD: + return OPVX(4, 196, 0, 0) /* vrld - v2.07 */ + + case AVMRGOW: + return OPVX(4, 1676, 0, 0) /* vmrgow - v2.07 */ + case AVMRGEW: + return OPVX(4, 1932, 0, 0) /* vmrgew - v2.07 */ + + case AVSLB: + return OPVX(4, 260, 0, 0) /* vslh - v2.03 */ + case AVSLH: + return OPVX(4, 324, 0, 0) /* vslh - v2.03 */ + case AVSLW: + return OPVX(4, 388, 0, 0) /* vslw - v2.03 */ + case AVSL: + return OPVX(4, 452, 0, 0) /* vsl - v2.03 */ + case AVSLO: + return OPVX(4, 1036, 0, 0) /* vsl - v2.03 */ + case AVSRB: + return OPVX(4, 516, 0, 0) /* vsrb - v2.03 */ + case AVSRH: + return OPVX(4, 580, 0, 0) /* vsrh - v2.03 */ + case AVSRW: + return OPVX(4, 644, 0, 0) /* vsrw - v2.03 */ + case AVSR: + return OPVX(4, 708, 0, 0) /* vsr - v2.03 */ + case AVSRO: + return OPVX(4, 1100, 0, 0) /* vsro - v2.03 */ + case AVSLD: + return OPVX(4, 1476, 0, 0) /* vsld - v2.07 */ + case AVSRD: + return OPVX(4, 1732, 0, 0) /* vsrd - v2.07 */ + + case AVSRAB: + return OPVX(4, 772, 0, 0) /* vsrab - v2.03 */ + case AVSRAH: + return OPVX(4, 836, 0, 0) /* vsrah - v2.03 */ + case AVSRAW: + return OPVX(4, 900, 0, 0) /* vsraw - v2.03 */ + case AVSRAD: + return OPVX(4, 964, 0, 0) /* vsrad - v2.07 */ + + case AVBPERMQ: + return OPVC(4, 1356, 0, 0) /* vbpermq - v2.07 */ + case AVBPERMD: + return OPVC(4, 1484, 0, 0) /* vbpermd - v3.00 */ + + case AVCLZB: + return OPVX(4, 1794, 0, 0) /* vclzb - v2.07 */ + case AVCLZH: + return OPVX(4, 1858, 0, 0) /* vclzh - v2.07 */ + case AVCLZW: + return OPVX(4, 1922, 0, 0) /* vclzw - v2.07 */ + case AVCLZD: + return OPVX(4, 1986, 0, 0) /* vclzd - v2.07 */ + + case AVPOPCNTB: + return OPVX(4, 1795, 0, 0) /* vpopcntb - v2.07 */ + case AVPOPCNTH: + return OPVX(4, 1859, 0, 0) /* vpopcnth - v2.07 */ + case AVPOPCNTW: + return OPVX(4, 1923, 0, 0) /* vpopcntw - v2.07 */ + case AVPOPCNTD: + return OPVX(4, 1987, 0, 0) /* vpopcntd - v2.07 */ + + case AVCMPEQUB: + return OPVC(4, 6, 0, 0) /* vcmpequb - v2.03 */ + case AVCMPEQUBCC: + return OPVC(4, 6, 0, 1) /* vcmpequb. - v2.03 */ + case AVCMPEQUH: + return OPVC(4, 70, 0, 0) /* vcmpequh - v2.03 */ + case AVCMPEQUHCC: + return OPVC(4, 70, 0, 1) /* vcmpequh. - v2.03 */ + case AVCMPEQUW: + return OPVC(4, 134, 0, 0) /* vcmpequw - v2.03 */ + case AVCMPEQUWCC: + return OPVC(4, 134, 0, 1) /* vcmpequw. - v2.03 */ + case AVCMPEQUD: + return OPVC(4, 199, 0, 0) /* vcmpequd - v2.07 */ + case AVCMPEQUDCC: + return OPVC(4, 199, 0, 1) /* vcmpequd. - v2.07 */ + + case AVCMPGTUB: + return OPVC(4, 518, 0, 0) /* vcmpgtub - v2.03 */ + case AVCMPGTUBCC: + return OPVC(4, 518, 0, 1) /* vcmpgtub. - v2.03 */ + case AVCMPGTUH: + return OPVC(4, 582, 0, 0) /* vcmpgtuh - v2.03 */ + case AVCMPGTUHCC: + return OPVC(4, 582, 0, 1) /* vcmpgtuh. - v2.03 */ + case AVCMPGTUW: + return OPVC(4, 646, 0, 0) /* vcmpgtuw - v2.03 */ + case AVCMPGTUWCC: + return OPVC(4, 646, 0, 1) /* vcmpgtuw. - v2.03 */ + case AVCMPGTUD: + return OPVC(4, 711, 0, 0) /* vcmpgtud - v2.07 */ + case AVCMPGTUDCC: + return OPVC(4, 711, 0, 1) /* vcmpgtud. v2.07 */ + case AVCMPGTSB: + return OPVC(4, 774, 0, 0) /* vcmpgtsb - v2.03 */ + case AVCMPGTSBCC: + return OPVC(4, 774, 0, 1) /* vcmpgtsb. 
- v2.03 */ + case AVCMPGTSH: + return OPVC(4, 838, 0, 0) /* vcmpgtsh - v2.03 */ + case AVCMPGTSHCC: + return OPVC(4, 838, 0, 1) /* vcmpgtsh. - v2.03 */ + case AVCMPGTSW: + return OPVC(4, 902, 0, 0) /* vcmpgtsw - v2.03 */ + case AVCMPGTSWCC: + return OPVC(4, 902, 0, 1) /* vcmpgtsw. - v2.03 */ + case AVCMPGTSD: + return OPVC(4, 967, 0, 0) /* vcmpgtsd - v2.07 */ + case AVCMPGTSDCC: + return OPVC(4, 967, 0, 1) /* vcmpgtsd. - v2.07 */ + + case AVCMPNEZB: + return OPVC(4, 263, 0, 0) /* vcmpnezb - v3.00 */ + case AVCMPNEZBCC: + return OPVC(4, 263, 0, 1) /* vcmpnezb. - v3.00 */ + case AVCMPNEB: + return OPVC(4, 7, 0, 0) /* vcmpneb - v3.00 */ + case AVCMPNEBCC: + return OPVC(4, 7, 0, 1) /* vcmpneb. - v3.00 */ + case AVCMPNEH: + return OPVC(4, 71, 0, 0) /* vcmpneh - v3.00 */ + case AVCMPNEHCC: + return OPVC(4, 71, 0, 1) /* vcmpneh. - v3.00 */ + case AVCMPNEW: + return OPVC(4, 135, 0, 0) /* vcmpnew - v3.00 */ + case AVCMPNEWCC: + return OPVC(4, 135, 0, 1) /* vcmpnew. - v3.00 */ + + case AVPERM: + return OPVX(4, 43, 0, 0) /* vperm - v2.03 */ + case AVPERMXOR: + return OPVX(4, 45, 0, 0) /* vpermxor - v2.03 */ + case AVPERMR: + return OPVX(4, 59, 0, 0) /* vpermr - v3.0 */ + + case AVSEL: + return OPVX(4, 42, 0, 0) /* vsel - v2.03 */ + + case AVCIPHER: + return OPVX(4, 1288, 0, 0) /* vcipher - v2.07 */ + case AVCIPHERLAST: + return OPVX(4, 1289, 0, 0) /* vcipherlast - v2.07 */ + case AVNCIPHER: + return OPVX(4, 1352, 0, 0) /* vncipher - v2.07 */ + case AVNCIPHERLAST: + return OPVX(4, 1353, 0, 0) /* vncipherlast - v2.07 */ + case AVSBOX: + return OPVX(4, 1480, 0, 0) /* vsbox - v2.07 */ + /* End of vector instructions */ + + /* Vector scalar (VSX) instructions */ + /* ISA 2.06 enables these for POWER7. */ + case AMFVSRD, AMFVRD, AMFFPRD: + return OPVXX1(31, 51, 0) /* mfvsrd - v2.07 */ + case AMFVSRWZ: + return OPVXX1(31, 115, 0) /* mfvsrwz - v2.07 */ + case AMFVSRLD: + return OPVXX1(31, 307, 0) /* mfvsrld - v3.00 */ + + case AMTVSRD, AMTFPRD, AMTVRD: + return OPVXX1(31, 179, 0) /* mtvsrd - v2.07 */ + case AMTVSRWA: + return OPVXX1(31, 211, 0) /* mtvsrwa - v2.07 */ + case AMTVSRWZ: + return OPVXX1(31, 243, 0) /* mtvsrwz - v2.07 */ + case AMTVSRDD: + return OPVXX1(31, 435, 0) /* mtvsrdd - v3.00 */ + case AMTVSRWS: + return OPVXX1(31, 403, 0) /* mtvsrws - v3.00 */ + + case AXXLAND: + return OPVXX3(60, 130, 0) /* xxland - v2.06 */ + case AXXLANDC: + return OPVXX3(60, 138, 0) /* xxlandc - v2.06 */ + case AXXLEQV: + return OPVXX3(60, 186, 0) /* xxleqv - v2.07 */ + case AXXLNAND: + return OPVXX3(60, 178, 0) /* xxlnand - v2.07 */ + + case AXXLORC: + return OPVXX3(60, 170, 0) /* xxlorc - v2.07 */ + case AXXLNOR: + return OPVXX3(60, 162, 0) /* xxlnor - v2.06 */ + case AXXLOR, AXXLORQ: + return OPVXX3(60, 146, 0) /* xxlor - v2.06 */ + case AXXLXOR: + return OPVXX3(60, 154, 0) /* xxlxor - v2.06 */ + + case AXXSEL: + return OPVXX4(60, 3, 0) /* xxsel - v2.06 */ + + case AXXMRGHW: + return OPVXX3(60, 18, 0) /* xxmrghw - v2.06 */ + case AXXMRGLW: + return OPVXX3(60, 50, 0) /* xxmrglw - v2.06 */ + + case AXXSPLTW: + return OPVXX2(60, 164, 0) /* xxspltw - v2.06 */ + + case AXXSPLTIB: + return OPVCC(60, 360, 0, 0) /* xxspltib - v3.0 */ + + case AXXPERM: + return OPVXX3(60, 26, 0) /* xxperm - v2.06 */ + case AXXPERMDI: + return OPVXX3(60, 10, 0) /* xxpermdi - v2.06 */ + + case AXXSLDWI: + return OPVXX3(60, 2, 0) /* xxsldwi - v2.06 */ + + case AXXBRQ: + return OPVXX2VA(60, 475, 31) /* xxbrq - v3.0 */ + case AXXBRD: + return OPVXX2VA(60, 475, 23) /* xxbrd - v3.0 */ + case AXXBRW: + return OPVXX2VA(60, 475, 15) /* xxbrw - v3.0 
*/ + case AXXBRH: + return OPVXX2VA(60, 475, 7) /* xxbrh - v3.0 */ + + case AXSCVDPSP: + return OPVXX2(60, 265, 0) /* xscvdpsp - v2.06 */ + case AXSCVSPDP: + return OPVXX2(60, 329, 0) /* xscvspdp - v2.06 */ + case AXSCVDPSPN: + return OPVXX2(60, 267, 0) /* xscvdpspn - v2.07 */ + case AXSCVSPDPN: + return OPVXX2(60, 331, 0) /* xscvspdpn - v2.07 */ + + case AXVCVDPSP: + return OPVXX2(60, 393, 0) /* xvcvdpsp - v2.06 */ + case AXVCVSPDP: + return OPVXX2(60, 457, 0) /* xvcvspdp - v2.06 */ + + case AXSCVDPSXDS: + return OPVXX2(60, 344, 0) /* xscvdpsxds - v2.06 */ + case AXSCVDPSXWS: + return OPVXX2(60, 88, 0) /* xscvdpsxws - v2.06 */ + case AXSCVDPUXDS: + return OPVXX2(60, 328, 0) /* xscvdpuxds - v2.06 */ + case AXSCVDPUXWS: + return OPVXX2(60, 72, 0) /* xscvdpuxws - v2.06 */ + + case AXSCVSXDDP: + return OPVXX2(60, 376, 0) /* xscvsxddp - v2.06 */ + case AXSCVUXDDP: + return OPVXX2(60, 360, 0) /* xscvuxddp - v2.06 */ + case AXSCVSXDSP: + return OPVXX2(60, 312, 0) /* xscvsxdsp - v2.06 */ + case AXSCVUXDSP: + return OPVXX2(60, 296, 0) /* xscvuxdsp - v2.06 */ + + case AXVCVDPSXDS: + return OPVXX2(60, 472, 0) /* xvcvdpsxds - v2.06 */ + case AXVCVDPSXWS: + return OPVXX2(60, 216, 0) /* xvcvdpsxws - v2.06 */ + case AXVCVDPUXDS: + return OPVXX2(60, 456, 0) /* xvcvdpuxds - v2.06 */ + case AXVCVDPUXWS: + return OPVXX2(60, 200, 0) /* xvcvdpuxws - v2.06 */ + case AXVCVSPSXDS: + return OPVXX2(60, 408, 0) /* xvcvspsxds - v2.07 */ + case AXVCVSPSXWS: + return OPVXX2(60, 152, 0) /* xvcvspsxws - v2.07 */ + case AXVCVSPUXDS: + return OPVXX2(60, 392, 0) /* xvcvspuxds - v2.07 */ + case AXVCVSPUXWS: + return OPVXX2(60, 136, 0) /* xvcvspuxws - v2.07 */ + + case AXVCVSXDDP: + return OPVXX2(60, 504, 0) /* xvcvsxddp - v2.06 */ + case AXVCVSXWDP: + return OPVXX2(60, 248, 0) /* xvcvsxwdp - v2.06 */ + case AXVCVUXDDP: + return OPVXX2(60, 488, 0) /* xvcvuxddp - v2.06 */ + case AXVCVUXWDP: + return OPVXX2(60, 232, 0) /* xvcvuxwdp - v2.06 */ + case AXVCVSXDSP: + return OPVXX2(60, 440, 0) /* xvcvsxdsp - v2.06 */ + case AXVCVSXWSP: + return OPVXX2(60, 184, 0) /* xvcvsxwsp - v2.06 */ + case AXVCVUXDSP: + return OPVXX2(60, 424, 0) /* xvcvuxdsp - v2.06 */ + case AXVCVUXWSP: + return OPVXX2(60, 168, 0) /* xvcvuxwsp - v2.06 */ + /* End of VSX instructions */ + + case AMADDHD: + return OPVX(4, 48, 0, 0) /* maddhd - v3.00 */ + case AMADDHDU: + return OPVX(4, 49, 0, 0) /* maddhdu - v3.00 */ + case AMADDLD: + return OPVX(4, 51, 0, 0) /* maddld - v3.00 */ + + case AXOR: + return OPVCC(31, 316, 0, 0) + case AXORCC: + return OPVCC(31, 316, 0, 1) + } + + c.ctxt.Diag("bad r/r, r/r/r or r/r/r/r opcode %v", a) + return 0 +} + +func (c *ctxt9) opirrr(a obj.As) uint32 { + switch a { + /* Vector (VMX/Altivec) instructions */ + /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ + /* are enabled starting at POWER6 (ISA 2.05). */ + case AVSLDOI: + return OPVX(4, 44, 0, 0) /* vsldoi - v2.03 */ + } + + c.ctxt.Diag("bad i/r/r/r opcode %v", a) + return 0 +} + +func (c *ctxt9) opiirr(a obj.As) uint32 { + switch a { + /* Vector (VMX/Altivec) instructions */ + /* ISA 2.07 enables these for POWER8 and beyond. 
*/ + case AVSHASIGMAW: + return OPVX(4, 1666, 0, 0) /* vshasigmaw - v2.07 */ + case AVSHASIGMAD: + return OPVX(4, 1730, 0, 0) /* vshasigmad - v2.07 */ + } + + c.ctxt.Diag("bad i/i/r/r opcode %v", a) + return 0 +} + +func (c *ctxt9) opirr(a obj.As) uint32 { + switch a { + case AADD: + return OPVCC(14, 0, 0, 0) + case AADDC: + return OPVCC(12, 0, 0, 0) + case AADDCCC: + return OPVCC(13, 0, 0, 0) + case AADDIS: + return OPVCC(15, 0, 0, 0) /* ADDIS */ + + case AANDCC: + return OPVCC(28, 0, 0, 0) + case AANDISCC: + return OPVCC(29, 0, 0, 0) /* ANDIS. */ + + case ABR: + return OPVCC(18, 0, 0, 0) + case ABL: + return OPVCC(18, 0, 0, 0) | 1 + case obj.ADUFFZERO: + return OPVCC(18, 0, 0, 0) | 1 + case obj.ADUFFCOPY: + return OPVCC(18, 0, 0, 0) | 1 + case ABC: + return OPVCC(16, 0, 0, 0) + case ABCL: + return OPVCC(16, 0, 0, 0) | 1 + + case ABEQ: + return AOP_RRR(16<<26, BO_BCR, BI_EQ, 0) + case ABGE: + return AOP_RRR(16<<26, BO_NOTBCR, BI_LT, 0) + case ABGT: + return AOP_RRR(16<<26, BO_BCR, BI_GT, 0) + case ABLE: + return AOP_RRR(16<<26, BO_NOTBCR, BI_GT, 0) + case ABLT: + return AOP_RRR(16<<26, BO_BCR, BI_LT, 0) + case ABNE: + return AOP_RRR(16<<26, BO_NOTBCR, BI_EQ, 0) + case ABVC: + return AOP_RRR(16<<26, BO_NOTBCR, BI_FU, 0) + case ABVS: + return AOP_RRR(16<<26, BO_BCR, BI_FU, 0) + case ABDZ: + return AOP_RRR(16<<26, BO_NOTBCTR, 0, 0) + case ABDNZ: + return AOP_RRR(16<<26, BO_BCTR, 0, 0) + + case ACMP: + return OPVCC(11, 0, 0, 0) | 1<<21 /* L=1 */ + case ACMPU: + return OPVCC(10, 0, 0, 0) | 1<<21 + case ACMPW: + return OPVCC(11, 0, 0, 0) /* L=0 */ + case ACMPWU: + return OPVCC(10, 0, 0, 0) + case ACMPEQB: + return OPVCC(31, 224, 0, 0) /* cmpeqb - v3.00 */ + + case ALSW: + return OPVCC(31, 597, 0, 0) + + case ACOPY: + return OPVCC(31, 774, 0, 0) /* copy - v3.00 */ + case APASTECC: + return OPVCC(31, 902, 0, 1) /* paste. - v3.00 */ + case ADARN: + return OPVCC(31, 755, 0, 0) /* darn - v3.00 */ + + case AMULLW, AMULLD: + return OPVCC(7, 0, 0, 0) /* mulli works with MULLW or MULLD */ + + case AOR: + return OPVCC(24, 0, 0, 0) + case AORIS: + return OPVCC(25, 0, 0, 0) /* ORIS */ + + case ARLWMI: + return OPVCC(20, 0, 0, 0) /* rlwimi */ + case ARLWMICC: + return OPVCC(20, 0, 0, 1) + case ARLDMI: + return OPMD(30, 3, 0) /* rldimi */ + case ARLDMICC: + return OPMD(30, 3, 1) /* rldimi. */ + case ARLDIMI: + return OPMD(30, 3, 0) /* rldimi */ + case ARLDIMICC: + return OPMD(30, 3, 1) /* rldimi. */ + case ARLWNM: + return OPVCC(21, 0, 0, 0) /* rlwinm */ + case ARLWNMCC: + return OPVCC(21, 0, 0, 1) + + case ARLDCL: + return OPMD(30, 0, 0) /* rldicl */ + case ARLDCLCC: + return OPMD(30, 0, 1) /* rldicl. */ + case ARLDCR: + return OPMD(30, 1, 0) /* rldicr */ + case ARLDCRCC: + return OPMD(30, 1, 1) /* rldicr. */ + case ARLDC: + return OPMD(30, 2, 0) /* rldic */ + case ARLDCCC: + return OPMD(30, 2, 1) /* rldic. */ + + case ASRAW: + return OPVCC(31, 824, 0, 0) + case ASRAWCC: + return OPVCC(31, 824, 0, 1) + case ASRAD: + return OPVCC(31, (413 << 1), 0, 0) + case ASRADCC: + return OPVCC(31, (413 << 1), 0, 1) + case AEXTSWSLI: + return OPVCC(31, 445, 0, 0) + case AEXTSWSLICC: + return OPVCC(31, 445, 0, 1) + + case ASTSW: + return OPVCC(31, 725, 0, 0) + + case ASUBC: + return OPVCC(8, 0, 0, 0) + + case ATW: + return OPVCC(3, 0, 0, 0) + case ATD: + return OPVCC(2, 0, 0, 0) + + /* Vector (VMX/Altivec) instructions */ + /* ISA 2.03 enables these for PPC970. For POWERx processors, these */ + /* are enabled starting at POWER6 (ISA 2.05). 
*/ + case AVSPLTB: + return OPVX(4, 524, 0, 0) /* vspltb - v2.03 */ + case AVSPLTH: + return OPVX(4, 588, 0, 0) /* vsplth - v2.03 */ + case AVSPLTW: + return OPVX(4, 652, 0, 0) /* vspltw - v2.03 */ + + case AVSPLTISB: + return OPVX(4, 780, 0, 0) /* vspltisb - v2.03 */ + case AVSPLTISH: + return OPVX(4, 844, 0, 0) /* vspltish - v2.03 */ + case AVSPLTISW: + return OPVX(4, 908, 0, 0) /* vspltisw - v2.03 */ + /* End of vector instructions */ + + case AFTDIV: + return OPVCC(63, 128, 0, 0) /* ftdiv - v2.06 */ + case AFTSQRT: + return OPVCC(63, 160, 0, 0) /* ftsqrt - v2.06 */ + + case AXOR: + return OPVCC(26, 0, 0, 0) /* XORIL */ + case AXORIS: + return OPVCC(27, 0, 0, 0) /* XORIS */ + } + + c.ctxt.Diag("bad opcode i/r or i/r/r %v", a) + return 0 +} + +/* + * load o(a),d + */ +func (c *ctxt9) opload(a obj.As) uint32 { + switch a { + case AMOVD: + return OPVCC(58, 0, 0, 0) /* ld */ + case AMOVDU: + return OPVCC(58, 0, 0, 1) /* ldu */ + case AMOVWZ: + return OPVCC(32, 0, 0, 0) /* lwz */ + case AMOVWZU: + return OPVCC(33, 0, 0, 0) /* lwzu */ + case AMOVW: + return OPVCC(58, 0, 0, 0) | 1<<1 /* lwa */ + case ALXV: + return OPDQ(61, 1, 0) /* lxv - ISA v3.0 */ + case ALXVL: + return OPVXX1(31, 269, 0) /* lxvl - ISA v3.0 */ + case ALXVLL: + return OPVXX1(31, 301, 0) /* lxvll - ISA v3.0 */ + case ALXVX: + return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ + + /* no AMOVWU */ + case AMOVB, AMOVBZ: + return OPVCC(34, 0, 0, 0) + /* load */ + + case AMOVBU, AMOVBZU: + return OPVCC(35, 0, 0, 0) + case AFMOVD: + return OPVCC(50, 0, 0, 0) + case AFMOVDU: + return OPVCC(51, 0, 0, 0) + case AFMOVS: + return OPVCC(48, 0, 0, 0) + case AFMOVSU: + return OPVCC(49, 0, 0, 0) + case AMOVH: + return OPVCC(42, 0, 0, 0) + case AMOVHU: + return OPVCC(43, 0, 0, 0) + case AMOVHZ: + return OPVCC(40, 0, 0, 0) + case AMOVHZU: + return OPVCC(41, 0, 0, 0) + case AMOVMW: + return OPVCC(46, 0, 0, 0) /* lmw */ + } + + c.ctxt.Diag("bad load opcode %v", a) + return 0 +} + +/* + * indexed load a(b),d + */ +func (c *ctxt9) oploadx(a obj.As) uint32 { + switch a { + case AMOVWZ: + return OPVCC(31, 23, 0, 0) /* lwzx */ + case AMOVWZU: + return OPVCC(31, 55, 0, 0) /* lwzux */ + case AMOVW: + return OPVCC(31, 341, 0, 0) /* lwax */ + case AMOVWU: + return OPVCC(31, 373, 0, 0) /* lwaux */ + + case AMOVB, AMOVBZ: + return OPVCC(31, 87, 0, 0) /* lbzx */ + + case AMOVBU, AMOVBZU: + return OPVCC(31, 119, 0, 0) /* lbzux */ + case AFMOVD: + return OPVCC(31, 599, 0, 0) /* lfdx */ + case AFMOVDU: + return OPVCC(31, 631, 0, 0) /* lfdux */ + case AFMOVS: + return OPVCC(31, 535, 0, 0) /* lfsx */ + case AFMOVSU: + return OPVCC(31, 567, 0, 0) /* lfsux */ + case AFMOVSX: + return OPVCC(31, 855, 0, 0) /* lfiwax - power6, isa 2.05 */ + case AFMOVSZ: + return OPVCC(31, 887, 0, 0) /* lfiwzx - power7, isa 2.06 */ + case AMOVH: + return OPVCC(31, 343, 0, 0) /* lhax */ + case AMOVHU: + return OPVCC(31, 375, 0, 0) /* lhaux */ + case AMOVHBR: + return OPVCC(31, 790, 0, 0) /* lhbrx */ + case AMOVWBR: + return OPVCC(31, 534, 0, 0) /* lwbrx */ + case AMOVDBR: + return OPVCC(31, 532, 0, 0) /* ldbrx */ + case AMOVHZ: + return OPVCC(31, 279, 0, 0) /* lhzx */ + case AMOVHZU: + return OPVCC(31, 311, 0, 0) /* lhzux */ + case ALBAR: + return OPVCC(31, 52, 0, 0) /* lbarx */ + case ALHAR: + return OPVCC(31, 116, 0, 0) /* lharx */ + case ALWAR: + return OPVCC(31, 20, 0, 0) /* lwarx */ + case ALDAR: + return OPVCC(31, 84, 0, 0) /* ldarx */ + case ALSW: + return OPVCC(31, 533, 0, 0) /* lswx */ + case AMOVD: + return OPVCC(31, 21, 0, 0) /* ldx */ + case AMOVDU: + return OPVCC(31, 53, 0, 
0) /* ldux */ + + /* Vector (VMX/Altivec) instructions */ + case ALVEBX: + return OPVCC(31, 7, 0, 0) /* lvebx - v2.03 */ + case ALVEHX: + return OPVCC(31, 39, 0, 0) /* lvehx - v2.03 */ + case ALVEWX: + return OPVCC(31, 71, 0, 0) /* lvewx - v2.03 */ + case ALVX: + return OPVCC(31, 103, 0, 0) /* lvx - v2.03 */ + case ALVXL: + return OPVCC(31, 359, 0, 0) /* lvxl - v2.03 */ + case ALVSL: + return OPVCC(31, 6, 0, 0) /* lvsl - v2.03 */ + case ALVSR: + return OPVCC(31, 38, 0, 0) /* lvsr - v2.03 */ + /* End of vector instructions */ + + /* Vector scalar (VSX) instructions */ + case ALXVX: + return OPVXX1(31, 268, 0) /* lxvx - ISA v3.0 */ + case ALXVD2X: + return OPVXX1(31, 844, 0) /* lxvd2x - v2.06 */ + case ALXVW4X: + return OPVXX1(31, 780, 0) /* lxvw4x - v2.06 */ + case ALXVH8X: + return OPVXX1(31, 812, 0) /* lxvh8x - v3.00 */ + case ALXVB16X: + return OPVXX1(31, 876, 0) /* lxvb16x - v3.00 */ + case ALXVDSX: + return OPVXX1(31, 332, 0) /* lxvdsx - v2.06 */ + case ALXSDX: + return OPVXX1(31, 588, 0) /* lxsdx - v2.06 */ + case ALXSIWAX: + return OPVXX1(31, 76, 0) /* lxsiwax - v2.07 */ + case ALXSIWZX: + return OPVXX1(31, 12, 0) /* lxsiwzx - v2.07 */ + } + + c.ctxt.Diag("bad loadx opcode %v", a) + return 0 +} + +/* + * store s,o(d) + */ +func (c *ctxt9) opstore(a obj.As) uint32 { + switch a { + case AMOVB, AMOVBZ: + return OPVCC(38, 0, 0, 0) /* stb */ + + case AMOVBU, AMOVBZU: + return OPVCC(39, 0, 0, 0) /* stbu */ + case AFMOVD: + return OPVCC(54, 0, 0, 0) /* stfd */ + case AFMOVDU: + return OPVCC(55, 0, 0, 0) /* stfdu */ + case AFMOVS: + return OPVCC(52, 0, 0, 0) /* stfs */ + case AFMOVSU: + return OPVCC(53, 0, 0, 0) /* stfsu */ + + case AMOVHZ, AMOVH: + return OPVCC(44, 0, 0, 0) /* sth */ + + case AMOVHZU, AMOVHU: + return OPVCC(45, 0, 0, 0) /* sthu */ + case AMOVMW: + return OPVCC(47, 0, 0, 0) /* stmw */ + case ASTSW: + return OPVCC(31, 725, 0, 0) /* stswi */ + + case AMOVWZ, AMOVW: + return OPVCC(36, 0, 0, 0) /* stw */ + + case AMOVWZU, AMOVWU: + return OPVCC(37, 0, 0, 0) /* stwu */ + case AMOVD: + return OPVCC(62, 0, 0, 0) /* std */ + case AMOVDU: + return OPVCC(62, 0, 0, 1) /* stdu */ + case ASTXV: + return OPDQ(61, 5, 0) /* stxv ISA 3.0 */ + case ASTXVL: + return OPVXX1(31, 397, 0) /* stxvl ISA 3.0 */ + case ASTXVLL: + return OPVXX1(31, 429, 0) /* stxvll ISA 3.0 */ + case ASTXVX: + return OPVXX1(31, 396, 0) /* stxvx - ISA v3.0 */ + + } + + c.ctxt.Diag("unknown store opcode %v", a) + return 0 +} + +/* + * indexed store s,a(b) + */ +func (c *ctxt9) opstorex(a obj.As) uint32 { + switch a { + case AMOVB, AMOVBZ: + return OPVCC(31, 215, 0, 0) /* stbx */ + + case AMOVBU, AMOVBZU: + return OPVCC(31, 247, 0, 0) /* stbux */ + case AFMOVD: + return OPVCC(31, 727, 0, 0) /* stfdx */ + case AFMOVDU: + return OPVCC(31, 759, 0, 0) /* stfdux */ + case AFMOVS: + return OPVCC(31, 663, 0, 0) /* stfsx */ + case AFMOVSU: + return OPVCC(31, 695, 0, 0) /* stfsux */ + case AFMOVSX: + return OPVCC(31, 983, 0, 0) /* stfiwx */ + + case AMOVHZ, AMOVH: + return OPVCC(31, 407, 0, 0) /* sthx */ + case AMOVHBR: + return OPVCC(31, 918, 0, 0) /* sthbrx */ + + case AMOVHZU, AMOVHU: + return OPVCC(31, 439, 0, 0) /* sthux */ + + case AMOVWZ, AMOVW: + return OPVCC(31, 151, 0, 0) /* stwx */ + + case AMOVWZU, AMOVWU: + return OPVCC(31, 183, 0, 0) /* stwux */ + case ASTSW: + return OPVCC(31, 661, 0, 0) /* stswx */ + case AMOVWBR: + return OPVCC(31, 662, 0, 0) /* stwbrx */ + case AMOVDBR: + return OPVCC(31, 660, 0, 0) /* stdbrx */ + case ASTBCCC: + return OPVCC(31, 694, 0, 1) /* stbcx. 
*/ + case ASTHCCC: + return OPVCC(31, 726, 0, 1) /* sthcx. */ + case ASTWCCC: + return OPVCC(31, 150, 0, 1) /* stwcx. */ + case ASTDCCC: + return OPVCC(31, 214, 0, 1) /* stdcx. */ + case AMOVD: + return OPVCC(31, 149, 0, 0) /* stdx */ + case AMOVDU: + return OPVCC(31, 181, 0, 0) /* stdux */ + + /* Vector (VMX/Altivec) instructions */ + case ASTVEBX: + return OPVCC(31, 135, 0, 0) /* stvebx - v2.03 */ + case ASTVEHX: + return OPVCC(31, 167, 0, 0) /* stvehx - v2.03 */ + case ASTVEWX: + return OPVCC(31, 199, 0, 0) /* stvewx - v2.03 */ + case ASTVX: + return OPVCC(31, 231, 0, 0) /* stvx - v2.03 */ + case ASTVXL: + return OPVCC(31, 487, 0, 0) /* stvxl - v2.03 */ + /* End of vector instructions */ + + /* Vector scalar (VSX) instructions */ + case ASTXVX: + return OPVXX1(31, 396, 0) /* stxvx - v3.0 */ + case ASTXVD2X: + return OPVXX1(31, 972, 0) /* stxvd2x - v2.06 */ + case ASTXVW4X: + return OPVXX1(31, 908, 0) /* stxvw4x - v2.06 */ + case ASTXVH8X: + return OPVXX1(31, 940, 0) /* stxvh8x - v3.0 */ + case ASTXVB16X: + return OPVXX1(31, 1004, 0) /* stxvb16x - v3.0 */ + + case ASTXSDX: + return OPVXX1(31, 716, 0) /* stxsdx - v2.06 */ + + case ASTXSIWX: + return OPVXX1(31, 140, 0) /* stxsiwx - v2.07 */ + + /* End of vector scalar instructions */ + + } + + c.ctxt.Diag("unknown storex opcode %v", a) + return 0 +}
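
Note on the encoding helpers: OPVCC, OPVX, OPVXX1, AOP_RRR and the related functions used throughout these opcode tables are defined elsewhere in asm9.go and are not part of this hunk. Purely as orientation, the standalone sketch below mirrors the field layout that OPVCC and AOP_RRR appear to implement for X/XO-form instructions; the lowercase helper names are hypothetical and only the bit positions are taken from the Power ISA (big-endian bit numbering: bit 0 is the most significant).

package main

import "fmt"

// opvcc sketches the fixed part of an X/XO-form word: primary opcode in
// bits 0-5, OE in bit 21, extended opcode in bits 22-30, Rc in bit 31.
func opvcc(o, xo, oe, rc uint32) uint32 {
	return o<<26 | xo<<1 | oe<<10 | rc&1
}

// aopRRR packs the RT/RA/RB register fields into bits 6-10, 11-15 and 16-20.
func aopRRR(op, d, a, b uint32) uint32 {
	return op | (d&31)<<21 | (a&31)<<16 | (b&31)<<11
}

func main() {
	// oprrr(AADD) in the table above returns OPVCC(31, 266, 0, 0), i.e. 0x7c000214.
	add := opvcc(31, 266, 0, 0)
	// Filling in RT=3, RA=4, RB=5 should give the machine word for "add r3,r4,r5".
	fmt.Printf("%#x\n", aopRRR(add, 3, 4, 5)) // expected: 0x7c642a14
}

The D-form load and store opcodes returned by opload and opstore (for example OPVCC(58, 0, 0, 0) for ld) are presumably combined with the register fields and a 16-bit displacement in an analogous way by the asmout cases that call them.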