author    Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-28 13:14:23 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org> 2024-04-28 13:14:23 +0000
commit    73df946d56c74384511a194dd01dbe099584fd1a (patch)
tree      fd0bcea490dd81327ddfbb31e215439672c9a068 /src/cmd/internal/obj/wasm
parent    Initial commit. (diff)
Adding upstream version 1.16.10.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/internal/obj/wasm')
-rw-r--r-- src/cmd/internal/obj/wasm/a.out.go   |  331
-rw-r--r-- src/cmd/internal/obj/wasm/anames.go  |  208
-rw-r--r-- src/cmd/internal/obj/wasm/wasmobj.go | 1185
3 files changed, 1724 insertions, 0 deletions
diff --git a/src/cmd/internal/obj/wasm/a.out.go b/src/cmd/internal/obj/wasm/a.out.go
new file mode 100644
index 0000000..72ecaa9
--- /dev/null
+++ b/src/cmd/internal/obj/wasm/a.out.go
@@ -0,0 +1,331 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import "cmd/internal/obj"
+
+//go:generate go run ../stringer.go -i $GOFILE -o anames.go -p wasm
+
+const (
+ /* mark flags */
+ DONE = 1 << iota
+ PRESERVEFLAGS // not allowed to clobber flags
+)
+
+/*
+ * wasm
+ */
+const (
+ ACallImport = obj.ABaseWasm + obj.A_ARCHSPECIFIC + iota
+ AGet
+ ASet
+ ATee
+ ANot // alias for I32Eqz
+
+ // The following are low-level WebAssembly instructions.
+ // Their order matters, since it matches the opcode encoding.
+ // Gaps in the encoding are indicated by comments.
+
+ AUnreachable // opcode 0x00
+ ANop
+ ABlock
+ ALoop
+ AIf
+ AElse
+
+ AEnd // opcode 0x0B
+ ABr
+ ABrIf
+ ABrTable
+ // ACall and AReturn are WebAssembly instructions. obj.ACALL and obj.ARET are higher-level instructions
+ // with Go semantics, e.g. they manipulate the Go stack in linear memory.
+ AReturn
+ ACall
+ ACallIndirect
+
+ ADrop // opcode 0x1A
+ ASelect
+
+ ALocalGet // opcode 0x20
+ ALocalSet
+ ALocalTee
+ AGlobalGet
+ AGlobalSet
+
+ AI32Load // opcode 0x28
+ AI64Load
+ AF32Load
+ AF64Load
+ AI32Load8S
+ AI32Load8U
+ AI32Load16S
+ AI32Load16U
+ AI64Load8S
+ AI64Load8U
+ AI64Load16S
+ AI64Load16U
+ AI64Load32S
+ AI64Load32U
+ AI32Store
+ AI64Store
+ AF32Store
+ AF64Store
+ AI32Store8
+ AI32Store16
+ AI64Store8
+ AI64Store16
+ AI64Store32
+ ACurrentMemory
+ AGrowMemory
+
+ AI32Const
+ AI64Const
+ AF32Const
+ AF64Const
+
+ AI32Eqz
+ AI32Eq
+ AI32Ne
+ AI32LtS
+ AI32LtU
+ AI32GtS
+ AI32GtU
+ AI32LeS
+ AI32LeU
+ AI32GeS
+ AI32GeU
+
+ AI64Eqz
+ AI64Eq
+ AI64Ne
+ AI64LtS
+ AI64LtU
+ AI64GtS
+ AI64GtU
+ AI64LeS
+ AI64LeU
+ AI64GeS
+ AI64GeU
+
+ AF32Eq
+ AF32Ne
+ AF32Lt
+ AF32Gt
+ AF32Le
+ AF32Ge
+
+ AF64Eq
+ AF64Ne
+ AF64Lt
+ AF64Gt
+ AF64Le
+ AF64Ge
+
+ AI32Clz
+ AI32Ctz
+ AI32Popcnt
+ AI32Add
+ AI32Sub
+ AI32Mul
+ AI32DivS
+ AI32DivU
+ AI32RemS
+ AI32RemU
+ AI32And
+ AI32Or
+ AI32Xor
+ AI32Shl
+ AI32ShrS
+ AI32ShrU
+ AI32Rotl
+ AI32Rotr
+
+ AI64Clz
+ AI64Ctz
+ AI64Popcnt
+ AI64Add
+ AI64Sub
+ AI64Mul
+ AI64DivS
+ AI64DivU
+ AI64RemS
+ AI64RemU
+ AI64And
+ AI64Or
+ AI64Xor
+ AI64Shl
+ AI64ShrS
+ AI64ShrU
+ AI64Rotl
+ AI64Rotr
+
+ AF32Abs
+ AF32Neg
+ AF32Ceil
+ AF32Floor
+ AF32Trunc
+ AF32Nearest
+ AF32Sqrt
+ AF32Add
+ AF32Sub
+ AF32Mul
+ AF32Div
+ AF32Min
+ AF32Max
+ AF32Copysign
+
+ AF64Abs
+ AF64Neg
+ AF64Ceil
+ AF64Floor
+ AF64Trunc
+ AF64Nearest
+ AF64Sqrt
+ AF64Add
+ AF64Sub
+ AF64Mul
+ AF64Div
+ AF64Min
+ AF64Max
+ AF64Copysign
+
+ AI32WrapI64
+ AI32TruncF32S
+ AI32TruncF32U
+ AI32TruncF64S
+ AI32TruncF64U
+ AI64ExtendI32S
+ AI64ExtendI32U
+ AI64TruncF32S
+ AI64TruncF32U
+ AI64TruncF64S
+ AI64TruncF64U
+ AF32ConvertI32S
+ AF32ConvertI32U
+ AF32ConvertI64S
+ AF32ConvertI64U
+ AF32DemoteF64
+ AF64ConvertI32S
+ AF64ConvertI32U
+ AF64ConvertI64S
+ AF64ConvertI64U
+ AF64PromoteF32
+ AI32ReinterpretF32
+ AI64ReinterpretF64
+ AF32ReinterpretI32
+ AF64ReinterpretI64
+ AI32Extend8S
+ AI32Extend16S
+ AI64Extend8S
+ AI64Extend16S
+ AI64Extend32S
+
+ AI32TruncSatF32S // opcode 0xFC 0x00
+ AI32TruncSatF32U
+ AI32TruncSatF64S
+ AI32TruncSatF64U
+ AI64TruncSatF32S
+ AI64TruncSatF32U
+ AI64TruncSatF64S
+ AI64TruncSatF64U
+
+ ALast // Sentinel: End of low-level WebAssembly instructions.
+
+ ARESUMEPOINT
+ // ACALLNORESUME is a call which is not followed by a resume point.
+ // It is allowed inside of WebAssembly blocks, whereas obj.ACALL is not.
+ // However, it is not allowed to switch goroutines while inside of an ACALLNORESUME call.
+ ACALLNORESUME
+
+ ARETUNWIND
+
+ AMOVB
+ AMOVH
+ AMOVW
+ AMOVD
+
+ AWORD
+ ALAST
+)
+
+const (
+ REG_NONE = 0
+)
+
+const (
+ // globals
+ REG_SP = obj.RBaseWasm + iota // SP is currently 32-bit, until 64-bit memory operations are available
+ REG_CTXT
+ REG_g
+ // RET* are used by runtime.return0 and runtime.reflectcall. These functions pass return values in registers.
+ REG_RET0
+ REG_RET1
+ REG_RET2
+ REG_RET3
+ REG_PAUSE
+
+ // i32 locals
+ REG_R0
+ REG_R1
+ REG_R2
+ REG_R3
+ REG_R4
+ REG_R5
+ REG_R6
+ REG_R7
+ REG_R8
+ REG_R9
+ REG_R10
+ REG_R11
+ REG_R12
+ REG_R13
+ REG_R14
+ REG_R15
+
+ // f32 locals
+ REG_F0
+ REG_F1
+ REG_F2
+ REG_F3
+ REG_F4
+ REG_F5
+ REG_F6
+ REG_F7
+ REG_F8
+ REG_F9
+ REG_F10
+ REG_F11
+ REG_F12
+ REG_F13
+ REG_F14
+ REG_F15
+
+ // f64 locals
+ REG_F16
+ REG_F17
+ REG_F18
+ REG_F19
+ REG_F20
+ REG_F21
+ REG_F22
+ REG_F23
+ REG_F24
+ REG_F25
+ REG_F26
+ REG_F27
+ REG_F28
+ REG_F29
+ REG_F30
+ REG_F31
+
+ REG_PC_B // also first parameter, i32
+
+ MAXREG
+
+ MINREG = REG_SP
+ REGSP = REG_SP
+ REGCTXT = REG_CTXT
+ REGG = REG_g
+)
diff --git a/src/cmd/internal/obj/wasm/anames.go b/src/cmd/internal/obj/wasm/anames.go
new file mode 100644
index 0000000..9412384
--- /dev/null
+++ b/src/cmd/internal/obj/wasm/anames.go
@@ -0,0 +1,208 @@
+// Code generated by stringer -i a.out.go -o anames.go -p wasm; DO NOT EDIT.
+
+package wasm
+
+import "cmd/internal/obj"
+
+var Anames = []string{
+ obj.A_ARCHSPECIFIC: "CallImport",
+ "Get",
+ "Set",
+ "Tee",
+ "Not",
+ "Unreachable",
+ "Nop",
+ "Block",
+ "Loop",
+ "If",
+ "Else",
+ "End",
+ "Br",
+ "BrIf",
+ "BrTable",
+ "Return",
+ "Call",
+ "CallIndirect",
+ "Drop",
+ "Select",
+ "LocalGet",
+ "LocalSet",
+ "LocalTee",
+ "GlobalGet",
+ "GlobalSet",
+ "I32Load",
+ "I64Load",
+ "F32Load",
+ "F64Load",
+ "I32Load8S",
+ "I32Load8U",
+ "I32Load16S",
+ "I32Load16U",
+ "I64Load8S",
+ "I64Load8U",
+ "I64Load16S",
+ "I64Load16U",
+ "I64Load32S",
+ "I64Load32U",
+ "I32Store",
+ "I64Store",
+ "F32Store",
+ "F64Store",
+ "I32Store8",
+ "I32Store16",
+ "I64Store8",
+ "I64Store16",
+ "I64Store32",
+ "CurrentMemory",
+ "GrowMemory",
+ "I32Const",
+ "I64Const",
+ "F32Const",
+ "F64Const",
+ "I32Eqz",
+ "I32Eq",
+ "I32Ne",
+ "I32LtS",
+ "I32LtU",
+ "I32GtS",
+ "I32GtU",
+ "I32LeS",
+ "I32LeU",
+ "I32GeS",
+ "I32GeU",
+ "I64Eqz",
+ "I64Eq",
+ "I64Ne",
+ "I64LtS",
+ "I64LtU",
+ "I64GtS",
+ "I64GtU",
+ "I64LeS",
+ "I64LeU",
+ "I64GeS",
+ "I64GeU",
+ "F32Eq",
+ "F32Ne",
+ "F32Lt",
+ "F32Gt",
+ "F32Le",
+ "F32Ge",
+ "F64Eq",
+ "F64Ne",
+ "F64Lt",
+ "F64Gt",
+ "F64Le",
+ "F64Ge",
+ "I32Clz",
+ "I32Ctz",
+ "I32Popcnt",
+ "I32Add",
+ "I32Sub",
+ "I32Mul",
+ "I32DivS",
+ "I32DivU",
+ "I32RemS",
+ "I32RemU",
+ "I32And",
+ "I32Or",
+ "I32Xor",
+ "I32Shl",
+ "I32ShrS",
+ "I32ShrU",
+ "I32Rotl",
+ "I32Rotr",
+ "I64Clz",
+ "I64Ctz",
+ "I64Popcnt",
+ "I64Add",
+ "I64Sub",
+ "I64Mul",
+ "I64DivS",
+ "I64DivU",
+ "I64RemS",
+ "I64RemU",
+ "I64And",
+ "I64Or",
+ "I64Xor",
+ "I64Shl",
+ "I64ShrS",
+ "I64ShrU",
+ "I64Rotl",
+ "I64Rotr",
+ "F32Abs",
+ "F32Neg",
+ "F32Ceil",
+ "F32Floor",
+ "F32Trunc",
+ "F32Nearest",
+ "F32Sqrt",
+ "F32Add",
+ "F32Sub",
+ "F32Mul",
+ "F32Div",
+ "F32Min",
+ "F32Max",
+ "F32Copysign",
+ "F64Abs",
+ "F64Neg",
+ "F64Ceil",
+ "F64Floor",
+ "F64Trunc",
+ "F64Nearest",
+ "F64Sqrt",
+ "F64Add",
+ "F64Sub",
+ "F64Mul",
+ "F64Div",
+ "F64Min",
+ "F64Max",
+ "F64Copysign",
+ "I32WrapI64",
+ "I32TruncF32S",
+ "I32TruncF32U",
+ "I32TruncF64S",
+ "I32TruncF64U",
+ "I64ExtendI32S",
+ "I64ExtendI32U",
+ "I64TruncF32S",
+ "I64TruncF32U",
+ "I64TruncF64S",
+ "I64TruncF64U",
+ "F32ConvertI32S",
+ "F32ConvertI32U",
+ "F32ConvertI64S",
+ "F32ConvertI64U",
+ "F32DemoteF64",
+ "F64ConvertI32S",
+ "F64ConvertI32U",
+ "F64ConvertI64S",
+ "F64ConvertI64U",
+ "F64PromoteF32",
+ "I32ReinterpretF32",
+ "I64ReinterpretF64",
+ "F32ReinterpretI32",
+ "F64ReinterpretI64",
+ "I32Extend8S",
+ "I32Extend16S",
+ "I64Extend8S",
+ "I64Extend16S",
+ "I64Extend32S",
+ "I32TruncSatF32S",
+ "I32TruncSatF32U",
+ "I32TruncSatF64S",
+ "I32TruncSatF64U",
+ "I64TruncSatF32S",
+ "I64TruncSatF32U",
+ "I64TruncSatF64S",
+ "I64TruncSatF64U",
+ "Last",
+ "RESUMEPOINT",
+ "CALLNORESUME",
+ "RETUNWIND",
+ "MOVB",
+ "MOVH",
+ "MOVW",
+ "MOVD",
+ "WORD",
+ "LAST",
+}
diff --git a/src/cmd/internal/obj/wasm/wasmobj.go b/src/cmd/internal/obj/wasm/wasmobj.go
new file mode 100644
index 0000000..2e9890d
--- /dev/null
+++ b/src/cmd/internal/obj/wasm/wasmobj.go
@@ -0,0 +1,1185 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "bytes"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+)
+
+var Register = map[string]int16{
+ "SP": REG_SP,
+ "CTXT": REG_CTXT,
+ "g": REG_g,
+ "RET0": REG_RET0,
+ "RET1": REG_RET1,
+ "RET2": REG_RET2,
+ "RET3": REG_RET3,
+ "PAUSE": REG_PAUSE,
+
+ "R0": REG_R0,
+ "R1": REG_R1,
+ "R2": REG_R2,
+ "R3": REG_R3,
+ "R4": REG_R4,
+ "R5": REG_R5,
+ "R6": REG_R6,
+ "R7": REG_R7,
+ "R8": REG_R8,
+ "R9": REG_R9,
+ "R10": REG_R10,
+ "R11": REG_R11,
+ "R12": REG_R12,
+ "R13": REG_R13,
+ "R14": REG_R14,
+ "R15": REG_R15,
+
+ "F0": REG_F0,
+ "F1": REG_F1,
+ "F2": REG_F2,
+ "F3": REG_F3,
+ "F4": REG_F4,
+ "F5": REG_F5,
+ "F6": REG_F6,
+ "F7": REG_F7,
+ "F8": REG_F8,
+ "F9": REG_F9,
+ "F10": REG_F10,
+ "F11": REG_F11,
+ "F12": REG_F12,
+ "F13": REG_F13,
+ "F14": REG_F14,
+ "F15": REG_F15,
+
+ "F16": REG_F16,
+ "F17": REG_F17,
+ "F18": REG_F18,
+ "F19": REG_F19,
+ "F20": REG_F20,
+ "F21": REG_F21,
+ "F22": REG_F22,
+ "F23": REG_F23,
+ "F24": REG_F24,
+ "F25": REG_F25,
+ "F26": REG_F26,
+ "F27": REG_F27,
+ "F28": REG_F28,
+ "F29": REG_F29,
+ "F30": REG_F30,
+ "F31": REG_F31,
+
+ "PC_B": REG_PC_B,
+}
+
+var registerNames []string
+
+func init() {
+ obj.RegisterRegister(MINREG, MAXREG, rconv)
+ obj.RegisterOpcode(obj.ABaseWasm, Anames)
+
+ registerNames = make([]string, MAXREG-MINREG)
+ for name, reg := range Register {
+ registerNames[reg-MINREG] = name
+ }
+}
+
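+// rconv returns the assembler name of register r.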
+func rconv(r int) string {
+ return registerNames[r-MINREG]
+}
+
+var unaryDst = map[obj.As]bool{
+ ASet: true,
+ ATee: true,
+ ACall: true,
+ ACallIndirect: true,
+ ACallImport: true,
+ ABr: true,
+ ABrIf: true,
+ ABrTable: true,
+ AI32Store: true,
+ AI64Store: true,
+ AF32Store: true,
+ AF64Store: true,
+ AI32Store8: true,
+ AI32Store16: true,
+ AI64Store8: true,
+ AI64Store16: true,
+ AI64Store32: true,
+ ACALLNORESUME: true,
+}
+
+var Linkwasm = obj.LinkArch{
+ Arch: sys.ArchWasm,
+ Init: instinit,
+ Preprocess: preprocess,
+ Assemble: assemble,
+ UnaryDst: unaryDst,
+}
+
+var (
+ morestack *obj.LSym
+ morestackNoCtxt *obj.LSym
+ gcWriteBarrier *obj.LSym
+ sigpanic *obj.LSym
+ deferreturn *obj.LSym
+ jmpdefer *obj.LSym
+)
+
+const (
+ /* mark flags */
+ WasmImport = 1 << 0
+)
+
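+// instinit caches the runtime symbols that preprocess treats specially.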
+func instinit(ctxt *obj.Link) {
+ morestack = ctxt.Lookup("runtime.morestack")
+ morestackNoCtxt = ctxt.Lookup("runtime.morestack_noctxt")
+ gcWriteBarrier = ctxt.LookupABI("runtime.gcWriteBarrier", obj.ABIInternal)
+ sigpanic = ctxt.LookupABI("runtime.sigpanic", obj.ABIInternal)
+ deferreturn = ctxt.LookupABI("runtime.deferreturn", obj.ABIInternal)
+ // jmpdefer is defined in assembly as ABI0, but what we're
+ // looking for is the *call* to jmpdefer from the Go function
+ // deferreturn, so we're looking for the ABIInternal version
+ // of jmpdefer that's called by Go.
+ jmpdefer = ctxt.LookupABI(`"".jmpdefer`, obj.ABIInternal)
+}
+
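+// preprocess lowers the generic Go assembly of s to WebAssembly
+// instructions: it emits the wrapper prologue, the frame setup and
+// stack-split check, and resume points, and rewrites JMP, CALL, RET
+// and MOV* into low-level Wasm operations.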
+func preprocess(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
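+ // appendp emits an instruction after p, overwriting p in place when it
+ // is an ANOP placeholder. A single operand becomes the destination for
+ // ops listed in unaryDst and the source otherwise.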
+ appendp := func(p *obj.Prog, as obj.As, args ...obj.Addr) *obj.Prog {
+ if p.As != obj.ANOP {
+ p2 := obj.Appendp(p, newprog)
+ p2.Pc = p.Pc
+ p = p2
+ }
+ p.As = as
+ switch len(args) {
+ case 0:
+ p.From = obj.Addr{}
+ p.To = obj.Addr{}
+ case 1:
+ if unaryDst[as] {
+ p.From = obj.Addr{}
+ p.To = args[0]
+ } else {
+ p.From = args[0]
+ p.To = obj.Addr{}
+ }
+ case 2:
+ p.From = args[0]
+ p.To = args[1]
+ default:
+ panic("bad args")
+ }
+ return p
+ }
+
+ framesize := s.Func().Text.To.Offset
+ if framesize < 0 {
+ panic("bad framesize")
+ }
+ s.Func().Args = s.Func().Text.To.Val.(int32)
+ s.Func().Locals = int32(framesize)
+
+ if s.Func().Text.From.Sym.Wrapper() {
+ // if g._panic != nil && g._panic.argp == FP {
+ // g._panic.argp = bottom-of-frame
+ // }
+ //
+ // MOVD g_panic(g), R0
+ // Get R0
+ // I64Eqz
+ // Not
+ // If
+ // Get SP
+ // I64ExtendI32U
+ // I64Const $framesize+8
+ // I64Add
+ // I64Load panic_argp(R0)
+ // I64Eq
+ // If
+ // MOVD SP, panic_argp(R0)
+ // End
+ // End
+
+ gpanic := obj.Addr{
+ Type: obj.TYPE_MEM,
+ Reg: REGG,
+ Offset: 4 * 8, // g_panic
+ }
+
+ panicargp := obj.Addr{
+ Type: obj.TYPE_MEM,
+ Reg: REG_R0,
+ Offset: 0, // panic.argp
+ }
+
+ p := s.Func().Text
+ p = appendp(p, AMOVD, gpanic, regAddr(REG_R0))
+
+ p = appendp(p, AGet, regAddr(REG_R0))
+ p = appendp(p, AI64Eqz)
+ p = appendp(p, ANot)
+ p = appendp(p, AIf)
+
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI64ExtendI32U)
+ p = appendp(p, AI64Const, constAddr(framesize+8))
+ p = appendp(p, AI64Add)
+ p = appendp(p, AI64Load, panicargp)
+
+ p = appendp(p, AI64Eq)
+ p = appendp(p, AIf)
+ p = appendp(p, AMOVD, regAddr(REG_SP), panicargp)
+ p = appendp(p, AEnd)
+
+ p = appendp(p, AEnd)
+ }
+
+ if framesize > 0 {
+ p := s.Func().Text
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI32Const, constAddr(framesize))
+ p = appendp(p, AI32Sub)
+ p = appendp(p, ASet, regAddr(REG_SP))
+ p.Spadj = int32(framesize)
+ }
+
+ // Introduce resume points for CALL instructions
+ // and collect other explicit resume points.
+ numResumePoints := 0
+ explicitBlockDepth := 0
+ pc := int64(0) // pc is only incremented when necessary; this avoids bloating the BrTable instruction
+ var tableIdxs []uint64
+ tablePC := int64(0)
+ base := ctxt.PosTable.Pos(s.Func().Text.Pos).Base()
+ for p := s.Func().Text; p != nil; p = p.Link {
+ prevBase := base
+ base = ctxt.PosTable.Pos(p.Pos).Base()
+ switch p.As {
+ case ABlock, ALoop, AIf:
+ explicitBlockDepth++
+
+ case AEnd:
+ if explicitBlockDepth == 0 {
+ panic("End without block")
+ }
+ explicitBlockDepth--
+
+ case ARESUMEPOINT:
+ if explicitBlockDepth != 0 {
+ panic("RESUME can only be used on toplevel")
+ }
+ p.As = AEnd
+ for tablePC <= pc {
+ tableIdxs = append(tableIdxs, uint64(numResumePoints))
+ tablePC++
+ }
+ numResumePoints++
+ pc++
+
+ case obj.ACALL:
+ if explicitBlockDepth != 0 {
+ panic("CALL can only be used on toplevel, try CALLNORESUME instead")
+ }
+ appendp(p, ARESUMEPOINT)
+ }
+
+ p.Pc = pc
+
+ // Increase pc whenever some pc-value table needs a new entry. Don't increase it
+ // more often to avoid bloat of the BrTable instruction.
+ // The "base != prevBase" condition detects inlined instructions. They are an
+ // implicit call, so entering and leaving this section affects the stack trace.
+ if p.As == ACALLNORESUME || p.As == obj.ANOP || p.As == ANop || p.Spadj != 0 || base != prevBase {
+ pc++
+ if p.To.Sym == sigpanic {
+ // The panic stack trace expects the PC at the call of sigpanic,
+ // not the next one. However, runtime.Caller subtracts 1 from the
+ // PC. To make both PC and PC-1 work (have the same line number),
+ // we advance the PC by 2 at sigpanic.
+ pc++
+ }
+ }
+ }
+ tableIdxs = append(tableIdxs, uint64(numResumePoints))
+ s.Size = pc + 1
+
+ if !s.Func().Text.From.Sym.NoSplit() {
+ p := s.Func().Text
+
+ if framesize <= objabi.StackSmall {
+ // small stack: SP <= stackguard
+ // Get SP
+ // Get g
+ // I32WrapI64
+ // I32Load $stackguard0
+ // I32LeU
+
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AGet, regAddr(REGG))
+ p = appendp(p, AI32WrapI64)
+ p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0
+ p = appendp(p, AI32LeU)
+ } else {
+ // large stack: SP-framesize <= stackguard-StackSmall
+ // SP <= stackguard+(framesize-StackSmall)
+ // Get SP
+ // Get g
+ // I32WrapI64
+ // I32Load $stackguard0
+ // I32Const $(framesize-StackSmall)
+ // I32Add
+ // I32LeU
+
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AGet, regAddr(REGG))
+ p = appendp(p, AI32WrapI64)
+ p = appendp(p, AI32Load, constAddr(2*int64(ctxt.Arch.PtrSize))) // G.stackguard0
+ p = appendp(p, AI32Const, constAddr(int64(framesize)-objabi.StackSmall))
+ p = appendp(p, AI32Add)
+ p = appendp(p, AI32LeU)
+ }
+ // TODO(neelance): handle wraparound case
+
+ p = appendp(p, AIf)
+ p = appendp(p, obj.ACALL, constAddr(0))
+ if s.Func().Text.From.Sym.NeedCtxt() {
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestack}
+ } else {
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: morestackNoCtxt}
+ }
+ p = appendp(p, AEnd)
+ }
+
+ // Record the branches targeting the entry loop and the unwind exit;
+ // their targets will be filled in later.
+ var entryPointLoopBranches []*obj.Prog
+ var unwindExitBranches []*obj.Prog
+ currentDepth := 0
+ for p := s.Func().Text; p != nil; p = p.Link {
+ switch p.As {
+ case ABlock, ALoop, AIf:
+ currentDepth++
+ case AEnd:
+ currentDepth--
+ }
+
+ switch p.As {
+ case obj.AJMP:
+ jmp := *p
+ p.As = obj.ANOP
+
+ if jmp.To.Type == obj.TYPE_BRANCH {
+ // jump to basic block
+ p = appendp(p, AI32Const, constAddr(jmp.To.Val.(*obj.Prog).Pc))
+ p = appendp(p, ASet, regAddr(REG_PC_B)) // write next basic block to PC_B
+ p = appendp(p, ABr) // jump to beginning of entryPointLoop
+ entryPointLoopBranches = append(entryPointLoopBranches, p)
+ break
+ }
+
+ // low-level WebAssembly call to function
+ switch jmp.To.Type {
+ case obj.TYPE_MEM:
+ if !notUsePC_B[jmp.To.Sym.Name] {
+ // Set PC_B parameter to function entry.
+ p = appendp(p, AI32Const, constAddr(0))
+ }
+ p = appendp(p, ACall, jmp.To)
+
+ case obj.TYPE_NONE:
+ // (target PC is on stack)
+ p = appendp(p, AI32WrapI64)
+ p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
+ p = appendp(p, AI32ShrU)
+
+ // Set PC_B parameter to function entry.
+ // We need to push this before pushing the target PC_F,
+ // so temporarily pop PC_F, using our REG_PC_B as a
+ // scratch register, and push it back after pushing 0.
+ p = appendp(p, ASet, regAddr(REG_PC_B))
+ p = appendp(p, AI32Const, constAddr(0))
+ p = appendp(p, AGet, regAddr(REG_PC_B))
+
+ p = appendp(p, ACallIndirect)
+
+ default:
+ panic("bad target for JMP")
+ }
+
+ p = appendp(p, AReturn)
+
+ case obj.ACALL, ACALLNORESUME:
+ call := *p
+ p.As = obj.ANOP
+
+ pcAfterCall := call.Link.Pc
+ if call.To.Sym == sigpanic {
+ pcAfterCall-- // sigpanic expects to be called without advancing the pc
+ }
+
+ // jmpdefer manipulates the return address on the stack so deferreturn gets called repeatedly.
+ // Model this in WebAssembly with a loop.
+ if call.To.Sym == deferreturn {
+ p = appendp(p, ALoop)
+ }
+
+ // SP -= 8
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI32Const, constAddr(8))
+ p = appendp(p, AI32Sub)
+ p = appendp(p, ASet, regAddr(REG_SP))
+
+ // write return address to Go stack
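+ // The 64-bit value packs the function into the upper bits (PC_F, via
+ // the relocation against s) and the resume point into the low 16 bits
+ // (PC_B, via the offset).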
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI64Const, obj.Addr{
+ Type: obj.TYPE_ADDR,
+ Name: obj.NAME_EXTERN,
+ Sym: s, // PC_F
+ Offset: pcAfterCall, // PC_B
+ })
+ p = appendp(p, AI64Store, constAddr(0))
+
+ // low-level WebAssembly call to function
+ switch call.To.Type {
+ case obj.TYPE_MEM:
+ if !notUsePC_B[call.To.Sym.Name] {
+ // Set PC_B parameter to function entry.
+ p = appendp(p, AI32Const, constAddr(0))
+ }
+ p = appendp(p, ACall, call.To)
+
+ case obj.TYPE_NONE:
+ // (target PC is on stack)
+ p = appendp(p, AI32WrapI64)
+ p = appendp(p, AI32Const, constAddr(16)) // only needs PC_F bits (16-31), PC_B bits (0-15) are zero
+ p = appendp(p, AI32ShrU)
+
+ // Set PC_B parameter to function entry.
+ // We need to push this before pushing the target PC_F,
+ // so temporarily pop PC_F, using our REG_PC_B as a
+ // scratch register, and push it back after pushing 0.
+ p = appendp(p, ASet, regAddr(REG_PC_B))
+ p = appendp(p, AI32Const, constAddr(0))
+ p = appendp(p, AGet, regAddr(REG_PC_B))
+
+ p = appendp(p, ACallIndirect)
+
+ default:
+ panic("bad target for CALL")
+ }
+
+ // gcWriteBarrier has no return value and never unwinds the stack
+ if call.To.Sym == gcWriteBarrier {
+ break
+ }
+
+ // jmpdefer removes the frame of deferreturn from the Go stack.
+ // However, its WebAssembly function still returns normally,
+ // so we need to return from deferreturn without removing its
+ // stack frame (no RET), because the frame is already gone.
+ if call.To.Sym == jmpdefer {
+ p = appendp(p, AReturn)
+ break
+ }
+
+ // return value of call is on the top of the stack, indicating whether to unwind the WebAssembly stack
+ if call.As == ACALLNORESUME && call.To.Sym != sigpanic { // sigpanic unwinds the stack, but it never resumes
+ // trying to unwind WebAssembly stack but call has no resume point, terminate with error
+ p = appendp(p, AIf)
+ p = appendp(p, obj.AUNDEF)
+ p = appendp(p, AEnd)
+ } else {
+ // unwinding WebAssembly stack to switch goroutine, return 1
+ p = appendp(p, ABrIf)
+ unwindExitBranches = append(unwindExitBranches, p)
+ }
+
+ // jump to before the call if jmpdefer has reset the return address to the call's PC
+ if call.To.Sym == deferreturn {
+ // get PC_B from -8(SP)
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI32Const, constAddr(8))
+ p = appendp(p, AI32Sub)
+ p = appendp(p, AI32Load16U, constAddr(0))
+ p = appendp(p, ATee, regAddr(REG_PC_B))
+
+ p = appendp(p, AI32Const, constAddr(call.Pc))
+ p = appendp(p, AI32Eq)
+ p = appendp(p, ABrIf, constAddr(0))
+ p = appendp(p, AEnd) // end of Loop
+ }
+
+ case obj.ARET, ARETUNWIND:
+ ret := *p
+ p.As = obj.ANOP
+
+ if framesize > 0 {
+ // SP += framesize
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI32Const, constAddr(framesize))
+ p = appendp(p, AI32Add)
+ p = appendp(p, ASet, regAddr(REG_SP))
+ // TODO(neelance): This should theoretically set Spadj, but it only works without.
+ // p.Spadj = int32(-framesize)
+ }
+
+ if ret.To.Type == obj.TYPE_MEM {
+ // Set PC_B parameter to function entry.
+ p = appendp(p, AI32Const, constAddr(0))
+
+ // low-level WebAssembly call to function
+ p = appendp(p, ACall, ret.To)
+ p = appendp(p, AReturn)
+ break
+ }
+
+ // SP += 8
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, AI32Const, constAddr(8))
+ p = appendp(p, AI32Add)
+ p = appendp(p, ASet, regAddr(REG_SP))
+
+ if ret.As == ARETUNWIND {
+ // function needs to unwind the WebAssembly stack, return 1
+ p = appendp(p, AI32Const, constAddr(1))
+ p = appendp(p, AReturn)
+ break
+ }
+
+ // not unwinding the WebAssembly stack, return 0
+ p = appendp(p, AI32Const, constAddr(0))
+ p = appendp(p, AReturn)
+ }
+ }
+
+ for p := s.Func().Text; p != nil; p = p.Link {
+ switch p.From.Name {
+ case obj.NAME_AUTO:
+ p.From.Offset += int64(framesize)
+ case obj.NAME_PARAM:
+ p.From.Reg = REG_SP
+ p.From.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address
+ }
+
+ switch p.To.Name {
+ case obj.NAME_AUTO:
+ p.To.Offset += int64(framesize)
+ case obj.NAME_PARAM:
+ p.To.Reg = REG_SP
+ p.To.Offset += int64(framesize) + 8 // parameters are after the frame and the 8-byte return address
+ }
+
+ switch p.As {
+ case AGet:
+ if p.From.Type == obj.TYPE_ADDR {
+ get := *p
+ p.As = obj.ANOP
+
+ switch get.From.Name {
+ case obj.NAME_EXTERN:
+ p = appendp(p, AI64Const, get.From)
+ case obj.NAME_AUTO, obj.NAME_PARAM:
+ p = appendp(p, AGet, regAddr(get.From.Reg))
+ if get.From.Reg == REG_SP {
+ p = appendp(p, AI64ExtendI32U)
+ }
+ if get.From.Offset != 0 {
+ p = appendp(p, AI64Const, constAddr(get.From.Offset))
+ p = appendp(p, AI64Add)
+ }
+ default:
+ panic("bad Get: invalid name")
+ }
+ }
+
+ case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U:
+ if p.From.Type == obj.TYPE_MEM {
+ as := p.As
+ from := p.From
+
+ p.As = AGet
+ p.From = regAddr(from.Reg)
+
+ if from.Reg != REG_SP {
+ p = appendp(p, AI32WrapI64)
+ }
+
+ p = appendp(p, as, constAddr(from.Offset))
+ }
+
+ case AMOVB, AMOVH, AMOVW, AMOVD:
+ mov := *p
+ p.As = obj.ANOP
+
+ var loadAs obj.As
+ var storeAs obj.As
+ switch mov.As {
+ case AMOVB:
+ loadAs = AI64Load8U
+ storeAs = AI64Store8
+ case AMOVH:
+ loadAs = AI64Load16U
+ storeAs = AI64Store16
+ case AMOVW:
+ loadAs = AI64Load32U
+ storeAs = AI64Store32
+ case AMOVD:
+ loadAs = AI64Load
+ storeAs = AI64Store
+ }
+
+ appendValue := func() {
+ switch mov.From.Type {
+ case obj.TYPE_CONST:
+ p = appendp(p, AI64Const, constAddr(mov.From.Offset))
+
+ case obj.TYPE_ADDR:
+ switch mov.From.Name {
+ case obj.NAME_NONE, obj.NAME_PARAM, obj.NAME_AUTO:
+ p = appendp(p, AGet, regAddr(mov.From.Reg))
+ if mov.From.Reg == REG_SP {
+ p = appendp(p, AI64ExtendI32U)
+ }
+ p = appendp(p, AI64Const, constAddr(mov.From.Offset))
+ p = appendp(p, AI64Add)
+ case obj.NAME_EXTERN:
+ p = appendp(p, AI64Const, mov.From)
+ default:
+ panic("bad name for MOV")
+ }
+
+ case obj.TYPE_REG:
+ p = appendp(p, AGet, mov.From)
+ if mov.From.Reg == REG_SP {
+ p = appendp(p, AI64ExtendI32U)
+ }
+
+ case obj.TYPE_MEM:
+ p = appendp(p, AGet, regAddr(mov.From.Reg))
+ if mov.From.Reg != REG_SP {
+ p = appendp(p, AI32WrapI64)
+ }
+ p = appendp(p, loadAs, constAddr(mov.From.Offset))
+
+ default:
+ panic("bad MOV type")
+ }
+ }
+
+ switch mov.To.Type {
+ case obj.TYPE_REG:
+ appendValue()
+ if mov.To.Reg == REG_SP {
+ p = appendp(p, AI32WrapI64)
+ }
+ p = appendp(p, ASet, mov.To)
+
+ case obj.TYPE_MEM:
+ switch mov.To.Name {
+ case obj.NAME_NONE, obj.NAME_PARAM:
+ p = appendp(p, AGet, regAddr(mov.To.Reg))
+ if mov.To.Reg != REG_SP {
+ p = appendp(p, AI32WrapI64)
+ }
+ case obj.NAME_EXTERN:
+ p = appendp(p, AI32Const, obj.Addr{Type: obj.TYPE_ADDR, Name: obj.NAME_EXTERN, Sym: mov.To.Sym})
+ default:
+ panic("bad MOV name")
+ }
+ appendValue()
+ p = appendp(p, storeAs, constAddr(mov.To.Offset))
+
+ default:
+ panic("bad MOV type")
+ }
+
+ case ACallImport:
+ p.As = obj.ANOP
+ p = appendp(p, AGet, regAddr(REG_SP))
+ p = appendp(p, ACall, obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: s})
+ p.Mark = WasmImport
+ }
+ }
+
+ {
+ p := s.Func().Text
+ if len(unwindExitBranches) > 0 {
+ p = appendp(p, ABlock) // unwindExit, used to return 1 when unwinding the stack
+ for _, b := range unwindExitBranches {
+ b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p}
+ }
+ }
+ if len(entryPointLoopBranches) > 0 {
+ p = appendp(p, ALoop) // entryPointLoop, used to jump between basic blocks
+ for _, b := range entryPointLoopBranches {
+ b.To = obj.Addr{Type: obj.TYPE_BRANCH, Val: p}
+ }
+ }
+ if numResumePoints > 0 {
+ // Add Block instructions for resume points and BrTable to jump to selected resume point.
+ for i := 0; i < numResumePoints+1; i++ {
+ p = appendp(p, ABlock)
+ }
+ p = appendp(p, AGet, regAddr(REG_PC_B)) // read next basic block from PC_B
+ p = appendp(p, ABrTable, obj.Addr{Val: tableIdxs})
+ p = appendp(p, AEnd) // end of Block
+ }
+ for p.Link != nil {
+ p = p.Link // function instructions
+ }
+ if len(entryPointLoopBranches) > 0 {
+ p = appendp(p, AEnd) // end of entryPointLoop
+ }
+ p = appendp(p, obj.AUNDEF)
+ if len(unwindExitBranches) > 0 {
+ p = appendp(p, AEnd) // end of unwindExit
+ p = appendp(p, AI32Const, constAddr(1))
+ }
+ }
+
+ currentDepth = 0
+ blockDepths := make(map[*obj.Prog]int)
+ for p := s.Func().Text; p != nil; p = p.Link {
+ switch p.As {
+ case ABlock, ALoop, AIf:
+ currentDepth++
+ blockDepths[p] = currentDepth
+ case AEnd:
+ currentDepth--
+ }
+
+ switch p.As {
+ case ABr, ABrIf:
+ if p.To.Type == obj.TYPE_BRANCH {
+ blockDepth, ok := blockDepths[p.To.Val.(*obj.Prog)]
+ if !ok {
+ panic("label not at block")
+ }
+ p.To = constAddr(int64(currentDepth - blockDepth))
+ }
+ }
+ }
+}
+
+func constAddr(value int64) obj.Addr {
+ return obj.Addr{Type: obj.TYPE_CONST, Offset: value}
+}
+
+func regAddr(reg int16) obj.Addr {
+ return obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+// Most Go functions have a single parameter (PC_B) in the
+// Wasm ABI. This is a list of the exceptions.
+var notUsePC_B = map[string]bool{
+ "_rt0_wasm_js": true,
+ "wasm_export_run": true,
+ "wasm_export_resume": true,
+ "wasm_export_getsp": true,
+ "wasm_pc_f_loop": true,
+ "runtime.wasmMove": true,
+ "runtime.wasmZero": true,
+ "runtime.wasmDiv": true,
+ "runtime.wasmTruncS": true,
+ "runtime.wasmTruncU": true,
+ "runtime.gcWriteBarrier": true,
+ "cmpbody": true,
+ "memeqbody": true,
+ "memcmp": true,
+ "memchr": true,
+}
+
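+// assemble encodes the preprocessed program of s as a WebAssembly function
+// body: the local variable declarations, then the instruction stream, with
+// relocations recorded for calls and address constants.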
+func assemble(ctxt *obj.Link, s *obj.LSym, newprog obj.ProgAlloc) {
+ type regVar struct {
+ global bool
+ index uint64
+ }
+
+ type varDecl struct {
+ count uint64
+ typ valueType
+ }
+
+ hasLocalSP := false
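+ // SP, CTXT, g, RET0-RET3 and PAUSE live in WebAssembly globals 0
+ // through 7; all other registers are mapped to function locals.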
+ regVars := [MAXREG - MINREG]*regVar{
+ REG_SP - MINREG: {true, 0},
+ REG_CTXT - MINREG: {true, 1},
+ REG_g - MINREG: {true, 2},
+ REG_RET0 - MINREG: {true, 3},
+ REG_RET1 - MINREG: {true, 4},
+ REG_RET2 - MINREG: {true, 5},
+ REG_RET3 - MINREG: {true, 6},
+ REG_PAUSE - MINREG: {true, 7},
+ }
+ var varDecls []*varDecl
+ useAssemblyRegMap := func() {
+ for i := int16(0); i < 16; i++ {
+ regVars[REG_R0+i-MINREG] = &regVar{false, uint64(i)}
+ }
+ }
+
+ // Function starts with declaration of locals: numbers and types.
+ // Some functions use a special calling convention.
+ switch s.Name {
+ case "_rt0_wasm_js", "wasm_export_run", "wasm_export_resume", "wasm_export_getsp", "wasm_pc_f_loop",
+ "runtime.wasmMove", "runtime.wasmZero", "runtime.wasmDiv", "runtime.wasmTruncS", "runtime.wasmTruncU", "memeqbody":
+ varDecls = []*varDecl{}
+ useAssemblyRegMap()
+ case "memchr", "memcmp":
+ varDecls = []*varDecl{{count: 2, typ: i32}}
+ useAssemblyRegMap()
+ case "cmpbody":
+ varDecls = []*varDecl{{count: 2, typ: i64}}
+ useAssemblyRegMap()
+ case "runtime.gcWriteBarrier":
+ varDecls = []*varDecl{{count: 4, typ: i64}}
+ useAssemblyRegMap()
+ default:
+ // Normal calling convention: PC_B is passed as the WebAssembly parameter. The first local variable is the local SP cache.
+ regVars[REG_PC_B-MINREG] = &regVar{false, 0}
+ hasLocalSP = true
+
+ var regUsed [MAXREG - MINREG]bool
+ for p := s.Func().Text; p != nil; p = p.Link {
+ if p.From.Reg != 0 {
+ regUsed[p.From.Reg-MINREG] = true
+ }
+ if p.To.Reg != 0 {
+ regUsed[p.To.Reg-MINREG] = true
+ }
+ }
+
+ regs := []int16{REG_SP}
+ for reg := int16(REG_R0); reg <= REG_F31; reg++ {
+ if regUsed[reg-MINREG] {
+ regs = append(regs, reg)
+ }
+ }
+
+ var lastDecl *varDecl
+ for i, reg := range regs {
+ t := regType(reg)
+ if lastDecl == nil || lastDecl.typ != t {
+ lastDecl = &varDecl{
+ count: 0,
+ typ: t,
+ }
+ varDecls = append(varDecls, lastDecl)
+ }
+ lastDecl.count++
+ if reg != REG_SP {
+ regVars[reg-MINREG] = &regVar{false, 1 + uint64(i)}
+ }
+ }
+ }
+
+ w := new(bytes.Buffer)
+
+ writeUleb128(w, uint64(len(varDecls)))
+ for _, decl := range varDecls {
+ writeUleb128(w, decl.count)
+ w.WriteByte(byte(decl.typ))
+ }
+
+ if hasLocalSP {
+ // Copy SP from its global variable into a local variable. Accessing a local variable is more efficient.
+ updateLocalSP(w)
+ }
+
+ for p := s.Func().Text; p != nil; p = p.Link {
+ switch p.As {
+ case AGet:
+ if p.From.Type != obj.TYPE_REG {
+ panic("bad Get: argument is not a register")
+ }
+ reg := p.From.Reg
+ v := regVars[reg-MINREG]
+ if v == nil {
+ panic("bad Get: invalid register")
+ }
+ if reg == REG_SP && hasLocalSP {
+ writeOpcode(w, ALocalGet)
+ writeUleb128(w, 1) // local SP
+ continue
+ }
+ if v.global {
+ writeOpcode(w, AGlobalGet)
+ } else {
+ writeOpcode(w, ALocalGet)
+ }
+ writeUleb128(w, v.index)
+ continue
+
+ case ASet:
+ if p.To.Type != obj.TYPE_REG {
+ panic("bad Set: argument is not a register")
+ }
+ reg := p.To.Reg
+ v := regVars[reg-MINREG]
+ if v == nil {
+ panic("bad Set: invalid register")
+ }
+ if reg == REG_SP && hasLocalSP {
+ writeOpcode(w, ALocalTee)
+ writeUleb128(w, 1) // local SP
+ }
+ if v.global {
+ writeOpcode(w, AGlobalSet)
+ } else {
+ if p.Link.As == AGet && p.Link.From.Reg == reg {
+ writeOpcode(w, ALocalTee)
+ p = p.Link
+ } else {
+ writeOpcode(w, ALocalSet)
+ }
+ }
+ writeUleb128(w, v.index)
+ continue
+
+ case ATee:
+ if p.To.Type != obj.TYPE_REG {
+ panic("bad Tee: argument is not a register")
+ }
+ reg := p.To.Reg
+ v := regVars[reg-MINREG]
+ if v == nil {
+ panic("bad Tee: invalid register")
+ }
+ writeOpcode(w, ALocalTee)
+ writeUleb128(w, v.index)
+ continue
+
+ case ANot:
+ writeOpcode(w, AI32Eqz)
+ continue
+
+ case obj.AUNDEF:
+ writeOpcode(w, AUnreachable)
+ continue
+
+ case obj.ANOP, obj.ATEXT, obj.AFUNCDATA, obj.APCDATA:
+ // ignore
+ continue
+ }
+
+ writeOpcode(w, p.As)
+
+ switch p.As {
+ case ABlock, ALoop, AIf:
+ if p.From.Offset != 0 {
+ // block type, rarely used, e.g. for code compiled with emscripten
+ w.WriteByte(0x80 - byte(p.From.Offset))
+ continue
+ }
+ w.WriteByte(0x40)
+
+ case ABr, ABrIf:
+ if p.To.Type != obj.TYPE_CONST {
+ panic("bad Br/BrIf")
+ }
+ writeUleb128(w, uint64(p.To.Offset))
+
+ case ABrTable:
+ idxs := p.To.Val.([]uint64)
+ writeUleb128(w, uint64(len(idxs)-1))
+ for _, idx := range idxs {
+ writeUleb128(w, idx)
+ }
+
+ case ACall:
+ switch p.To.Type {
+ case obj.TYPE_CONST:
+ writeUleb128(w, uint64(p.To.Offset))
+
+ case obj.TYPE_MEM:
+ if p.To.Name != obj.NAME_EXTERN && p.To.Name != obj.NAME_STATIC {
+ fmt.Println(p.To)
+ panic("bad name for Call")
+ }
+ r := obj.Addrel(s)
+ r.Siz = 1 // actually variable sized
+ r.Off = int32(w.Len())
+ r.Type = objabi.R_CALL
+ if p.Mark&WasmImport != 0 {
+ r.Type = objabi.R_WASMIMPORT
+ }
+ r.Sym = p.To.Sym
+ if hasLocalSP {
+ // The stack may have moved, which changes SP. Update the local SP variable.
+ updateLocalSP(w)
+ }
+
+ default:
+ panic("bad type for Call")
+ }
+
+ case ACallIndirect:
+ writeUleb128(w, uint64(p.To.Offset))
+ w.WriteByte(0x00) // reserved value
+ if hasLocalSP {
+ // The stack may have moved, which changes SP. Update the local SP variable.
+ updateLocalSP(w)
+ }
+
+ case AI32Const, AI64Const:
+ if p.From.Name == obj.NAME_EXTERN {
+ r := obj.Addrel(s)
+ r.Siz = 1 // actually variable sized
+ r.Off = int32(w.Len())
+ r.Type = objabi.R_ADDR
+ r.Sym = p.From.Sym
+ r.Add = p.From.Offset
+ break
+ }
+ writeSleb128(w, p.From.Offset)
+
+ case AF32Const:
+ b := make([]byte, 4)
+ binary.LittleEndian.PutUint32(b, math.Float32bits(float32(p.From.Val.(float64))))
+ w.Write(b)
+
+ case AF64Const:
+ b := make([]byte, 8)
+ binary.LittleEndian.PutUint64(b, math.Float64bits(p.From.Val.(float64)))
+ w.Write(b)
+
+ case AI32Load, AI64Load, AF32Load, AF64Load, AI32Load8S, AI32Load8U, AI32Load16S, AI32Load16U, AI64Load8S, AI64Load8U, AI64Load16S, AI64Load16U, AI64Load32S, AI64Load32U:
+ if p.From.Offset < 0 {
+ panic("negative offset for *Load")
+ }
+ if p.From.Type != obj.TYPE_CONST {
+ panic("bad type for *Load")
+ }
+ if p.From.Offset > math.MaxUint32 {
+ ctxt.Diag("bad offset in %v", p)
+ }
+ writeUleb128(w, align(p.As))
+ writeUleb128(w, uint64(p.From.Offset))
+
+ case AI32Store, AI64Store, AF32Store, AF64Store, AI32Store8, AI32Store16, AI64Store8, AI64Store16, AI64Store32:
+ if p.To.Offset < 0 {
+ panic("negative offset")
+ }
+ if p.From.Offset > math.MaxUint32 {
+ ctxt.Diag("bad offset in %v", p)
+ }
+ writeUleb128(w, align(p.As))
+ writeUleb128(w, uint64(p.To.Offset))
+
+ case ACurrentMemory, AGrowMemory:
+ w.WriteByte(0x00)
+
+ }
+ }
+
+ w.WriteByte(0x0b) // end
+
+ s.P = w.Bytes()
+}
+
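+// updateLocalSP refreshes the cached stack pointer: it copies the global
+// SP (global 0) into the local SP variable (local 1).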
+func updateLocalSP(w *bytes.Buffer) {
+ writeOpcode(w, AGlobalGet)
+ writeUleb128(w, 0) // global SP
+ writeOpcode(w, ALocalSet)
+ writeUleb128(w, 1) // local SP
+}
+
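+// writeOpcode translates an assembler op into its WebAssembly opcode byte.
+// It relies on the A* constants being declared in opcode order, so each
+// case handles one contiguous run; ops from AI32TruncSatF32S on are
+// two-byte opcodes behind the 0xFC prefix.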
+func writeOpcode(w *bytes.Buffer, as obj.As) {
+ switch {
+ case as < AUnreachable:
+ panic(fmt.Sprintf("unexpected assembler op: %s", as))
+ case as < AEnd:
+ w.WriteByte(byte(as - AUnreachable + 0x00))
+ case as < ADrop:
+ w.WriteByte(byte(as - AEnd + 0x0B))
+ case as < ALocalGet:
+ w.WriteByte(byte(as - ADrop + 0x1A))
+ case as < AI32Load:
+ w.WriteByte(byte(as - ALocalGet + 0x20))
+ case as < AI32TruncSatF32S:
+ w.WriteByte(byte(as - AI32Load + 0x28))
+ case as < ALast:
+ w.WriteByte(0xFC)
+ w.WriteByte(byte(as - AI32TruncSatF32S + 0x00))
+ default:
+ panic(fmt.Sprintf("unexpected assembler op: %s", as))
+ }
+}
+
+type valueType byte
+
+const (
+ i32 valueType = 0x7F
+ i64 valueType = 0x7E
+ f32 valueType = 0x7D
+ f64 valueType = 0x7C
+)
+
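+// regType returns the WebAssembly value type of a register: SP is i32,
+// R0-R15 are i64, F0-F15 are f32, and F16-F31 are f64.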
+func regType(reg int16) valueType {
+ switch {
+ case reg == REG_SP:
+ return i32
+ case reg >= REG_R0 && reg <= REG_R15:
+ return i64
+ case reg >= REG_F0 && reg <= REG_F15:
+ return f32
+ case reg >= REG_F16 && reg <= REG_F31:
+ return f64
+ default:
+ panic("invalid register")
+ }
+}
+
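+// align returns the log2 alignment hint carried by a Wasm load or store
+// immediate: 0 for 8-bit, 1 for 16-bit, 2 for 32-bit and 3 for 64-bit
+// accesses.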
+func align(as obj.As) uint64 {
+ switch as {
+ case AI32Load8S, AI32Load8U, AI64Load8S, AI64Load8U, AI32Store8, AI64Store8:
+ return 0
+ case AI32Load16S, AI32Load16U, AI64Load16S, AI64Load16U, AI32Store16, AI64Store16:
+ return 1
+ case AI32Load, AF32Load, AI64Load32S, AI64Load32U, AI32Store, AF32Store, AI64Store32:
+ return 2
+ case AI64Load, AF64Load, AI64Store, AF64Store:
+ return 3
+ default:
+ panic("align: bad op")
+ }
+}
+
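+// writeUleb128 writes v in unsigned LEB128 encoding: seven payload bits
+// per byte, least significant group first, with the high bit of each byte
+// marking continuation.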
+func writeUleb128(w io.ByteWriter, v uint64) {
+ if v < 128 {
+ w.WriteByte(uint8(v))
+ return
+ }
+ more := true
+ for more {
+ c := uint8(v & 0x7f)
+ v >>= 7
+ more = v != 0
+ if more {
+ c |= 0x80
+ }
+ w.WriteByte(c)
+ }
+}
+
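+// writeSleb128 writes v in signed LEB128 encoding. Emission stops once the
+// remaining value is pure sign extension: 0 with the sign bit clear, or -1
+// with the sign bit set.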
+func writeSleb128(w io.ByteWriter, v int64) {
+ more := true
+ for more {
+ c := uint8(v & 0x7f)
+ s := uint8(v & 0x40)
+ v >>= 7
+ more = !((v == 0 && s == 0) || (v == -1 && s != 0))
+ if more {
+ c |= 0x80
+ }
+ w.WriteByte(c)
+ }
+}