This repository was archived by the owner on May 11, 2020, and is now read-only.

Abstract/Separate Platform Specific Code #162

Open: wants to merge 2 commits into master.
2 changes: 1 addition & 1 deletion exec/internal/compile/allocator.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

- // +build !appengine
+ // +build !appengine,amd64
Contributor:
Wouldn't the allocator work on ARM?

Author:
The problem was the dependence on the asmBlock type in native_exec:

return JITExitSignal(jitcall(unsafe.Pointer(&b.mem), stack, locals, globals, mem))

which depends on the amd64 assembly function jitcall from native_exec_amd64.go:

func jitcall(asm unsafe.Pointer, stack, locals, globals *[]uint64, mem *[]byte) uint64

TEXT ·jitcall(SB),NOSPLIT|NOFRAME,$0-48

So the allocator could work on ARM, with some more abstraction.
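
A minimal sketch of that abstraction, assuming the package's existing jitcall and JITExitSignal; the interface name nativeUnit and the struct amd64Unit below are illustrative, not the PR's actual code:

// Hypothetical sketch (amd64 build): hide the arch-specific trampoline
// behind an interface so asmBlock and the allocator can stay portable.
type nativeUnit interface {
    // Invoke runs the native block and returns the packed exit signal.
    Invoke(stack, locals, globals *[]uint64, mem *[]byte) JITExitSignal
}

// amd64Unit wraps an executable code block and calls into it via the
// assembly trampoline jitcall from native_exec_amd64.go.
type amd64Unit struct{ mem []byte }

func (b *amd64Unit) Invoke(stack, locals, globals *[]uint64, mem *[]byte) JITExitSignal {
    return JITExitSignal(jitcall(unsafe.Pointer(&b.mem), stack, locals, globals, mem))
}

Other architectures would then supply their own nativeUnit implementation instead of linking against the amd64 assembly.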

Contributor:
Okay, so we should add an implementation of jitcall that works on other architectures (or even one that just panics), but the MMapAllocator and asmBlock types should be shareable, right?
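
For other architectures, a stub matching the jitcall signature quoted above could keep the package compiling and simply panic if reached; a minimal sketch, assuming a !amd64 build tag (not part of this PR):

// +build !amd64

package compile

import "unsafe"

// jitcall is a stub for architectures without a native trampoline.
// The scanner should never schedule native execution on these
// platforms, so reaching this stub indicates a bug.
func jitcall(asm unsafe.Pointer, stack, locals, globals *[]uint64, mem *[]byte) uint64 {
    panic("native code execution is not supported on this architecture")
}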

Author:
I believe so; I'll give that a try.


package compile

6 changes: 6 additions & 0 deletions exec/internal/compile/backend.go
@@ -4,6 +4,8 @@

package compile

import "github.com/twitchyliquid64/golang-asm/obj"

type dirtyState uint8

const (
@@ -13,3 +15,7 @@ const (
stateLocalFirstElem // Caches a pointer to the locals array.
stateGlobalSliceHeader // Caches a pointer to the globals slice header.
)

type Backend interface {
paramsForMemoryOp(op byte) (size uint, inst obj.As)
}
2 changes: 2 additions & 0 deletions exec/internal/compile/backend_amd64.go
@@ -15,6 +15,8 @@ import (
"github.com/twitchyliquid64/golang-asm/obj/x86"
)

type PlatformBackend = AMD64Backend

var rhsConstOptimizable = map[byte]bool{
ops.I64Add: true,
ops.I64Sub: true,
26 changes: 26 additions & 0 deletions exec/internal/compile/defaultBackend.go
@@ -0,0 +1,26 @@
// +build !amd64

package compile

import (
ops "github.com/go-interpreter/wagon/wasm/operators"
"github.com/twitchyliquid64/golang-asm/obj"
"github.com/twitchyliquid64/golang-asm/obj/x86"
)

type PlatformBackend struct {
}

func (b *PlatformBackend) paramsForMemoryOp(op byte) (size uint, inst obj.As) {
switch op {
case ops.I64Load, ops.F64Load:
return 8, x86.AMOVQ
case ops.I32Load, ops.F32Load:
return 4, x86.AMOVL
case ops.I64Store, ops.F64Store:
return 8, x86.AMOVQ
case ops.I32Store, ops.F32Store:
return 4, x86.AMOVL
}
panic("unreachable")
}
21 changes: 0 additions & 21 deletions exec/internal/compile/native.go
@@ -33,31 +33,10 @@
CompletionFatalInternalError
)

func makeExitIndex(idx int) CompletionStatus {
return CompletionStatus((idx << 8) & exitIndexMask)
}

const (
statusMask = 15
exitIndexMask = 0x00000000ffffff00
unknownIndex = 0xffffff
)

// JITExitSignal is the value returned from the execution of a native section.
// The bits of this packed 64bit value is encoded as follows:
// [00:04] Completion Status
// [04:08] Reserved
// [08:32] Index of the WASM instruction where the exit occurred.
// [32:64] Status-specific 32bit value.
type JITExitSignal uint64

// CompletionStatus decodes and returns the completion status of the exit.
func (s JITExitSignal) CompletionStatus() CompletionStatus {
return CompletionStatus(s & statusMask)
}

// Index returns the index to the instruction where the exit happened.
// 0xffffff is returned if the exit was due to normal completion.
func (s JITExitSignal) Index() int {
return (int(s) & exitIndexMask) >> 8
}
22 changes: 22 additions & 0 deletions exec/internal/compile/native_completion_amd64.go
@@ -0,0 +1,22 @@
package compile

func makeExitIndex(idx int) CompletionStatus {
return CompletionStatus((idx << 8) & exitIndexMask)
}

const (
statusMask = 15
exitIndexMask = 0x00000000ffffff00
unknownIndex = 0xffffff
)

// CompletionStatus decodes and returns the completion status of the exit.
func (s JITExitSignal) CompletionStatus() CompletionStatus {
return CompletionStatus(s & statusMask)
}

// Index returns the index to the instruction where the exit happened.
// 0xffffff is returned if the exit was due to normal completion.
func (s JITExitSignal) Index() int {
return (int(s) & exitIndexMask) >> 8
}
2 changes: 1 addition & 1 deletion exec/internal/compile/native_exec.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

- // +build !appengine
+ // +build !appengine,amd64

package compile

4 changes: 2 additions & 2 deletions exec/internal/compile/scanner.go
@@ -92,12 +92,12 @@ func (s *scanner) ScanFunc(bytecode []byte, meta *BytecodeMetadata) ([]CompilationCandidate, error) {
// TODO: Add to this table as backends support more opcodes.
switch inst.Op {
case ops.I64Load, ops.I32Load, ops.F64Load, ops.F32Load:
- fakeBE := &AMD64Backend{}
+ fakeBE := &PlatformBackend{}
memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
inProgress.Metrics.MemoryReads += memSize
inProgress.Metrics.StackWrites++
case ops.I64Store, ops.I32Store, ops.F64Store, ops.F32Store:
- fakeBE := &AMD64Backend{}
+ fakeBE := &PlatformBackend{}
memSize, _ := fakeBE.paramsForMemoryOp(inst.Op)
inProgress.Metrics.MemoryWrites += memSize
inProgress.Metrics.StackReads += 2
191 changes: 16 additions & 175 deletions exec/native_compile.go
@@ -1,35 +1,11 @@
// Copyright 2019 The go-interpreter Authors. All rights reserved.
Contributor:
What part of this file won't be applicable/compile on ARM?

Author:
This one was due to an overflow from exitIndexMask:

func makeExitIndex(idx int) CompletionStatus {
    return CompletionStatus((idx << 8) & exitIndexMask)
}

I didn't take the time to debug it; I just took it out. I'll look into it more.

Contributor:
Did you run it on a 32-bit system? We probably should use uint64 everywhere here instead of int, which will match the platform's register size.

Author:
Yes, I tested it on a Raspberry Pi running Raspbian, so it runs 32-bit ARM.

Contributor:
Gotcha. I think if you change idx (and values adjacent to it) to uint64, it should work.
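
That matches the symptom: the constant exitIndexMask (0x00000000ffffff00) does not fit in a 32-bit int, so the int arithmetic in makeExitIndex and Index fails to compile on 32-bit ARM. A sketch of the uint64 variant suggested here, assuming CompletionStatus is backed by a 64-bit integer (illustrative, not the PR's code):

// Widening idx and the mask arithmetic to uint64 keeps exitIndexMask
// in range regardless of the platform's int size.
func makeExitIndex(idx uint64) CompletionStatus {
    return CompletionStatus((idx << 8) & exitIndexMask)
}

// Index returns the index of the instruction where the exit happened,
// decoded with the same 64-bit arithmetic.
func (s JITExitSignal) Index() uint64 {
    return (uint64(s) & exitIndexMask) >> 8
}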

// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// +build !amd64

package exec

import (
"encoding/binary"
"fmt"
"runtime"

"github.com/go-interpreter/wagon/exec/internal/compile"
ops "github.com/go-interpreter/wagon/wasm/operators"
)

// Parameters that decide whether a sequence should be compiled.
// TODO: Expose some way for these to be customized at runtime
// via VMOptions.
const (
// NOTE: must never be less than 5, as room is needed to pack the
// wagon.nativeExec instruction and its parameter.
minInstBytes = 5
minArithInstructionSequence = 2
)

var supportedNativeArchs []nativeArch

type nativeArch struct {
Arch, OS string
make func(endianness binary.ByteOrder) *nativeCompiler
}

// nativeCompiler represents a backend for native code generation + execution.
type nativeCompiler struct {
Scanner sequenceScanner
@@ -40,6 +16,21 @@ type nativeCompiler struct {
func (c *nativeCompiler) Close() error {
return c.allocator.Close()
}
func nativeBackend() (bool, *nativeCompiler) {
return false, nil
}

func (vm *VM) tryNativeCompile() error {
return nil
}

// nativeCodeInvocation calls into one of the assembled code blocks.
// Assembled code blocks expect the following two pieces of
// information on the stack:
// [fp:fp+pointerSize]: sliceHeader for the stack.
// [fp+pointerSize:fp+pointerSize*2]: sliceHeader for locals variables.
func (vm *VM) nativeCodeInvocation(asmIndex uint32) {
}

// pageAllocator is responsible for the efficient allocation of
// executable, aligned regions of executable memory.
@@ -62,153 +53,3 @@ type instructionBuilder interface {
// Build compiles the specified bytecode into native instructions.
Build(candidate compile.CompilationCandidate, code []byte, meta *compile.BytecodeMetadata) ([]byte, error)
}

// NativeCompilationError represents a failure to compile a sequence
// of instructions to native code.
type NativeCompilationError struct {
Start, End uint
FuncIndex int
Err error
}

func (e NativeCompilationError) Error() string {
return fmt.Sprintf("exec: native compilation failed on vm.funcs[%d].code[%d:%d]: %v", e.FuncIndex, e.Start, e.End, e.Err)
}

func nativeBackend() (bool, *nativeCompiler) {
for _, c := range supportedNativeArchs {
if c.Arch == runtime.GOARCH && c.OS == runtime.GOOS {
backend := c.make(endianess)
return true, backend
}
}
return false, nil
}

func (vm *VM) tryNativeCompile() error {
if vm.nativeBackend == nil {
return nil
}

for i := range vm.funcs {
if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
continue
}

fn := vm.funcs[i].(compiledFunction)
candidates, err := vm.nativeBackend.Scanner.ScanFunc(fn.code, fn.codeMeta)
if err != nil {
return fmt.Errorf("exec: AOT scan failed on vm.funcs[%d]: %v", i, err)
}

for _, candidate := range candidates {
if (candidate.Metrics.IntegerOps + candidate.Metrics.FloatOps) < minArithInstructionSequence {
continue
}
lower, upper := candidate.Bounds()
if (upper - lower) < minInstBytes {
continue
}

asm, err := vm.nativeBackend.Builder.Build(candidate, fn.code, fn.codeMeta)
if err != nil {
return NativeCompilationError{
Err: err,
Start: lower,
End: upper,
FuncIndex: i,
}
}
unit, err := vm.nativeBackend.allocator.AllocateExec(asm)
if err != nil {
return fmt.Errorf("exec: allocator.AllocateExec() failed: %v", err)
}
fn.asm = append(fn.asm, asmBlock{
nativeUnit: unit,
resumePC: upper,
})

// Patch the wasm opcode stream to call into the native section.
// The number of bytes touched here must always be equal to
// nativeExecPrologueSize and <= minInstructionSequence.
fn.code[lower] = ops.WagonNativeExec
endianess.PutUint32(fn.code[lower+1:], uint32(len(fn.asm)-1))
// make the remainder of the recompiled instructions
// unreachable: this should trap the program in the event that
// a bug in code offsets & candidate sequence detection results in
// a jump to the middle of re-compiled code.
// This conservative behaviour is the least likely to result in
// bugs becoming security issues.
for i := lower + 5; i < upper-1; i++ {
fn.code[i] = ops.Unreachable
}
}
vm.funcs[i] = fn
}

return nil
}

// nativeCodeInvocation calls into one of the assembled code blocks.
// Assembled code blocks expect the following two pieces of
// information on the stack:
// [fp:fp+pointerSize]: sliceHeader for the stack.
// [fp+pointerSize:fp+pointerSize*2]: sliceHeader for locals variables.
func (vm *VM) nativeCodeInvocation(asmIndex uint32) {
block := vm.ctx.asm[asmIndex]
finishSignal := block.nativeUnit.Invoke(&vm.ctx.stack, &vm.ctx.locals, &vm.globals, &vm.memory)

switch finishSignal.CompletionStatus() {
case compile.CompletionOK:
case compile.CompletionFatalInternalError:
panic("fatal error in native execution")
case compile.CompletionBadBounds:
panic("exec: out of bounds memory access")
}
vm.ctx.pc = int64(block.resumePC)
}

// CompileStats returns statistics about native compilation performed on
// the VM.
func (vm *VM) CompileStats() NativeCompileStats {
out := NativeCompileStats{
Ops: map[byte]*OpStats{},
}

for i := range vm.funcs {
if _, isGoFunc := vm.funcs[i].(*goFunction); isGoFunc {
continue
}

fn := vm.funcs[i].(compiledFunction)
out.NumCompiledBlocks += len(fn.asm)

for _, inst := range fn.codeMeta.Instructions {
if _, exists := out.Ops[inst.Op]; !exists {
out.Ops[inst.Op] = &OpStats{}
}

// Instructions which are native-compiled are re-written to the
// ops.WagonNativeExec opcode, so a mismatch indicates native compilation.
if fn.code[inst.Start] == inst.Op {
out.Ops[inst.Op].Interpreted++
} else {
out.Ops[inst.Op].Compiled++
}
}
}

return out
}

type OpStats struct {
Interpreted int
Compiled int
}

// NativeCompileStats encapsulates statistics about any native
// compilation performed on the VM.
type NativeCompileStats struct {
Ops map[byte]*OpStats
NumCompiledBlocks int
}