diff --git a/config/check_macroassembler_style.py b/config/check_macroassembler_style.py
index 0d040a939b..b83e3691dd 100644
--- a/config/check_macroassembler_style.py
+++ b/config/check_macroassembler_style.py
@@ -24,17 +24,17 @@ from __future__ import absolute_import
 from __future__ import print_function

 import difflib
 import os
 import re
 import sys

 architecture_independent = set(["generic"])
-all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared"])
+all_unsupported_architectures_names = set(["mips32", "mips64", "mips_shared", "ppc64"])
 all_architecture_names = set(["x86", "x64", "arm", "arm64"])
 all_shared_architecture_names = set(["x86_shared", "arm", "arm64"])

 reBeforeArg = "(?<=[(,\s])"
 reArgType = "(?P<type>[\w\s:*&]+)"
 reArgName = "(?P<name>\s\w+)"
 reArgDefault = "(?P<default>(?:\s=[^,)]+)?)"
 reAfterArg = "(?=[,)])"
diff --git a/js/moz.configure b/js/moz.configure
index 3c3d0d4359..b217d0e15c 100644
--- a/js/moz.configure
+++ b/js/moz.configure
@@ -214,23 +214,25 @@ def jit_codegen(jit_enabled, simulator, target):
     return namespace(**{str(target.cpu): True})


 set_config("JS_CODEGEN_NONE", jit_codegen.none)
 set_config("JS_CODEGEN_ARM", jit_codegen.arm)
 set_config("JS_CODEGEN_ARM64", jit_codegen.arm64)
 set_config("JS_CODEGEN_MIPS32", jit_codegen.mips32)
 set_config("JS_CODEGEN_MIPS64", jit_codegen.mips64)
+set_config("JS_CODEGEN_PPC64", jit_codegen.ppc64)
 set_config("JS_CODEGEN_X86", jit_codegen.x86)
 set_config("JS_CODEGEN_X64", jit_codegen.x64)
 set_define("JS_CODEGEN_NONE", jit_codegen.none)
 set_define("JS_CODEGEN_ARM", jit_codegen.arm)
 set_define("JS_CODEGEN_ARM64", jit_codegen.arm64)
 set_define("JS_CODEGEN_MIPS32", jit_codegen.mips32)
 set_define("JS_CODEGEN_MIPS64", jit_codegen.mips64)
+set_define("JS_CODEGEN_PPC64", jit_codegen.ppc64)
 set_define("JS_CODEGEN_X86", jit_codegen.x86)
 set_define("JS_CODEGEN_X64", jit_codegen.x64)

 # Profiling
 # =======================================================
 option(
     "--enable-instruments",
     env="MOZ_INSTRUMENTS",
diff --git a/js/src/irregexp/RegExpNativeMacroAssembler.cpp b/js/src/irregexp/RegExpNativeMacroAssembler.cpp
index e0ef7e64f5..81d8e2a198 100644
--- a/js/src/irregexp/RegExpNativeMacroAssembler.cpp
+++ b/js/src/irregexp/RegExpNativeMacroAssembler.cpp
@@ -813,18 +813,33 @@ void SMRegExpMacroAssembler::JumpOrBacktrack(Label* to) {
 // If the test fails, call an OOL handler to try growing the stack.
 void SMRegExpMacroAssembler::CheckBacktrackStackLimit() {
   js::jit::Label no_stack_overflow;
   masm_.branchPtr(
       Assembler::BelowOrEqual,
       AbsoluteAddress(isolate()->regexp_stack()->limit_address_address()),
       backtrack_stack_pointer_, &no_stack_overflow);

+#ifdef JS_CODEGEN_PPC64
+  // LR on PowerPC isn't a GPR, so we have to explicitly save it here before
+  // we call or we will end up erroneously returning after the call to the
+  // stack overflow handler when we |blr| out and inevitably underflow the
+  // irregexp stack on the next backtrack.
+  masm_.xs_mflr(temp1_);
+  masm_.as_stdu(temp1_, masm_.getStackPointer(), -8);
+#endif
+
   masm_.call(&stack_overflow_label_);

+#ifdef JS_CODEGEN_PPC64
+  masm_.as_ld(temp1_, masm_.getStackPointer(), 0);
+  masm_.xs_mtlr(temp1_);
+  masm_.as_addi(masm_.getStackPointer(), masm_.getStackPointer(), 8);
+#endif
+
   // Exit with an exception if the call failed
   masm_.branchTest32(Assembler::Zero, temp0_, temp0_,
                      &exit_with_exception_label_);

   masm_.bind(&no_stack_overflow);
 }

 // This is used to sneak an OOM through the V8 layer.
@@ -1127,16 +1142,20 @@ void SMRegExpMacroAssembler::stackOverflowHandler() {
   LiveGeneralRegisterSet volatileRegs(GeneralRegisterSet::Volatile());

 #ifdef JS_USE_LINK_REGISTER
   masm_.pushReturnAddress();
 #endif

   // Adjust for the return address on the stack.
   size_t frameOffset = sizeof(void*);
+#ifdef JS_CODEGEN_PPC64
+  // We have a double return address.
+  frameOffset += sizeof(void*);
+#endif

   volatileRegs.takeUnchecked(temp0_);
   volatileRegs.takeUnchecked(temp1_);
   masm_.PushRegsInMask(volatileRegs);

   using Fn = bool (*)(RegExpStack * regexp_stack);
   masm_.setupUnalignedABICall(temp0_);
   masm_.passABIArg(temp1_);
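[annotation — not part of the patch] The two PPC64 hunks in CheckBacktrackStackLimit() bracket the call because the Power ISA keeps the return address in the link register (LR), a special-purpose register that the callee's own |bl| silently overwrites. A minimal sketch of the sequence the hunks emit, using ELFv2 register names for illustration (temp1_ need not actually map to r12):

    mflr  r12           # xs_mflr(temp1_): copy LR into a GPR
    stdu  r12, -8(r1)   # as_stdu: push it, atomically updating the stack pointer
    bl    stack_overflow_label_
    ld    r12, 0(r1)    # as_ld: reload the saved return address
    mtlr  r12           # xs_mtlr: restore LR for the eventual blr
    addi  r1, r1, 8     # as_addi: pop the save slot

This saved slot also appears to be why the second hunk grows frameOffset by another sizeof(void*): the "double return address" is the normal pushed return address plus this extra LR doubleword.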
diff --git a/js/src/jit/AtomicOperations.h b/js/src/jit/AtomicOperations.h
index f4a5727d05..138612d53b 100644
--- a/js/src/jit/AtomicOperations.h
+++ b/js/src/jit/AtomicOperations.h
@@ -373,19 +373,26 @@ constexpr inline bool AtomicOperations::isLockfreeJS(int32_t size) {
 #    include "jit/shared/AtomicOperations-feeling-lucky.h"
 #  endif
 #elif defined(__mips__)
 #  if defined(__clang__) || defined(__GNUC__)
 #    include "jit/mips-shared/AtomicOperations-mips-shared.h"
 #  else
 #    error "AtomicOperations on MIPS for an unknown compiler"
 #  endif
+#elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
+    defined(__PPC64LE__)
+#  if defined(JS_CODEGEN_PPC64)
+/* XXX: should be #    include "jit/shared/AtomicOperations-shared-jit.h" */
+#    include "jit/shared/AtomicOperations-feeling-lucky.h"
+#  else
+#    include "jit/shared/AtomicOperations-feeling-lucky.h"
+#  endif
 #elif defined(__ppc__) || defined(__PPC__) || defined(__sparc__) || \
-    defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
-    defined(__PPC64LE__) || defined(__alpha__) || defined(__hppa__) || \
+    defined(__alpha__) || defined(__hppa__) || \
     defined(__sh__) || defined(__s390__) || defined(__s390x__) || \
     defined(__m68k__) || defined(__riscv) || defined(__wasi__)
 #  include "jit/shared/AtomicOperations-feeling-lucky.h"
 #else
 #  error "No AtomicOperations support provided for this platform"
 #endif

 #endif  // jit_AtomicOperations_h
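[annotation — not part of the patch] "Feeling lucky" is the fallback header that maps the AtomicOperations API straight onto the compiler's __atomic builtins and hopes the result is lock-free and compatible with JIT-generated accesses, instead of routing through JIT-emitted stubs as AtomicOperations-shared-jit.h does — hence the XXX. The style of definition that header supplies looks roughly like this (illustrative, not the verbatim upstream code):

    template <typename T>
    inline T js::jit::AtomicOperations::loadSeqCst(T* addr) {
      T v;
      __atomic_load(addr, &v, __ATOMIC_SEQ_CST);
      return v;
    }

Splitting ppc64 out of the generic legacy clause leaves a dedicated spot to switch it to the shared-jit implementation later without disturbing the other platforms.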
diff --git a/js/src/jit/BaselineBailouts.cpp b/js/src/jit/BaselineBailouts.cpp
index bca1427f93..eb499b34cf 100644
--- a/js/src/jit/BaselineBailouts.cpp
+++ b/js/src/jit/BaselineBailouts.cpp
@@ -481,16 +481,21 @@ class MOZ_STACK_CLASS BaselineStackBuilder {
    // let X = STACK_START_ADDR + JitFrameLayout::Size() + PREV_FRAME_SIZE
    // X + RectifierFrameLayout::Size()
    //   + ((RectifierFrameLayout*) X)->prevFrameLocalSize()
    //   - BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr()
    size_t extraOffset =
        RectifierFrameLayout::Size() + priorFrame->prevFrameLocalSize() +
        BaselineStubFrameLayout::reverseOffsetOfSavedFramePtr();
    return virtualPointerAtStackOffset(priorOffset + extraOffset);
+#elif defined(JS_CODEGEN_PPC64)
+    (void)priorOffset;
+// XXX. The above code might work though
+#warning "TODO! BaselineStackBuilder::calculatePrevFramePtr()"
+    MOZ_CRASH();
 #elif defined(JS_CODEGEN_NONE)
    (void)priorOffset;
    MOZ_CRASH();
 #else
 #  error "Bad architecture!"
 #endif
  }
 };
diff --git a/js/src/jit/BaselineCodeGen.cpp b/js/src/jit/BaselineCodeGen.cpp
index 7089f5e300..d67236d2c5 100644
--- a/js/src/jit/BaselineCodeGen.cpp
+++ b/js/src/jit/BaselineCodeGen.cpp
@@ -520,16 +520,19 @@ bool BaselineCodeGen<Handler>::emitOutOfLinePostBarrierSlot() {
   regs.take(BaselineFrameReg);
   Register scratch = regs.takeAny();
 #if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
   // On ARM, save the link register before calling. It contains the return
   // address. The |masm.ret()| later will pop this into |pc| to return.
   masm.push(lr);
 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
   masm.push(ra);
+#elif defined(JS_CODEGEN_PPC64)
+  masm.xs_mflr(ScratchRegister);
+  masm.push(ScratchRegister);
 #endif
   masm.pushValue(R0);

   using Fn = void (*)(JSRuntime * rt, js::gc::Cell * cell);
   masm.setupUnalignedABICall(scratch);
   masm.movePtr(ImmPtr(cx->runtime()), scratch);
   masm.passABIArg(scratch);
   masm.passABIArg(objReg);
diff --git a/js/src/jit/BaselineIC.cpp b/js/src/jit/BaselineIC.cpp
index 9572394e76..dfe762e5c8 100644
--- a/js/src/jit/BaselineIC.cpp
+++ b/js/src/jit/BaselineIC.cpp
@@ -127,17 +127,18 @@ class MOZ_RAII FallbackICCodeCompiler final {
 };

 AllocatableGeneralRegisterSet BaselineICAvailableGeneralRegs(size_t numInputs) {
   AllocatableGeneralRegisterSet regs(GeneralRegisterSet::All());
 #if defined(JS_CODEGEN_ARM)
   MOZ_ASSERT(!regs.has(BaselineStackReg));
   MOZ_ASSERT(!regs.has(ICTailCallReg));
   regs.take(BaselineSecondScratchReg);
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+    defined(JS_CODEGEN_PPC64)
   MOZ_ASSERT(!regs.has(BaselineStackReg));
   MOZ_ASSERT(!regs.has(ICTailCallReg));
   MOZ_ASSERT(!regs.has(BaselineSecondScratchReg));
 #elif defined(JS_CODEGEN_ARM64)
   MOZ_ASSERT(!regs.has(PseudoStackPointer));
   MOZ_ASSERT(!regs.has(RealStackPointer));
   MOZ_ASSERT(!regs.has(ICTailCallReg));
 #else
diff --git a/js/src/jit/CodeGenerator.h b/js/src/jit/CodeGenerator.h
index 5321978fc2..b2d9a8f5a5 100644
--- a/js/src/jit/CodeGenerator.h
+++ b/js/src/jit/CodeGenerator.h
@@ -20,16 +20,18 @@
 #elif defined(JS_CODEGEN_ARM)
 #  include "jit/arm/CodeGenerator-arm.h"
 #elif defined(JS_CODEGEN_ARM64)
 #  include "jit/arm64/CodeGenerator-arm64.h"
 #elif defined(JS_CODEGEN_MIPS32)
 #  include "jit/mips32/CodeGenerator-mips32.h"
 #elif defined(JS_CODEGEN_MIPS64)
 #  include "jit/mips64/CodeGenerator-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+#  include "jit/ppc64/CodeGenerator-ppc64.h"
 #elif defined(JS_CODEGEN_NONE)
 #  include "jit/none/CodeGenerator-none.h"
 #else
 #  error "Unknown architecture!"
 #endif

 #include "wasm/WasmGC.h"

diff --git a/js/src/jit/FlushICache.h b/js/src/jit/FlushICache.h
index fe66080df5..2071563c1e 100644
--- a/js/src/jit/FlushICache.h
+++ b/js/src/jit/FlushICache.h
@@ -19,17 +19,18 @@ namespace jit {
 #if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)

 inline void FlushICache(void* code, size_t size,
                         bool codeIsThreadLocal = true) {
   // No-op. Code and data caches are coherent on x86 and x64.
 }

 #elif (defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)) || \
-    (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64))
+    (defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)) || \
+    defined(JS_CODEGEN_PPC64)

 extern void FlushICache(void* code, size_t size, bool codeIsThreadLocal = true);

 #elif defined(JS_CODEGEN_NONE)

 inline void FlushICache(void* code, size_t size,
                         bool codeIsThreadLocal = true) {
   MOZ_CRASH();
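[annotation — not part of the patch] ppc64 joins ARM and MIPS on the out-of-line path because POWER cores do not keep the instruction cache coherent with data-side stores, so freshly written JIT code must be flushed explicitly. A minimal sketch of such an implementation, assuming the GCC/Clang builtin (the port's real implementation may differ):

    #include <stddef.h>

    void js::jit::FlushICache(void* code, size_t size, bool codeIsThreadLocal) {
      char* begin = static_cast<char*>(code);
      // On POWER this expands to the required dcbst/sync/icbi/isync sequence.
      __builtin___clear_cache(begin, begin + size);
    }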
diff --git a/js/src/jit/JitFrames.cpp b/js/src/jit/JitFrames.cpp
index 77cfe6a9cd..507f1551e6 100644
--- a/js/src/jit/JitFrames.cpp
+++ b/js/src/jit/JitFrames.cpp
@@ -2220,16 +2220,24 @@ MachineState MachineState::FromBailout(RegisterDump::GPRArray& regs,
     machine.setRegisterLocation(
         FloatRegister(FloatRegisters::Encoding(i), FloatRegisters::Single),
         &fpregs[i]);
     machine.setRegisterLocation(
         FloatRegister(FloatRegisters::Encoding(i), FloatRegisters::Double),
         &fpregs[i]);
     // No SIMD support in bailouts, SIMD is internal to wasm
   }
+#elif defined(JS_CODEGEN_PPC64)
+  for (unsigned i = 0; i < FloatRegisters::TotalPhys; i++) {
+    machine.setRegisterLocation(FloatRegister(i), &fpregs[i]);
+#  ifdef ENABLE_WASM_SIMD
+    // Needs additional handling if VMX or non-FPR VSX regs are in play.
+#    error "SIMD for PPC NYI"
+#  endif
+  }

 #elif defined(JS_CODEGEN_NONE)
   MOZ_CRASH();
 #else
 #  error "Unknown architecture!"
 #endif
   return machine;
 }
diff --git a/js/src/jit/JitFrames.h b/js/src/jit/JitFrames.h
index 40c661d146..7b4ea3157d 100644
--- a/js/src/jit/JitFrames.h
+++ b/js/src/jit/JitFrames.h
@@ -152,16 +152,26 @@ struct ResumeFromException {
   static const uint32_t RESUME_ENTRY_FRAME = 0;
   static const uint32_t RESUME_CATCH = 1;
   static const uint32_t RESUME_FINALLY = 2;
   static const uint32_t RESUME_FORCED_RETURN = 3;
   static const uint32_t RESUME_BAILOUT = 4;
   static const uint32_t RESUME_WASM = 5;
   static const uint32_t RESUME_WASM_CATCH = 6;

+#if defined(JS_CODEGEN_PPC64)
+  // This gets built on the stack as part of exception returns. Because
+  // it goes right on top of the stack, an ABI-compliant routine can wreck
+  // it, so we implement a minimum Power ISA linkage area (four doublewords).
+  void *_ppc_sp_;
+  void *_ppc_cr_;
+  void *_ppc_lr_;
+  void *_ppc_toc_;
+#endif
+
   uint8_t* framePointer;
   uint8_t* stackPointer;
   uint8_t* target;
   uint32_t kind;

   // Value to push when resuming into a |finally| block.
   // Also used by Wasm to send the exception object to the throw stub.
   JS::Value exception;
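[annotation — not part of the patch] Background for the padding above: in the ELFv2 ABI, any function may legitimately store into the "linkage area" at the top of its caller's stack frame. Since ResumeFromException is built directly at the stack pointer during exception returns, the four reserved doublewords keep an ABI-compliant callee from overwriting framePointer and the fields after it. A sketch of the header being imitated (field names here are descriptive, not from the ABI document):

    // Minimum ELFv2 stack frame header at SP; a callee may write all of it.
    struct ELFv2LinkageArea {
      void* backChain;  // SP+0:  caller's stack pointer
      void* savedCR;    // SP+8:  condition register save slot
      void* savedLR;    // SP+16: link register save slot
      void* savedTOC;   // SP+24: TOC (r2) save slot
    };
    static_assert(sizeof(ELFv2LinkageArea) == 32, "four doublewords");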
diff --git a/js/src/jit/JitOptions.cpp b/js/src/jit/JitOptions.cpp
index de13777fc3..795e41bf21 100644
--- a/js/src/jit/JitOptions.cpp
+++ b/js/src/jit/JitOptions.cpp
@@ -132,17 +132,22 @@ DefaultJitOptions::DefaultJitOptions() {
   // Warp compile Generator functions
   SET_DEFAULT(warpGenerator, true);

   // Whether the IonMonkey and Baseline JITs are enabled for Trusted Principals.
   // (Ignored if ion or baselineJit is set to true.)
   SET_DEFAULT(jitForTrustedPrincipals, false);

   // Whether the RegExp JIT is enabled.
+#if defined(JS_CODEGEN_PPC64)
+  // This may generate ISA 3 instructions. The other JIT tiers gate on it too.
+  SET_DEFAULT(nativeRegExp, MacroAssembler::SupportsFloatingPoint());
+#else
   SET_DEFAULT(nativeRegExp, true);
+#endif

   // Whether Warp should use ICs instead of transpiling Baseline CacheIR.
   SET_DEFAULT(forceInlineCaches, false);

   // Whether all ICs should be initialized as megamorphic ICs.
   SET_DEFAULT(forceMegamorphicICs, false);

   // Toggles whether large scripts are rejected.
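[annotation — not part of the patch] The comment suggests SupportsFloatingPoint() is overloaded on this port to mean "the CPU implements the ISA 3.0 (POWER9) instructions the backend emits," so gating nativeRegExp on it switches the regexp JIT off on older cores together with Baseline and Ion. A sketch of how such a probe can be written on linux-ppc64le (hypothetical helper, shown only to make the gating concrete):

    #include <sys/auxv.h>

    #ifndef PPC_FEATURE2_ARCH_3_00
    #  define PPC_FEATURE2_ARCH_3_00 0x00800000
    #endif

    static bool HasPPCISA3() {
      // AT_HWCAP2 advertises POWER9/ISA 3.0 support on Linux.
      return getauxval(AT_HWCAP2) & PPC_FEATURE2_ARCH_3_00;
    }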
diff --git a/js/src/jit/LIR.h b/js/src/jit/LIR.h
index 024bd798ca..0cd43c12ab 100644
--- a/js/src/jit/LIR.h
+++ b/js/src/jit/LIR.h
@@ -1939,16 +1939,18 @@ AnyRegister LAllocation::toRegister() const {
 #  include "jit/arm64/LIR-arm64.h"
 #elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
 #  if defined(JS_CODEGEN_MIPS32)
 #    include "jit/mips32/LIR-mips32.h"
 #  elif defined(JS_CODEGEN_MIPS64)
 #    include "jit/mips64/LIR-mips64.h"
 #  endif
 #  include "jit/mips-shared/LIR-mips-shared.h"
+#elif defined(JS_CODEGEN_PPC64)
+#  include "jit/ppc64/LIR-ppc64.h"
 #elif defined(JS_CODEGEN_NONE)
 #  include "jit/none/LIR-none.h"
 #else
 #  error "Unknown architecture!"
 #endif

 #undef LIR_HEADER

diff --git a/js/src/jit/Label.h b/js/src/jit/Label.h
index a8f93de378..480b18b251 100644
--- a/js/src/jit/Label.h
+++ b/js/src/jit/Label.h
@@ -21,17 +21,18 @@ struct LabelBase {
   uint32_t bound_ : 1;

   // offset_ < INVALID_OFFSET means that the label is either bound or has
   // incoming uses and needs to be bound.
   uint32_t offset_ : 31;

   void operator=(const LabelBase& label) = delete;

-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+    defined(JS_CODEGEN_PPC64)
  public:
 #endif
   static const uint32_t INVALID_OFFSET = 0x7fffffff;  // UINT31_MAX.

  public:
   LabelBase() : bound_(false), offset_(INVALID_OFFSET) {}

   // If the label is bound, all incoming edges have been patched and any
diff --git a/js/src/jit/Lowering.h b/js/src/jit/Lowering.h
index 979687da85..c064e5d914 100644
--- a/js/src/jit/Lowering.h
+++ b/js/src/jit/Lowering.h
@@ -18,16 +18,18 @@
 #elif defined(JS_CODEGEN_ARM)
 #  include "jit/arm/Lowering-arm.h"
 #elif defined(JS_CODEGEN_ARM64)
 #  include "jit/arm64/Lowering-arm64.h"
 #elif defined(JS_CODEGEN_MIPS32)
 #  include "jit/mips32/Lowering-mips32.h"
 #elif defined(JS_CODEGEN_MIPS64)
 #  include "jit/mips64/Lowering-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+#  include "jit/ppc64/Lowering-ppc64.h"
 #elif defined(JS_CODEGEN_NONE)
 #  include "jit/none/Lowering-none.h"
 #else
 #  error "Unknown architecture!"
 #endif

 namespace js {
 namespace jit {
diff --git a/js/src/jit/MacroAssembler-inl.h b/js/src/jit/MacroAssembler-inl.h
index cf16cdf0a7..fa39c5f4d2 100644
--- a/js/src/jit/MacroAssembler-inl.h
+++ b/js/src/jit/MacroAssembler-inl.h
@@ -30,16 +30,18 @@
 #elif defined(JS_CODEGEN_ARM)
 #  include "jit/arm/MacroAssembler-arm-inl.h"
 #elif defined(JS_CODEGEN_ARM64)
 #  include "jit/arm64/MacroAssembler-arm64-inl.h"
 #elif defined(JS_CODEGEN_MIPS32)
 #  include "jit/mips32/MacroAssembler-mips32-inl.h"
 #elif defined(JS_CODEGEN_MIPS64)
 #  include "jit/mips64/MacroAssembler-mips64-inl.h"
+#elif defined(JS_CODEGEN_PPC64)
+#  include "jit/ppc64/MacroAssembler-ppc64-inl.h"
 #elif !defined(JS_CODEGEN_NONE)
 #  error "Unknown architecture!"
 #endif

 #include "wasm/WasmBuiltins.h"

 namespace js {
 namespace jit {
diff --git a/js/src/jit/MacroAssembler.cpp b/js/src/jit/MacroAssembler.cpp
index 2a3aeec607..cbe9d14f46 100644
--- a/js/src/jit/MacroAssembler.cpp
+++ b/js/src/jit/MacroAssembler.cpp
@@ -4044,16 +4044,18 @@ void MacroAssembler::emitPreBarrierFastPath(JSRuntime* rt, MIRType type,
 #elif JS_CODEGEN_ARM
   ma_lsl(temp3, temp1, temp1);
 #elif JS_CODEGEN_ARM64
   Lsl(ARMRegister(temp1, 64), ARMRegister(temp1, 64), ARMRegister(temp3, 64));
 #elif JS_CODEGEN_MIPS32
   ma_sll(temp1, temp1, temp3);
 #elif JS_CODEGEN_MIPS64
   ma_dsll(temp1, temp1, temp3);
+#elif JS_CODEGEN_PPC64
+  as_sld(temp1, temp1, temp3);
 #elif JS_CODEGEN_NONE
   MOZ_CRASH();
 #else
 #  error "Unknown architecture"
 #endif

   // No barrier is needed if the bit is set, |word & mask != 0|.
   branchTestPtr(Assembler::NonZero, temp2, temp1, noBarrier);
diff --git a/js/src/jit/MacroAssembler.h b/js/src/jit/MacroAssembler.h
index e2d53d5cef..cb0148b94e 100644
--- a/js/src/jit/MacroAssembler.h
+++ b/js/src/jit/MacroAssembler.h
@@ -20,16 +20,18 @@
 #elif defined(JS_CODEGEN_ARM)
 #  include "jit/arm/MacroAssembler-arm.h"
 #elif defined(JS_CODEGEN_ARM64)
 #  include "jit/arm64/MacroAssembler-arm64.h"
 #elif defined(JS_CODEGEN_MIPS32)
 #  include "jit/mips32/MacroAssembler-mips32.h"
 #elif defined(JS_CODEGEN_MIPS64)
 #  include "jit/mips64/MacroAssembler-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+#  include "jit/ppc64/MacroAssembler-ppc64.h"
 #elif defined(JS_CODEGEN_NONE)
 #  include "jit/none/MacroAssembler-none.h"
 #else
 #  error "Unknown architecture!"
 #endif
 #include "jit/ABIFunctions.h"
 #include "jit/AtomicOp.h"
 #include "jit/AutoJitContextAlloc.h"
@@ -87,18 +89,18 @@
 // //{{{ check_macroassembler_style
 // inline uint32_t
 // MacroAssembler::framePushed() const
 // {
 //   return framePushed_;
 // }
 // ////}}} check_macroassembler_style

-#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64
-#define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared
+#define ALL_ARCH mips32, mips64, arm, arm64, x86, x64, ppc64
+#define ALL_SHARED_ARCH arm, arm64, x86_shared, mips_shared, ppc64

 // * How this macro works:
 //
 // DEFINED_ON is a macro which check if, for the current architecture, the
 // method is defined on the macro assembler or not.
 //
 // For each architecture, we have a macro named DEFINED_ON_arch. This macro is
 // empty if this is not the current architecture. Otherwise it must be either
@@ -134,16 +136,17 @@
 #define DEFINED_ON_x86
 #define DEFINED_ON_x64
 #define DEFINED_ON_x86_shared
 #define DEFINED_ON_arm
 #define DEFINED_ON_arm64
 #define DEFINED_ON_mips32
 #define DEFINED_ON_mips64
 #define DEFINED_ON_mips_shared
+#define DEFINED_ON_ppc64
 #define DEFINED_ON_none

 // Specialize for each architecture.
 #if defined(JS_CODEGEN_X86)
 #  undef DEFINED_ON_x86
 #  define DEFINED_ON_x86 define
 #  undef DEFINED_ON_x86_shared
 #  define DEFINED_ON_x86_shared define
@@ -163,16 +166,19 @@
 #  define DEFINED_ON_mips32 define
 #  undef DEFINED_ON_mips_shared
 #  define DEFINED_ON_mips_shared define
 #elif defined(JS_CODEGEN_MIPS64)
 #  undef DEFINED_ON_mips64
 #  define DEFINED_ON_mips64 define
 #  undef DEFINED_ON_mips_shared
 #  define DEFINED_ON_mips_shared define
+#elif defined(JS_CODEGEN_PPC64)
+#  undef DEFINED_ON_ppc64
+#  define DEFINED_ON_ppc64 define
 #elif defined(JS_CODEGEN_NONE)
 #  undef DEFINED_ON_none
 #  define DEFINED_ON_none crash
 #else
 #  error "Unknown architecture!"
 #endif

 #define DEFINED_ON_RESULT_crash \
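[annotation — not part of the patch] To see why a one-line DEFINED_ON_ppc64 is all the declaration side needs: when compiling for ppc64, DEFINED_ON_ppc64 expands to `define` and every other DEFINED_ON_arch stays empty, so a declaration such as

    void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared, ppc64);

reduces (roughly, eliding the intermediate helper macros) to a plain

    void PopStackPtr();

whose body lives in the ppc64 backend, while on an architecture not named in the list the member is declared `= delete`. A ppc64 method that is declared in the lists below but never implemented in jit/ppc64/ therefore fails at compile or link time instead of silently falling through.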
@@ -479,36 +485,36 @@ class MacroAssembler : public MacroAssemblerSpecific {
   // targets roll their own save-code instead.
   //
   // Nevertheless, because some targets *do* call PushRegsInMask from
   // JitRuntime::generateInvalidator, you should check carefully all of the
   // ::generateInvalidator methods if you change the PushRegsInMask format.

   // The size of the area used by PushRegsInMask.
   size_t PushRegsInMaskSizeInBytes(LiveRegisterSet set)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   void PushRegsInMask(LiveRegisterSet set)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);
   void PushRegsInMask(LiveGeneralRegisterSet set);

   // Like PushRegsInMask, but instead of pushing the registers, store them to
   // |dest|. |dest| should point to the end of the reserved space, so the
   // first register will be stored at |dest.offset - sizeof(register)|. It is
   // required that |dest.offset| is at least as large as the value computed by
   // PushRegsInMaskSizeInBytes for this |set|. In other words, |dest.base|
   // must point to either the lowest address in the save area, or some address
   // below that.
   void storeRegsInMask(LiveRegisterSet set, Address dest, Register scratch)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   void PopRegsInMask(LiveRegisterSet set);
   void PopRegsInMask(LiveGeneralRegisterSet set);
   void PopRegsInMaskIgnore(LiveRegisterSet set, LiveRegisterSet ignore)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   // ===============================================================
   // Stack manipulation functions -- single registers/values.

   void Push(const Operand op) DEFINED_ON(x86_shared);
   void Push(Register reg) PER_SHARED_ARCH;
   void Push(Register reg1, Register reg2, Register reg3, Register reg4)
       DEFINED_ON(arm64);
@@ -531,17 +537,17 @@ class MacroAssembler : public MacroAssemblerSpecific {
   inline CodeOffset PushWithPatch(ImmWord word);
   inline CodeOffset PushWithPatch(ImmPtr imm);

   void Pop(const Operand op) DEFINED_ON(x86_shared);
   void Pop(Register reg) PER_SHARED_ARCH;
   void Pop(FloatRegister t) PER_SHARED_ARCH;
   void Pop(const ValueOperand& val) PER_SHARED_ARCH;
   void PopFlags() DEFINED_ON(x86_shared);
-  void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared);
+  void PopStackPtr() DEFINED_ON(arm, mips_shared, x86_shared, ppc64);
   void popRooted(VMFunctionData::RootType rootType, Register cellReg,
                  const ValueOperand& valueReg);

   // Move the stack pointer based on the requested amount.
   void adjustStack(int amount);
   void freeStack(uint32_t amount);

   // Warning: This method does not update the framePushed() counter.
@@ -589,18 +595,18 @@ class MacroAssembler : public MacroAssemblerSpecific {

   // Push the return address and make a call. On platforms where this function
   // is not defined, push the link register (pushReturnAddress) at the entry
   // point of the callee.
   void callAndPushReturnAddress(Register reg) DEFINED_ON(x86_shared);
   void callAndPushReturnAddress(Label* label) DEFINED_ON(x86_shared);

   // These do not adjust framePushed().
-  void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
-  void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64);
+  void pushReturnAddress() DEFINED_ON(mips_shared, arm, arm64, ppc64);
+  void popReturnAddress() DEFINED_ON(mips_shared, arm, arm64, ppc64);

   // Useful for dealing with two-valued returns.
   void moveRegPair(Register src0, Register src1, Register dst0, Register dst1,
                    MoveOp::Type type = MoveOp::GENERAL);

  public:
   // ===============================================================
   // Patchable near/far jumps.
@@ -621,20 +627,20 @@ class MacroAssembler : public MacroAssemblerSpecific {

   // These methods are like movWithPatch/PatchDataWithValueCheck but allow
   // using pc-relative addressing on certain platforms (RIP-relative LEA on x64,
   // ADR instruction on arm64).
   //
   // Note: "Near" applies to ARM64 where the target must be within 1 MB (this is
   // release-asserted).
   CodeOffset moveNearAddressWithPatch(Register dest)
-      DEFINED_ON(x86, x64, arm, arm64, mips_shared);
+      DEFINED_ON(x86, x64, arm, arm64, mips_shared, ppc64);
   static void patchNearAddressMove(CodeLocationLabel loc,
                                    CodeLocationLabel target)
-      DEFINED_ON(x86, x64, arm, arm64, mips_shared);
+      DEFINED_ON(x86, x64, arm, arm64, mips_shared, ppc64);

  public:
   // ===============================================================
   // [SMDOC] JIT-to-C++ Function Calls (callWithABI)
   //
   // callWithABI is used to make a call using the standard C/C++ system ABI.
   //
   // callWithABI is a low level interface for making calls, as such every call
@@ -983,20 +989,21 @@ class MacroAssembler : public MacroAssemblerSpecific {
   inline void xor32(Imm32 imm, Register dest) PER_SHARED_ARCH;
   inline void xor32(Imm32 imm, const Address& dest) PER_SHARED_ARCH;
   inline void xor32(const Address& src, Register dest) PER_SHARED_ARCH;

   inline void xorPtr(Register src, Register dest) PER_ARCH;
   inline void xorPtr(Imm32 imm, Register dest) PER_ARCH;

   inline void and64(const Operand& src, Register64 dest)
-      DEFINED_ON(x64, mips64);
-  inline void or64(const Operand& src, Register64 dest) DEFINED_ON(x64, mips64);
+      DEFINED_ON(x64, mips64, ppc64);
+  inline void or64(const Operand& src, Register64 dest)
+      DEFINED_ON(x64, mips64, ppc64);
   inline void xor64(const Operand& src, Register64 dest)
-      DEFINED_ON(x64, mips64);
+      DEFINED_ON(x64, mips64, ppc64);

   // ===============================================================
   // Swap instructions

   // Swap the two lower bytes and sign extend the result to 32-bit.
   inline void byteSwap16SignExtend(Register reg) PER_SHARED_ARCH;

   // Swap the two lower bytes and zero extend the result to 32-bit.
@@ -1020,27 +1027,27 @@ class MacroAssembler : public MacroAssemblerSpecific {
   inline void addPtr(Register src, Register dest) PER_ARCH;
   inline void addPtr(Register src1, Register src2, Register dest)
       DEFINED_ON(arm64);
   inline void addPtr(Imm32 imm, Register dest) PER_ARCH;
   inline void addPtr(Imm32 imm, Register src, Register dest) DEFINED_ON(arm64);
   inline void addPtr(ImmWord imm, Register dest) PER_ARCH;
   inline void addPtr(ImmPtr imm, Register dest);
   inline void addPtr(Imm32 imm, const Address& dest)
-      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+      DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64);
   inline void addPtr(Imm32 imm, const AbsoluteAddress& dest)
       DEFINED_ON(x86, x64);
   inline void addPtr(const Address& src, Register dest)
-      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+      DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64);

   inline void add64(Register64 src, Register64 dest) PER_ARCH;
   inline void add64(Imm32 imm, Register64 dest) PER_ARCH;
   inline void add64(Imm64 imm, Register64 dest) PER_ARCH;
   inline void add64(const Operand& src, Register64 dest)
-      DEFINED_ON(x64, mips64);
+      DEFINED_ON(x64, mips64, ppc64);

   inline void addFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

   // Compute dest=SP-imm where dest is a pointer registers and not SP. The
   // offset returned from sub32FromStackPtrWithPatch() must be passed to
   // patchSub32FromStackPtr().
   inline CodeOffset sub32FromStackPtrWithPatch(Register dest) PER_ARCH;
   inline void patchSub32FromStackPtr(CodeOffset offset, Imm32 imm) PER_ARCH;
@@ -1049,58 +1056,58 @@ class MacroAssembler : public MacroAssemblerSpecific {
   inline void addConstantDouble(double d, FloatRegister dest) DEFINED_ON(x86);

   inline void sub32(const Address& src, Register dest) PER_SHARED_ARCH;
   inline void sub32(Register src, Register dest) PER_SHARED_ARCH;
   inline void sub32(Imm32 imm, Register dest) PER_SHARED_ARCH;

   inline void subPtr(Register src, Register dest) PER_ARCH;
   inline void subPtr(Register src, const Address& dest)
-      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+      DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64);
   inline void subPtr(Imm32 imm, Register dest) PER_ARCH;
   inline void subPtr(ImmWord imm, Register dest) DEFINED_ON(x64);
   inline void subPtr(const Address& addr, Register dest)
-      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+      DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64);

   inline void sub64(Register64 src, Register64 dest) PER_ARCH;
   inline void sub64(Imm64 imm, Register64 dest) PER_ARCH;
   inline void sub64(const Operand& src, Register64 dest)
-      DEFINED_ON(x64, mips64);
+      DEFINED_ON(x64, mips64, ppc64);

   inline void subFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

   inline void subDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

   inline void mul32(Register rhs, Register srcDest) PER_SHARED_ARCH;

   inline void mul32(Register src1, Register src2, Register dest, Label* onOver)
       DEFINED_ON(arm64);

   inline void mulPtr(Register rhs, Register srcDest) PER_ARCH;

   inline void mul64(const Operand& src, const Register64& dest) DEFINED_ON(x64);
   inline void mul64(const Operand& src, const Register64& dest,
-                    const Register temp) DEFINED_ON(x64, mips64);
+                    const Register temp) DEFINED_ON(x64, mips64, ppc64);
   inline void mul64(Imm64 imm, const Register64& dest) PER_ARCH;
   inline void mul64(Imm64 imm, const Register64& dest, const Register temp)
-      DEFINED_ON(x86, x64, arm, mips32, mips64);
+      DEFINED_ON(x86, x64, arm, mips32, mips64, ppc64);
   inline void mul64(const Register64& src, const Register64& dest,
                     const Register temp) PER_ARCH;
   inline void mul64(const Register64& src1, const Register64& src2,
                     const Register64& dest) DEFINED_ON(arm64);
   inline void mul64(Imm64 src1, const Register64& src2, const Register64& dest)
       DEFINED_ON(arm64);

   inline void mulBy3(Register src, Register dest) PER_ARCH;

   inline void mulFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   inline void mulDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

   inline void mulDoublePtr(ImmPtr imm, Register temp, FloatRegister dest)
-      DEFINED_ON(mips_shared, arm, arm64, x86, x64);
+      DEFINED_ON(mips_shared, arm, arm64, x86, x64, ppc64);

   // Perform an integer division, returning the integer part rounded toward
   // zero. rhs must not be zero, and the division must not overflow.
   //
   // On x86_shared, srcDest must be eax and edx will be clobbered.
   // On ARM, the chip must have hardware division instructions.
   inline void quotient32(Register rhs, Register srcDest,
                          bool isUnsigned) PER_SHARED_ARCH;
@@ -1117,41 +1124,41 @@ class MacroAssembler : public MacroAssemblerSpecific {
   // zero. rhs must not be zero, and the division must not overflow.
   //
   // This variant preserves registers, and doesn't require hardware division
   // instructions on ARM (will call out to a runtime routine).
   //
   // rhs is preserved, srdDest is clobbered.
   void flexibleRemainder32(Register rhs, Register srcDest, bool isUnsigned,
                            const LiveRegisterSet& volatileLiveRegs)
-      DEFINED_ON(mips_shared, arm, arm64, x86_shared);
+      DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64);

   // Perform an integer division, returning the integer part rounded toward
   // zero. rhs must not be zero, and the division must not overflow.
   //
   // This variant preserves registers, and doesn't require hardware division
   // instructions on ARM (will call out to a runtime routine).
   //
   // rhs is preserved, srdDest is clobbered.
   void flexibleQuotient32(Register rhs, Register srcDest, bool isUnsigned,
                           const LiveRegisterSet& volatileLiveRegs)
-      DEFINED_ON(mips_shared, arm, arm64, x86_shared);
+      DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64);

   // Perform an integer division, returning the integer part rounded toward
   // zero. rhs must not be zero, and the division must not overflow. The
   // remainder is stored into the third argument register here.
   //
   // This variant preserves registers, and doesn't require hardware division
   // instructions on ARM (will call out to a runtime routine).
   //
   // rhs is preserved, srdDest and remOutput are clobbered.
   void flexibleDivMod32(Register rhs, Register srcDest, Register remOutput,
                         bool isUnsigned,
                         const LiveRegisterSet& volatileLiveRegs)
-      DEFINED_ON(mips_shared, arm, arm64, x86_shared);
+      DEFINED_ON(mips_shared, arm, arm64, x86_shared, ppc64);

   inline void divFloat32(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;
   inline void divDouble(FloatRegister src, FloatRegister dest) PER_SHARED_ARCH;

   inline void inc64(AbsoluteAddress dest) PER_ARCH;

   inline void neg32(Register reg) PER_SHARED_ARCH;
   inline void neg64(Register64 reg) PER_ARCH;
@@ -1342,17 +1349,17 @@ class MacroAssembler : public MacroAssemblerSpecific {
   // temp may be invalid only if the chip has the POPCNT instruction.
   inline void popcnt64(Register64 src, Register64 dest, Register temp) PER_ARCH;

   // ===============================================================
   // Condition functions

   template <typename T1, typename T2>
   inline void cmp32Set(Condition cond, T1 lhs, T2 rhs, Register dest)
-      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64);

   template <typename T1, typename T2>
   inline void cmpPtrSet(Condition cond, T1 lhs, T2 rhs, Register dest) PER_ARCH;

   // ===============================================================
   // Branch functions

   template <class L>
@@ -1367,34 +1374,34 @@ class MacroAssembler : public MacroAssemblerSpecific {

   inline void branch32(Condition cond, const Address& lhs, Register rhs,
                        Label* label) PER_SHARED_ARCH;
   inline void branch32(Condition cond, const Address& lhs, Imm32 rhs,
                        Label* label) PER_SHARED_ARCH;

   inline void branch32(Condition cond, const AbsoluteAddress& lhs, Register rhs,
                        Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void branch32(Condition cond, const AbsoluteAddress& lhs, Imm32 rhs,
                        Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void branch32(Condition cond, const BaseIndex& lhs, Register rhs,
                        Label* label) DEFINED_ON(arm, x86_shared);
   inline void branch32(Condition cond, const BaseIndex& lhs, Imm32 rhs,
                        Label* label) PER_SHARED_ARCH;

   inline void branch32(Condition cond, const Operand& lhs, Register rhs,
                        Label* label) DEFINED_ON(x86_shared);
   inline void branch32(Condition cond, const Operand& lhs, Imm32 rhs,
                        Label* label) DEFINED_ON(x86_shared);

   inline void branch32(Condition cond, wasm::SymbolicAddress lhs, Imm32 rhs,
                        Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // The supported condition are Equal, NotEqual, LessThan(orEqual),
   // GreaterThan(orEqual), Below(orEqual) and Above(orEqual). When a fail label
   // is not defined it will fall through to next instruction, else jump to the
   // fail label.
   inline void branch64(Condition cond, Register64 lhs, Imm64 val,
                        Label* success, Label* fail = nullptr) PER_ARCH;
   inline void branch64(Condition cond, Register64 lhs, Register64 rhs,
@@ -1433,32 +1440,32 @@ class MacroAssembler : public MacroAssemblerSpecific {

   inline void branchPtr(Condition cond, const BaseIndex& lhs, ImmWord rhs,
                         Label* label) PER_SHARED_ARCH;
   inline void branchPtr(Condition cond, const BaseIndex& lhs, Register rhs,
                         Label* label) PER_SHARED_ARCH;

   inline void branchPtr(Condition cond, const AbsoluteAddress& lhs,
                         Register rhs, Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void branchPtr(Condition cond, const AbsoluteAddress& lhs, ImmWord rhs,
                         Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void branchPtr(Condition cond, wasm::SymbolicAddress lhs, Register rhs,
                         Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // Given a pointer to a GC Cell, retrieve the StoreBuffer pointer from its
   // chunk header, or nullptr if it is in the tenured heap.
   void loadStoreBuffer(Register ptr, Register buffer) PER_ARCH;

   void branchPtrInNurseryChunk(Condition cond, Register ptr, Register temp,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   void branchPtrInNurseryChunk(Condition cond, const Address& address,
                                Register temp, Label* label) DEFINED_ON(x86);
   void branchValueIsNurseryCell(Condition cond, const Address& address,
                                 Register temp, Label* label) PER_ARCH;
   void branchValueIsNurseryCell(Condition cond, ValueOperand value,
                                 Register temp, Label* label) PER_ARCH;

   // This function compares a Value (lhs) which is having a private pointer
@@ -1470,36 +1477,36 @@ class MacroAssembler : public MacroAssemblerSpecific {
                            FloatRegister rhs, Label* label) PER_SHARED_ARCH;

   // Truncate a double/float32 to int32 and when it doesn't fit an int32 it will
   // jump to the failure label. This particular variant is allowed to return the
   // value module 2**32, which isn't implemented on all architectures. E.g. the
   // x64 variants will do this only in the int64_t range.
   inline void branchTruncateFloat32MaybeModUint32(FloatRegister src,
                                                   Register dest, Label* fail)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void branchTruncateDoubleMaybeModUint32(FloatRegister src,
                                                  Register dest, Label* fail)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // Truncate a double/float32 to intptr and when it doesn't fit jump to the
   // failure label.
   inline void branchTruncateFloat32ToPtr(FloatRegister src, Register dest,
                                          Label* fail) DEFINED_ON(x86, x64);
   inline void branchTruncateDoubleToPtr(FloatRegister src, Register dest,
                                         Label* fail) DEFINED_ON(x86, x64);

   // Truncate a double/float32 to int32 and when it doesn't fit jump to the
   // failure label.
   inline void branchTruncateFloat32ToInt32(FloatRegister src, Register dest,
                                            Label* fail)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void branchTruncateDoubleToInt32(FloatRegister src, Register dest,
                                           Label* fail)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void branchDouble(DoubleCondition cond, FloatRegister lhs,
                            FloatRegister rhs, Label* label) PER_SHARED_ARCH;

   inline void branchDoubleNotInInt64Range(Address src, Register temp,
                                           Label* fail);
   inline void branchDoubleNotInUInt64Range(Address src, Register temp,
                                            Label* fail);
@@ -1543,17 +1550,17 @@ class MacroAssembler : public MacroAssemblerSpecific {
                            L label) PER_SHARED_ARCH;
   template <class L>
   inline void branchTest32(Condition cond, Register lhs, Imm32 rhs,
                            L label) PER_SHARED_ARCH;
   inline void branchTest32(Condition cond, const Address& lhs, Imm32 rhh,
                            Label* label) PER_SHARED_ARCH;
   inline void branchTest32(Condition cond, const AbsoluteAddress& lhs,
                            Imm32 rhs, Label* label)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   template <class L>
   inline void branchTestPtr(Condition cond, Register lhs, Register rhs,
                             L label) PER_SHARED_ARCH;
   inline void branchTestPtr(Condition cond, Register lhs, Imm32 rhs,
                             Label* label) PER_SHARED_ARCH;
   inline void branchTestPtr(Condition cond, const Address& lhs, Imm32 rhs,
                             Label* label) PER_SHARED_ARCH;
@@ -1689,17 +1696,17 @@ class MacroAssembler : public MacroAssemblerSpecific {

   // Perform a type-test on a tag of a Value (32bits boxing), or the tagged
   // value (64bits boxing).
   inline void branchTestUndefined(Condition cond, Register tag,
                                   Label* label) PER_SHARED_ARCH;
   inline void branchTestInt32(Condition cond, Register tag,
                               Label* label) PER_SHARED_ARCH;
   inline void branchTestDouble(Condition cond, Register tag, Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);
   inline void branchTestNumber(Condition cond, Register tag,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestBoolean(Condition cond, Register tag,
                                 Label* label) PER_SHARED_ARCH;
   inline void branchTestString(Condition cond, Register tag,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestSymbol(Condition cond, Register tag,
                                Label* label) PER_SHARED_ARCH;
@@ -1721,106 +1728,106 @@ class MacroAssembler : public MacroAssemblerSpecific {
   // BaseIndex and ValueOperand variants clobber the ScratchReg on x64.
   // All Variants clobber the ScratchReg on arm64.
   inline void branchTestUndefined(Condition cond, const Address& address,
                                   Label* label) PER_SHARED_ARCH;
   inline void branchTestUndefined(Condition cond, const BaseIndex& address,
                                   Label* label) PER_SHARED_ARCH;
   inline void branchTestUndefined(Condition cond, const ValueOperand& value,
                                   Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestInt32(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
   inline void branchTestInt32(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
   inline void branchTestInt32(Condition cond, const ValueOperand& value,
                               Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestDouble(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestDouble(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestDouble(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestNumber(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestBoolean(Condition cond, const Address& address,
                                 Label* label) PER_SHARED_ARCH;
   inline void branchTestBoolean(Condition cond, const BaseIndex& address,
                                 Label* label) PER_SHARED_ARCH;
   inline void branchTestBoolean(Condition cond, const ValueOperand& value,
                                 Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestString(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestString(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestString(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestSymbol(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestSymbol(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestSymbol(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestBigInt(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestBigInt(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestBigInt(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestNull(Condition cond, const Address& address,
                              Label* label) PER_SHARED_ARCH;
   inline void branchTestNull(Condition cond, const BaseIndex& address,
                              Label* label) PER_SHARED_ARCH;
   inline void branchTestNull(Condition cond, const ValueOperand& value,
                              Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   // Clobbers the ScratchReg on x64.
   inline void branchTestObject(Condition cond, const Address& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestObject(Condition cond, const BaseIndex& address,
                                Label* label) PER_SHARED_ARCH;
   inline void branchTestObject(Condition cond, const ValueOperand& value,
                                Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestGCThing(Condition cond, const Address& address,
                                 Label* label) PER_SHARED_ARCH;
   inline void branchTestGCThing(Condition cond, const BaseIndex& address,
                                 Label* label) PER_SHARED_ARCH;
   inline void branchTestGCThing(Condition cond, const ValueOperand& value,
                                 Label* label) PER_SHARED_ARCH;

   inline void branchTestPrimitive(Condition cond, const ValueOperand& value,
                                   Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestMagic(Condition cond, const Address& address,
                               Label* label) PER_SHARED_ARCH;
   inline void branchTestMagic(Condition cond, const BaseIndex& address,
                               Label* label) PER_SHARED_ARCH;
   template <class L>
   inline void branchTestMagic(Condition cond, const ValueOperand& value,
                               L label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   inline void branchTestMagic(Condition cond, const Address& valaddr,
                               JSWhyMagic why, Label* label) PER_ARCH;

   inline void branchTestMagicValue(Condition cond, const ValueOperand& val,
                                    JSWhyMagic why, Label* label);

   void branchTestValue(Condition cond, const ValueOperand& lhs,
@@ -1828,42 +1835,42 @@ class MacroAssembler : public MacroAssemblerSpecific {

   inline void branchTestValue(Condition cond, const BaseIndex& lhs,
                               const ValueOperand& rhs, Label* label) PER_ARCH;

   // Checks if given Value is evaluated to true or false in a condition.
   // The type of the value should match the type of the method.
   inline void branchTestInt32Truthy(bool truthy, const ValueOperand& value,
                                     Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);
   inline void branchTestDoubleTruthy(bool truthy, FloatRegister reg,
                                      Label* label) PER_SHARED_ARCH;
   inline void branchTestBooleanTruthy(bool truthy, const ValueOperand& value,
                                       Label* label) PER_ARCH;
   inline void branchTestStringTruthy(bool truthy, const ValueOperand& value,
                                      Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);
   inline void branchTestBigIntTruthy(bool truthy, const ValueOperand& value,
                                      Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   // Create an unconditional branch to the address given as argument.
   inline void branchToComputedAddress(const BaseIndex& address) PER_ARCH;

  private:
   template <typename T, typename S, typename L>
   inline void branchPtrImpl(Condition cond, const T& lhs, const S& rhs, L label)
       DEFINED_ON(x86_shared);

   void branchPtrInNurseryChunkImpl(Condition cond, Register ptr, Label* label)
       DEFINED_ON(x86);
   template <typename T>
   void branchValueIsNurseryCellImpl(Condition cond, const T& value,
                                     Register temp, Label* label)
-      DEFINED_ON(arm64, x64, mips64);
+      DEFINED_ON(arm64, x64, mips64, ppc64);

   template <typename T>
   inline void branchTestUndefinedImpl(Condition cond, const T& t, Label* label)
       DEFINED_ON(arm, arm64, x86_shared);
   template <typename T>
   inline void branchTestInt32Impl(Condition cond, const T& t, Label* label)
       DEFINED_ON(arm, arm64, x86_shared);
   template <typename T>
@@ -1923,116 +1930,116 @@ class MacroAssembler : public MacroAssemblerSpecific {
   inline void fallibleUnboxString(const T& src, Register dest, Label* fail);
   template <typename T>
   inline void fallibleUnboxSymbol(const T& src, Register dest, Label* fail);
   template <typename T>
   inline void fallibleUnboxBigInt(const T& src, Register dest, Label* fail);

   inline void cmp32Move32(Condition cond, Register lhs, Register rhs,
                           Register src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+      DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64);

   inline void cmp32Move32(Condition cond, Register lhs, const Address& rhs,
                           Register src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+      DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64);

   inline void cmpPtrMovePtr(Condition cond, Register lhs, Register rhs,
                             Register src, Register dest) PER_ARCH;

   inline void cmpPtrMovePtr(Condition cond, Register lhs, const Address& rhs,
                             Register src, Register dest) PER_ARCH;

   inline void cmp32Load32(Condition cond, Register lhs, const Address& rhs,
                           const Address& src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+      DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64);

   inline void cmp32Load32(Condition cond, Register lhs, Register rhs,
                           const Address& src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+      DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64);

   inline void cmp32LoadPtr(Condition cond, const Address& lhs, Imm32 rhs,
                            const Address& src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void cmp32MovePtr(Condition cond, Register lhs, Imm32 rhs,
                            Register src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void test32LoadPtr(Condition cond, const Address& addr, Imm32 mask,
                             const Address& src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void test32MovePtr(Condition cond, const Address& addr, Imm32 mask,
                             Register src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // Conditional move for Spectre mitigations.
   inline void spectreMovePtr(Condition cond, Register src, Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // Zeroes dest if the condition is true.
   inline void spectreZeroRegister(Condition cond, Register scratch,
                                   Register dest)
-      DEFINED_ON(arm, arm64, mips_shared, x86_shared);
+      DEFINED_ON(arm, arm64, mips_shared, x86_shared, ppc64);

   // Performs a bounds check and zeroes the index register if out-of-bounds
   // (to mitigate Spectre).
  private:
   inline void spectreBoundsCheck32(Register index, const Operand& length,
                                    Register maybeScratch, Label* failure)
       DEFINED_ON(x86);

  public:
   inline void spectreBoundsCheck32(Register index, Register length,
                                    Register maybeScratch, Label* failure)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void spectreBoundsCheck32(Register index, const Address& length,
                                    Register maybeScratch, Label* failure)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   inline void spectreBoundsCheckPtr(Register index, Register length,
                                     Register maybeScratch, Label* failure)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);
   inline void spectreBoundsCheckPtr(Register index, const Address& length,
                                     Register maybeScratch, Label* failure)
-      DEFINED_ON(arm, arm64, mips_shared, x86, x64);
+      DEFINED_ON(arm, arm64, mips_shared, x86, x64, ppc64);

   // ========================================================================
   // Canonicalization primitives.
   inline void canonicalizeDouble(FloatRegister reg);
   inline void canonicalizeDoubleIfDeterministic(FloatRegister reg);

   inline void canonicalizeFloat(FloatRegister reg);
   inline void canonicalizeFloatIfDeterministic(FloatRegister reg);

  public:
   // ========================================================================
   // Memory access primitives.
   inline void storeUncanonicalizedDouble(FloatRegister src, const Address& dest)
-      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64);
   inline void storeUncanonicalizedDouble(FloatRegister src,
                                          const BaseIndex& dest)
-      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64);
   inline void storeUncanonicalizedDouble(FloatRegister src, const Operand& dest)
       DEFINED_ON(x86_shared);

   template <class T>
   inline void storeDouble(FloatRegister src, const T& dest);

   template <class T>
   inline void boxDouble(FloatRegister src, const T& dest);

   using MacroAssemblerSpecific::boxDouble;

   inline void storeUncanonicalizedFloat32(FloatRegister src,
                                           const Address& dest)
-      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64);
   inline void storeUncanonicalizedFloat32(FloatRegister src,
                                           const BaseIndex& dest)
-      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64);
+      DEFINED_ON(x86_shared, arm, arm64, mips32, mips64, ppc64);
   inline void storeUncanonicalizedFloat32(FloatRegister src,
                                           const Operand& dest)
       DEFINED_ON(x86_shared);

   template <class T>
   inline void storeFloat32(FloatRegister src, const T& dest);

   template <typename T>
@@ -3470,20 +3477,20 @@ class MacroAssembler : public MacroAssemblerSpecific {
       DEFINED_ON(x86, x64);

  public:
   // ========================================================================
   // Convert floating point.

   // temp required on x86 and x64; must be undefined on mips64.
   void convertUInt64ToFloat32(Register64 src, FloatRegister dest, Register temp)
-      DEFINED_ON(arm64, mips64, x64, x86);
+      DEFINED_ON(arm64, mips64, x64, x86, ppc64);

   void convertInt64ToFloat32(Register64 src, FloatRegister dest)
-      DEFINED_ON(arm64, mips64, x64, x86);
+      DEFINED_ON(arm64, mips64, x64, x86, ppc64);

   bool convertUInt64ToDoubleNeedsTemp() PER_ARCH;

   // temp required when convertUInt64ToDoubleNeedsTemp() returns true.
   void convertUInt64ToDouble(Register64 src, FloatRegister dest,
                              Register temp) PER_ARCH;

   void convertInt64ToDouble(Register64 src, FloatRegister dest) PER_ARCH;
@@ -3514,29 +3521,29 @@ class MacroAssembler : public MacroAssemblerSpecific {
   //
   // On 32-bit systems for both wasm and asm.js, and on 64-bit systems for
   // asm.js, heap lengths are limited to 2GB. On 64-bit systems for wasm,
   // 32-bit heap lengths are limited to 4GB, and 64-bit heap lengths will be
   // limited to something much larger.

   void wasmBoundsCheck32(Condition cond, Register index,
                          Register boundsCheckLimit, Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   void wasmBoundsCheck32(Condition cond, Register index,
                          Address boundsCheckLimit, Label* label)
-      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared);
+      DEFINED_ON(arm, arm64, mips32, mips64, x86_shared, ppc64);

   void wasmBoundsCheck64(Condition cond, Register64 index,
                          Register64 boundsCheckLimit, Label* label)
-      DEFINED_ON(arm64, mips64, x64);
+      DEFINED_ON(arm64, mips64, x64, ppc64);

   void wasmBoundsCheck64(Condition cond, Register64 index,
                          Address boundsCheckLimit, Label* label)
-      DEFINED_ON(arm64, mips64, x64);
+      DEFINED_ON(arm64, mips64, x64, ppc64);

   // Each wasm load/store instruction appends its own wasm::Trap::OutOfBounds.
   void wasmLoad(const wasm::MemoryAccessDesc& access, Operand srcAddr,
                 AnyRegister out) DEFINED_ON(x86, x64);
   void wasmLoadI64(const wasm::MemoryAccessDesc& access, Operand srcAddr,
                    Register64 out) DEFINED_ON(x86, x64);
   void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
                  Operand dstAddr) DEFINED_ON(x86, x64);
@@ -3546,26 +3553,26 @@ class MacroAssembler : public MacroAssemblerSpecific {
   // For all the ARM/MIPS wasmLoad and wasmStore functions below, `ptr`
   // MUST equal `ptrScratch`, and that register will be updated based on
   // conditions listed below (where it is only mentioned as `ptr`).

   // `ptr` will be updated if access.offset() != 0 or access.type() ==
   // Scalar::Int64.
   void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
|
|
Register ptr, Register ptrScratch, AnyRegister output)
|
|
- DEFINED_ON(arm, mips_shared);
|
|
+ DEFINED_ON(arm, mips_shared, ppc64);
|
|
void wasmLoadI64(const wasm::MemoryAccessDesc& access, Register memoryBase,
|
|
Register ptr, Register ptrScratch, Register64 output)
|
|
- DEFINED_ON(arm, mips32, mips64);
|
|
+ DEFINED_ON(arm, mips32, mips64, ppc64);
|
|
void wasmStore(const wasm::MemoryAccessDesc& access, AnyRegister value,
|
|
Register memoryBase, Register ptr, Register ptrScratch)
|
|
- DEFINED_ON(arm, mips_shared);
|
|
+ DEFINED_ON(arm, mips_shared, ppc64);
|
|
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
|
|
Register memoryBase, Register ptr, Register ptrScratch)
|
|
- DEFINED_ON(arm, mips32, mips64);
|
|
+ DEFINED_ON(arm, mips32, mips64, ppc64);
|
|
|
|
// These accept general memoryBase + ptr + offset (in `access`); the offset is
|
|
// always smaller than the guard region. They will insert an additional add
|
|
// if the offset is nonzero, and of course that add may require a temporary
|
|
// register for the offset if the offset is large, and instructions to set it
|
|
// up.
|
|
void wasmLoad(const wasm::MemoryAccessDesc& access, Register memoryBase,
|
|
Register ptr, AnyRegister output) DEFINED_ON(arm64);
|
|
@@ -3575,100 +3582,100 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
Register memoryBase, Register ptr) DEFINED_ON(arm64);
|
|
void wasmStoreI64(const wasm::MemoryAccessDesc& access, Register64 value,
|
|
Register memoryBase, Register ptr) DEFINED_ON(arm64);
|
|
|
|
// `ptr` will always be updated.
|
|
void wasmUnalignedLoad(const wasm::MemoryAccessDesc& access,
|
|
Register memoryBase, Register ptr, Register ptrScratch,
|
|
Register output, Register tmp)
|
|
- DEFINED_ON(mips32, mips64);
|
|
+ DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// MIPS: `ptr` will always be updated.
|
|
void wasmUnalignedLoadFP(const wasm::MemoryAccessDesc& access,
|
|
Register memoryBase, Register ptr,
|
|
Register ptrScratch, FloatRegister output,
|
|
Register tmp1, Register tmp2, Register tmp3)
|
|
- DEFINED_ON(mips32, mips64);
|
|
+ DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// `ptr` will always be updated.
|
|
void wasmUnalignedLoadI64(const wasm::MemoryAccessDesc& access,
|
|
Register memoryBase, Register ptr,
|
|
Register ptrScratch, Register64 output,
|
|
- Register tmp) DEFINED_ON(mips32, mips64);
|
|
+ Register tmp) DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// MIPS: `ptr` will always be updated.
|
|
void wasmUnalignedStore(const wasm::MemoryAccessDesc& access, Register value,
|
|
Register memoryBase, Register ptr,
|
|
Register ptrScratch, Register tmp)
|
|
- DEFINED_ON(mips32, mips64);
|
|
+ DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// `ptr` will always be updated.
|
|
void wasmUnalignedStoreFP(const wasm::MemoryAccessDesc& access,
|
|
FloatRegister floatValue, Register memoryBase,
|
|
Register ptr, Register ptrScratch, Register tmp)
|
|
- DEFINED_ON(mips32, mips64);
|
|
+ DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// `ptr` will always be updated.
|
|
void wasmUnalignedStoreI64(const wasm::MemoryAccessDesc& access,
|
|
Register64 value, Register memoryBase,
|
|
Register ptr, Register ptrScratch, Register tmp)
|
|
- DEFINED_ON(mips32, mips64);
|
|
+ DEFINED_ON(mips32, mips64, ppc64);
|
|
|
|
// wasm specific methods, used in both the wasm baseline compiler and ion.
|
|
|
|
// The truncate-to-int32 methods do not bind the rejoin label; clients must
|
|
// do so if oolWasmTruncateCheckF64ToI32() can jump to it.
|
|
void wasmTruncateDoubleToUInt32(FloatRegister input, Register output,
|
|
bool isSaturating, Label* oolEntry) PER_ARCH;
|
|
void wasmTruncateDoubleToInt32(FloatRegister input, Register output,
|
|
bool isSaturating,
|
|
Label* oolEntry) PER_SHARED_ARCH;
|
|
void oolWasmTruncateCheckF64ToI32(FloatRegister input, Register output,
|
|
TruncFlags flags, wasm::BytecodeOffset off,
|
|
Label* rejoin)
|
|
- DEFINED_ON(arm, arm64, x86_shared, mips_shared);
|
|
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64);
|
|
|
|
void wasmTruncateFloat32ToUInt32(FloatRegister input, Register output,
|
|
bool isSaturating, Label* oolEntry) PER_ARCH;
|
|
void wasmTruncateFloat32ToInt32(FloatRegister input, Register output,
|
|
bool isSaturating,
|
|
Label* oolEntry) PER_SHARED_ARCH;
|
|
void oolWasmTruncateCheckF32ToI32(FloatRegister input, Register output,
|
|
TruncFlags flags, wasm::BytecodeOffset off,
|
|
Label* rejoin)
|
|
- DEFINED_ON(arm, arm64, x86_shared, mips_shared);
|
|
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64);
|
|
|
|
// The truncate-to-int64 methods will always bind the `oolRejoin` label
|
|
// after the last emitted instruction.
|
|
void wasmTruncateDoubleToInt64(FloatRegister input, Register64 output,
|
|
bool isSaturating, Label* oolEntry,
|
|
Label* oolRejoin, FloatRegister tempDouble)
|
|
- DEFINED_ON(arm64, x86, x64, mips64);
|
|
+ DEFINED_ON(arm64, x86, x64, mips64, ppc64);
|
|
void wasmTruncateDoubleToUInt64(FloatRegister input, Register64 output,
|
|
bool isSaturating, Label* oolEntry,
|
|
Label* oolRejoin, FloatRegister tempDouble)
|
|
- DEFINED_ON(arm64, x86, x64, mips64);
|
|
+ DEFINED_ON(arm64, x86, x64, mips64, ppc64);
|
|
void oolWasmTruncateCheckF64ToI64(FloatRegister input, Register64 output,
|
|
TruncFlags flags, wasm::BytecodeOffset off,
|
|
Label* rejoin)
|
|
- DEFINED_ON(arm, arm64, x86_shared, mips_shared);
|
|
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64);
|
|
|
|
void wasmTruncateFloat32ToInt64(FloatRegister input, Register64 output,
|
|
bool isSaturating, Label* oolEntry,
|
|
Label* oolRejoin, FloatRegister tempDouble)
|
|
- DEFINED_ON(arm64, x86, x64, mips64);
|
|
+ DEFINED_ON(arm64, x86, x64, mips64, ppc64);
|
|
void wasmTruncateFloat32ToUInt64(FloatRegister input, Register64 output,
|
|
bool isSaturating, Label* oolEntry,
|
|
Label* oolRejoin, FloatRegister tempDouble)
|
|
- DEFINED_ON(arm64, x86, x64, mips64);
|
|
+ DEFINED_ON(arm64, x86, x64, mips64, ppc64);
|
|
void oolWasmTruncateCheckF32ToI64(FloatRegister input, Register64 output,
|
|
TruncFlags flags, wasm::BytecodeOffset off,
|
|
Label* rejoin)
|
|
- DEFINED_ON(arm, arm64, x86_shared, mips_shared);
|
|
+ DEFINED_ON(arm, arm64, x86_shared, mips_shared, ppc64);
|
|
|
|
// This function takes care of loading the callee's TLS and pinned regs but
|
|
// it is the caller's responsibility to save/restore TLS or pinned regs.
|
|
CodeOffset wasmCallImport(const wasm::CallSiteDesc& desc,
|
|
const wasm::CalleeDesc& callee);
|
|
|
|
// WasmTableCallIndexReg must contain the index of the indirect call.
|
|
CodeOffset wasmCallIndirect(const wasm::CallSiteDesc& desc,
|
|
@@ -3735,72 +3742,72 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void compareExchange(Scalar::Type type, const Synchronization& sync,
|
|
const Address& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void compareExchange(Scalar::Type type, const Synchronization& sync,
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
// x86: `expected` and `output` must be edx:eax; `replacement` is ecx:ebx.
|
|
// x64: `output` must be rax.
|
|
// ARM: Registers must be distinct; `replacement` and `output` must be
|
|
// (even,odd) pairs.
|
|
|
|
void compareExchange64(const Synchronization& sync, const Address& mem,
|
|
Register64 expected, Register64 replacement,
|
|
Register64 output)
|
|
- DEFINED_ON(arm, arm64, x64, x86, mips64);
|
|
+ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64);
|
|
|
|
void compareExchange64(const Synchronization& sync, const BaseIndex& mem,
|
|
Register64 expected, Register64 replacement,
|
|
Register64 output)
|
|
- DEFINED_ON(arm, arm64, x64, x86, mips64);
|
|
+ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64);
|
|
|
|
// Exchange with memory. Return the value initially in memory.
|
|
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
|
|
// and 16-bit wide operations.
|
|
|
|
void atomicExchange(Scalar::Type type, const Synchronization& sync,
|
|
const Address& mem, Register value, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicExchange(Scalar::Type type, const Synchronization& sync,
|
|
const BaseIndex& mem, Register value, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicExchange(Scalar::Type type, const Synchronization& sync,
|
|
const Address& mem, Register value, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicExchange(Scalar::Type type, const Synchronization& sync,
|
|
const BaseIndex& mem, Register value, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
// x86: `value` must be ecx:ebx; `output` must be edx:eax.
|
|
// ARM: `value` and `output` must be distinct and (even,odd) pairs.
|
|
// ARM64: `value` and `output` must be distinct.
|
|
|
|
void atomicExchange64(const Synchronization& sync, const Address& mem,
|
|
Register64 value, Register64 output)
|
|
- DEFINED_ON(arm, arm64, x64, x86, mips64);
|
|
+ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64);
|
|
|
|
void atomicExchange64(const Synchronization& sync, const BaseIndex& mem,
|
|
Register64 value, Register64 output)
|
|
- DEFINED_ON(arm, arm64, x64, x86, mips64);
|
|
+ DEFINED_ON(arm, arm64, x64, x86, mips64, ppc64);
|
|
|
|
// Read-modify-write with memory. Return the value in memory before the
|
|
// operation.
|
|
//
|
|
// x86-shared:
|
|
// For 8-bit operations, `value` and `output` must have a byte subregister.
|
|
// For Add and Sub, `temp` must be invalid.
|
|
// For And, Or, and Xor, `output` must be eax and `temp` must have a byte
|
|
@@ -3826,44 +3833,44 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
|
|
void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
|
|
AtomicOp op, Imm32 value, const BaseIndex& mem,
|
|
Register temp, Register output) DEFINED_ON(x86_shared);
|
|
|
|
void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
|
|
AtomicOp op, Register value, const Address& mem,
|
|
Register valueTemp, Register offsetTemp, Register maskTemp,
|
|
- Register output) DEFINED_ON(mips_shared);
|
|
+ Register output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicFetchOp(Scalar::Type type, const Synchronization& sync,
|
|
AtomicOp op, Register value, const BaseIndex& mem,
|
|
Register valueTemp, Register offsetTemp, Register maskTemp,
|
|
- Register output) DEFINED_ON(mips_shared);
|
|
+ Register output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
// x86:
|
|
// `temp` must be ecx:ebx; `output` must be edx:eax.
|
|
// x64:
|
|
// For Add and Sub, `temp` is ignored.
|
|
// For And, Or, and Xor, `output` must be rax.
|
|
// ARM:
|
|
// `temp` and `output` must be (even,odd) pairs and distinct from `value`.
|
|
// ARM64:
|
|
// Registers `value`, `temp`, and `output` must all differ.
|
|
|
|
void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const Address& mem, Register64 temp,
|
|
- Register64 output) DEFINED_ON(arm, arm64, x64, mips64);
|
|
+ Register64 output) DEFINED_ON(arm, arm64, x64, mips64, ppc64);
|
|
|
|
void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
|
|
const Address& value, const Address& mem,
|
|
Register64 temp, Register64 output) DEFINED_ON(x86);
|
|
|
|
void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const BaseIndex& mem, Register64 temp,
|
|
- Register64 output) DEFINED_ON(arm, arm64, x64, mips64);
|
|
+ Register64 output) DEFINED_ON(arm, arm64, x64, mips64, ppc64);
|
|
|
|
void atomicFetchOp64(const Synchronization& sync, AtomicOp op,
|
|
const Address& value, const BaseIndex& mem,
|
|
Register64 temp, Register64 output) DEFINED_ON(x86);
|
|
|
|
// x64:
|
|
// `value` can be any register.
|
|
// ARM:
|
|
@@ -3871,24 +3878,24 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
// ARM64:
|
|
// Registers `value` and `temp` must differ.
|
|
|
|
void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const Address& mem) DEFINED_ON(x64);
|
|
|
|
void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const Address& mem, Register64 temp)
|
|
- DEFINED_ON(arm, arm64, mips64);
|
|
+ DEFINED_ON(arm, arm64, mips64, ppc64);
|
|
|
|
void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const BaseIndex& mem) DEFINED_ON(x64);
|
|
|
|
void atomicEffectOp64(const Synchronization& sync, AtomicOp op,
|
|
Register64 value, const BaseIndex& mem, Register64 temp)
|
|
- DEFINED_ON(arm, arm64, mips64);
|
|
+ DEFINED_ON(arm, arm64, mips64, ppc64);
|
|
|
|
// 64-bit atomic load. On 64-bit systems, use regular load with
|
|
// Synchronization::Load, not this method.
|
|
//
|
|
// x86: `temp` must be ecx:ebx; `output` must be edx:eax.
|
|
// ARM: `output` must be (even,odd) pair.
|
|
|
|
void atomicLoad64(const Synchronization& sync, const Address& mem,
|
|
@@ -3930,43 +3937,43 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
|
|
const Address& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp,
|
|
- Register output) DEFINED_ON(mips_shared);
|
|
+ Register output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmCompareExchange(const wasm::MemoryAccessDesc& access,
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp,
|
|
- Register output) DEFINED_ON(mips_shared);
|
|
+ Register output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
|
|
const Address& mem, Register value, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
|
|
const BaseIndex& mem, Register value, Register output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
|
|
const Address& mem, Register value,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmAtomicExchange(const wasm::MemoryAccessDesc& access,
|
|
const BaseIndex& mem, Register value,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const Address& mem, Register temp,
|
|
Register output) DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Imm32 value, const Address& mem, Register temp,
|
|
Register output) DEFINED_ON(x86_shared);
|
|
@@ -3977,23 +3984,23 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
|
|
void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Imm32 value, const BaseIndex& mem, Register temp,
|
|
Register output) DEFINED_ON(x86_shared);
|
|
|
|
void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const Address& mem, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp,
|
|
- Register output) DEFINED_ON(mips_shared);
|
|
+ Register output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmAtomicFetchOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const BaseIndex& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
// Read-modify-write with memory. Return no value.
|
|
//
|
|
// MIPS: `valueTemp`, `offsetTemp` and `maskTemp` must be defined for 8-bit
|
|
// and 16-bit wide operations.
|
|
|
|
void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const Address& mem, Register temp)
|
|
@@ -4009,22 +4016,22 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
|
|
void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Imm32 value, const BaseIndex& mem, Register temp)
|
|
DEFINED_ON(x86_shared);
|
|
|
|
void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const Address& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
- Register maskTemp) DEFINED_ON(mips_shared);
|
|
+ Register maskTemp) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void wasmAtomicEffectOp(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register value, const BaseIndex& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
- Register maskTemp) DEFINED_ON(mips_shared);
|
|
+ Register maskTemp) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
// 64-bit wide operations.
|
|
|
|
// 64-bit atomic load. On 64-bit systems, use regular wasm load with
|
|
// Synchronization::Load, not this method.
|
|
//
|
|
// x86: `temp` must be ecx:ebx; `output` must be edx:eax.
|
|
// ARM: `temp` should be invalid; `output` must be (even,odd) pair.
|
|
@@ -4074,22 +4081,22 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
// ARM: Registers must be distinct; `temp` and `output` must be (even,odd)
|
|
// pairs.
|
|
// MIPS: Registers must be distinct.
|
|
// MIPS32: `temp` should be invalid.
|
|
|
|
void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register64 value, const Address& mem,
|
|
Register64 temp, Register64 output)
|
|
- DEFINED_ON(arm, arm64, mips32, mips64, x64);
|
|
+ DEFINED_ON(arm, arm64, mips32, mips64, x64, ppc64);
|
|
|
|
void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
Register64 value, const BaseIndex& mem,
|
|
Register64 temp, Register64 output)
|
|
- DEFINED_ON(arm, arm64, mips32, mips64, x64);
|
|
+ DEFINED_ON(arm, arm64, mips32, mips64, x64, ppc64);
|
|
|
|
void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
const Address& value, const Address& mem,
|
|
Register64 temp, Register64 output) DEFINED_ON(x86);
|
|
|
|
void wasmAtomicFetchOp64(const wasm::MemoryAccessDesc& access, AtomicOp op,
|
|
const Address& value, const BaseIndex& mem,
|
|
Register64 temp, Register64 output) DEFINED_ON(x86);
|
|
@@ -4131,42 +4138,42 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register temp,
|
|
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const Address& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register temp,
|
|
- AnyRegister output) DEFINED_ON(mips_shared);
|
|
+ AnyRegister output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void compareExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const BaseIndex& mem, Register expected,
|
|
Register replacement, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register temp,
|
|
- AnyRegister output) DEFINED_ON(mips_shared);
|
|
+ AnyRegister output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const Address& mem, Register value, Register temp,
|
|
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const BaseIndex& mem, Register value, Register temp,
|
|
AnyRegister output) DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const Address& mem, Register value, Register valueTemp,
|
|
Register offsetTemp, Register maskTemp, Register temp,
|
|
- AnyRegister output) DEFINED_ON(mips_shared);
|
|
+ AnyRegister output) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicExchangeJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
const BaseIndex& mem, Register value,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register temp, AnyRegister output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const Address& mem,
|
|
Register temp1, Register temp2, AnyRegister output)
|
|
DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const BaseIndex& mem,
|
|
@@ -4182,23 +4189,23 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
AtomicOp op, Imm32 value, const BaseIndex& mem,
|
|
Register temp1, Register temp2, AnyRegister output)
|
|
DEFINED_ON(x86_shared);
|
|
|
|
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const Address& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register temp, AnyRegister output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicFetchOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const BaseIndex& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
Register maskTemp, Register temp, AnyRegister output)
|
|
- DEFINED_ON(mips_shared);
|
|
+ DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const Address& mem,
|
|
Register temp) DEFINED_ON(arm, arm64, x86_shared);
|
|
|
|
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const BaseIndex& mem,
|
|
Register temp) DEFINED_ON(arm, arm64, x86_shared);
|
|
@@ -4209,22 +4216,22 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
|
|
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Imm32 value, const BaseIndex& mem,
|
|
Register temp) DEFINED_ON(x86_shared);
|
|
|
|
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const Address& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
- Register maskTemp) DEFINED_ON(mips_shared);
|
|
+ Register maskTemp) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicEffectOpJS(Scalar::Type arrayType, const Synchronization& sync,
|
|
AtomicOp op, Register value, const BaseIndex& mem,
|
|
Register valueTemp, Register offsetTemp,
|
|
- Register maskTemp) DEFINED_ON(mips_shared);
|
|
+ Register maskTemp) DEFINED_ON(mips_shared, ppc64);
|
|
|
|
void atomicIsLockFreeJS(Register value, Register output);
|
|
|
|
// ========================================================================
|
|
// Spectre Mitigations.
|
|
//
|
|
// Spectre attacks are side-channel attacks based on cache pollution or
|
|
// slow-execution of some instructions. We have multiple spectre mitigations
|
|
@@ -4803,17 +4810,17 @@ class MacroAssembler : public MacroAssemblerSpecific {
|
|
// StackPointer manipulation functions.
|
|
// On ARM64, the StackPointer is implemented as two synchronized registers.
|
|
// Code shared across platforms must use these functions to be valid.
|
|
template <typename T>
|
|
inline void addToStackPtr(T t);
|
|
template <typename T>
|
|
inline void addStackPtrTo(T t);
|
|
|
|
- void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64);
|
|
+ void subFromStackPtr(Imm32 imm32) DEFINED_ON(mips32, mips64, arm, x86, x64, ppc64);
|
|
void subFromStackPtr(Register reg);
|
|
|
|
template <typename T>
|
|
void subStackPtrFrom(T t) {
|
|
subPtr(getStackPointer(), t);
|
|
}
|
|
|
|
template <typename T>
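A note on the DEFINED_ON machinery that every hunk above touches: the list names the architectures that actually implement the method, and on any architecture outside the list the declaration is deleted, so an unported caller fails at compile time rather than at link time. A minimal sketch of the idea (the real macro set in MacroAssembler.h is more elaborate; the SKETCH_ names below are invented for illustration):

// Minimal sketch only, not the actual macros.
#if defined(JS_CODEGEN_PPC64)
#  define SKETCH_DEFINED_ON_ppc64            // plain declaration: the body
                                             // lives in the ppc64 port
#else
#  define SKETCH_DEFINED_ON_ppc64 = delete   // compile error if ever called
#endif

class SketchMacroAssembler {
 public:
  void spectreMovePtrSketch() SKETCH_DEFINED_ON_ppc64;
};

Under this scheme, adding ppc64 to a DEFINED_ON list is a promise that ppc64/MacroAssembler-ppc64.cpp (or its inline header) supplies the definition.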
diff --git a/js/src/jit/MoveEmitter.h b/js/src/jit/MoveEmitter.h
index 6c62c0561a..30ee4b61a5 100644
--- a/js/src/jit/MoveEmitter.h
+++ b/js/src/jit/MoveEmitter.h
@@ -12,15 +12,17 @@
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/MoveEmitter-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/MoveEmitter-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/MoveEmitter-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/MoveEmitter-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/MoveEmitter-ppc64.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/MoveEmitter-none.h"
#else
# error "Unknown architecture!"
#endif

#endif /* jit_MoveEmitter_h */
diff --git a/js/src/jit/Registers.h b/js/src/jit/Registers.h
index 67c8661004..ef49df83e5 100644
--- a/js/src/jit/Registers.h
+++ b/js/src/jit/Registers.h
@@ -15,16 +15,18 @@
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/Architecture-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/Architecture-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/Architecture-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/Architecture-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/Architecture-ppc64.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/Architecture-none.h"
#else
# error "Unknown architecture!"
#endif

namespace js {
namespace jit {
diff --git a/js/src/jit/SharedICHelpers-inl.h b/js/src/jit/SharedICHelpers-inl.h
index 901c80cdd8..fd4a27d8bb 100644
--- a/js/src/jit/SharedICHelpers-inl.h
+++ b/js/src/jit/SharedICHelpers-inl.h
@@ -12,16 +12,18 @@
#elif defined(JS_CODEGEN_X64)
# include "jit/x64/SharedICHelpers-x64-inl.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/SharedICHelpers-arm-inl.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/SharedICHelpers-arm64-inl.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/SharedICHelpers-mips-shared-inl.h"
+#elif defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/SharedICHelpers-ppc64-inl.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/SharedICHelpers-none-inl.h"
#else
# error "Unknown architecture!"
#endif

namespace js {
namespace jit {} // namespace jit
diff --git a/js/src/jit/SharedICHelpers.h b/js/src/jit/SharedICHelpers.h
index 563cae3ccf..737ca1d5a5 100644
--- a/js/src/jit/SharedICHelpers.h
+++ b/js/src/jit/SharedICHelpers.h
@@ -12,16 +12,18 @@
#elif defined(JS_CODEGEN_X64)
# include "jit/x64/SharedICHelpers-x64.h"
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/SharedICHelpers-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/SharedICHelpers-arm64.h"
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/SharedICHelpers-mips-shared.h"
+#elif defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/SharedICHelpers-ppc64.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/SharedICHelpers-none.h"
#else
# error "Unknown architecture!"
#endif

namespace js {
namespace jit {} // namespace jit
diff --git a/js/src/jit/SharedICRegisters.h b/js/src/jit/SharedICRegisters.h
index c87e5f8408..76239d5dde 100644
--- a/js/src/jit/SharedICRegisters.h
+++ b/js/src/jit/SharedICRegisters.h
@@ -14,16 +14,18 @@
#elif defined(JS_CODEGEN_ARM)
# include "jit/arm/SharedICRegisters-arm.h"
#elif defined(JS_CODEGEN_ARM64)
# include "jit/arm64/SharedICRegisters-arm64.h"
#elif defined(JS_CODEGEN_MIPS32)
# include "jit/mips32/SharedICRegisters-mips32.h"
#elif defined(JS_CODEGEN_MIPS64)
# include "jit/mips64/SharedICRegisters-mips64.h"
+#elif defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/SharedICRegisters-ppc64.h"
#elif defined(JS_CODEGEN_NONE)
# include "jit/none/SharedICRegisters-none.h"
#else
# error "Unknown architecture!"
#endif

namespace js {
namespace jit {} // namespace jit
diff --git a/js/src/jit/moz.build b/js/src/jit/moz.build
index f50d86fc44..82cddd07af 100644
--- a/js/src/jit/moz.build
+++ b/js/src/jit/moz.build
@@ -227,17 +227,29 @@ elif CONFIG["JS_CODEGEN_MIPS32"] or CONFIG["JS_CODEGEN_MIPS64"]:
"mips64/CodeGenerator-mips64.cpp",
"mips64/Lowering-mips64.cpp",
"mips64/MacroAssembler-mips64.cpp",
"mips64/MoveEmitter-mips64.cpp",
"mips64/Trampoline-mips64.cpp",
]
if CONFIG["JS_SIMULATOR_MIPS64"]:
UNIFIED_SOURCES += ["mips64/Simulator-mips64.cpp"]
-
+elif CONFIG["JS_CODEGEN_PPC64"]:
+ lir_inputs += ["ppc64/LIR-ppc64.h"]
+ UNIFIED_SOURCES += [
+ "ppc64/Architecture-ppc64.cpp",
+ "ppc64/Assembler-ppc64.cpp",
+ "ppc64/Bailouts-ppc64.cpp",
+ "ppc64/CodeGenerator-ppc64.cpp",
+ "ppc64/Lowering-ppc64.cpp",
+ "ppc64/MacroAssembler-ppc64.cpp",
+ "ppc64/MoveEmitter-ppc64.cpp",
+ "ppc64/Trampoline-ppc64.cpp",
+ "shared/AtomicOperations-shared-jit.cpp",
+ ]

# Generate jit/MIROpsGenerated.h from jit/MIROps.yaml
GeneratedFile(
"MIROpsGenerated.h",
script="GenerateMIRFiles.py",
entry_point="generate_mir_header",
inputs=["MIROps.yaml"],
)
diff --git a/js/src/jit/shared/Assembler-shared.h b/js/src/jit/shared/Assembler-shared.h
index dfb2bcb6b8..69ba759d42 100644
--- a/js/src/jit/shared/Assembler-shared.h
+++ b/js/src/jit/shared/Assembler-shared.h
@@ -20,23 +20,24 @@
#include "jit/Registers.h"
#include "jit/RegisterSets.h"
#include "js/ScalarType.h" // js::Scalar::Type
#include "vm/HelperThreads.h"
#include "vm/NativeObject.h"
#include "wasm/WasmTypes.h"

#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
// Push return addresses callee-side.
# define JS_USE_LINK_REGISTER
#endif

#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
- defined(JS_CODEGEN_ARM64)
+ defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_PPC64)
// JS_CODELABEL_LINKMODE gives labels additional metadata
// describing how Bind() should patch them.
# define JS_CODELABEL_LINKMODE
#endif

namespace js {
namespace jit {
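Adding ppc64 to JS_USE_LINK_REGISTER records that, as on ARM and MIPS, a PowerPC call leaves its return address in the link register rather than pushing it to memory, so JIT prologues must spill it themselves. A hedged sketch of what that means for shared code (pushReturnAddress is the per-platform hook; the surrounding prologue is illustrative, not taken from the tree):

static void genPrologueSketch(js::jit::MacroAssembler& masm) {
#ifdef JS_USE_LINK_REGISTER
  // The call that reached us left the return address in LR/ra, not in
  // memory; store it so frames look the same as on x86.
  masm.pushReturnAddress();
#endif
  // ... the remaining prologue is identical on all architectures ...
}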

diff --git a/js/src/jit/shared/AtomicOperations-shared-jit.cpp b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
index 79463f118b..7c8eeaf89e 100644
--- a/js/src/jit/shared/AtomicOperations-shared-jit.cpp
+++ b/js/src/jit/shared/AtomicOperations-shared-jit.cpp
@@ -133,16 +133,38 @@ static constexpr Register AtomicTemp = edx;
// 64-bit registers for cmpxchg8b. ValReg/Val2Reg/Temp are not used in this
// case.

static constexpr Register64 AtomicValReg64(edx, eax);
static constexpr Register64 AtomicVal2Reg64(ecx, ebx);

// AtomicReturnReg64 is unused on x86.

+#elif defined(JS_CODEGEN_PPC64)
+
+// Selected registers match the argument registers, except that the Ptr is not
+// in IntArgReg0 so as not to conflict with the result register.
+
+static const LiveRegisterSet AtomicNonVolatileRegs;
+
+static constexpr Register AtomicPtrReg = IntArgReg4;
+static constexpr Register AtomicPtr2Reg = IntArgReg1;
+static constexpr Register AtomicValReg = IntArgReg1;
+static constexpr Register64 AtomicValReg64(IntArgReg1);
+static constexpr Register AtomicVal2Reg = IntArgReg2;
+static constexpr Register64 AtomicVal2Reg64(IntArgReg2);
+static constexpr Register AtomicTemp = IntArgReg3;
+static constexpr Register AtomicTemp2 = IntArgReg5;
+static constexpr Register AtomicTemp3 = IntArgReg6;
+static constexpr Register64 AtomicTemp64(IntArgReg3);
+static constexpr Register64 AtomicTemp64_2(IntArgReg5);
+static constexpr Register64 AtomicTemp64_3(IntArgReg6);
+
+static constexpr Register64 AtomicReturnReg64 = ReturnReg64;
+
#else
# error "Unsupported platform"
#endif

// These are useful shorthands and hide the meaningless uint/int distinction.

static constexpr Scalar::Type SIZE8 = Scalar::Uint8;
static constexpr Scalar::Type SIZE16 = Scalar::Uint16;
@@ -248,31 +270,37 @@ static uint32_t GenPrologue(MacroAssembler& masm, ArgIterator* iter) {
uint32_t start = masm.currentOffset();
masm.PushRegsInMask(AtomicNonVolatileRegs);
#if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
// The return address is among the nonvolatile registers, if pushed at all.
iter->argBase = masm.framePushed();
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// The return address is pushed separately.
iter->argBase = sizeof(void*) + masm.framePushed();
+#elif defined(JS_CODEGEN_PPC64)
+// XXX
+ // The return address is in LR (an SPR); it's not (probably) on the stack.
+ iter->argBase = masm.framePushed();
#else
# error "Unsupported platform"
#endif
return start;
}

static void GenEpilogue(MacroAssembler& masm) {
masm.PopRegsInMask(AtomicNonVolatileRegs);
MOZ_ASSERT(masm.framePushed() == 0);
#if defined(JS_CODEGEN_ARM64)
masm.Ret();
#elif defined(JS_CODEGEN_ARM)
masm.mov(lr, pc);
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
masm.ret();
+#elif defined(JS_CODEGEN_PPC64)
+ masm.as_blr();
#endif
}

#ifndef JS_64BIT
static uint32_t GenNop(MacroAssembler& masm) {
ArgIterator iter;
uint32_t start = GenPrologue(masm, &iter);
GenEpilogue(masm);
@@ -414,21 +442,31 @@ static uint32_t GenCmpxchg(MacroAssembler& masm, Scalar::Type size,
ArgIterator iter;
uint32_t start = GenPrologue(masm, &iter);
GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);

Address addr(AtomicPtrReg, 0);
switch (size) {
case SIZE8:
case SIZE16:
+#if defined(JS_CODEGEN_PPC64)
+ masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg,
+ AtomicTemp, AtomicTemp2, AtomicTemp3, ReturnReg);
+ break;
+#endif
case SIZE32:
GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
GenGprArg(masm, MIRType::Int32, &iter, AtomicVal2Reg);
+#if defined(JS_CODEGEN_PPC64)
+ masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg,
+ InvalidReg, InvalidReg, InvalidReg, ReturnReg);
+#else
masm.compareExchange(size, sync, addr, AtomicValReg, AtomicVal2Reg,
ReturnReg);
+#endif
break;
case SIZE64:
GenGpr64Arg(masm, &iter, AtomicValReg64);
GenGpr64Arg(masm, &iter, AtomicVal2Reg64);
#if defined(JS_CODEGEN_X86)
static_assert(AtomicValReg64 == Register64(edx, eax));
static_assert(AtomicVal2Reg64 == Register64(ecx, ebx));
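The SIZE8/SIZE16 arm of the ppc64 path above hands compareExchange three extra temporaries, reusing the MIPS-shaped signature. The usual reason a RISC port needs them is that the reservation granule is a full word, so a narrow compare-exchange is emulated by splicing the byte or halfword into its containing word. A hedged C++ model of that scheme (this illustrates the general technique the valueTemp/offsetTemp/maskTemp parameters suggest, not the exact instruction sequence the port emits):

#include <atomic>
#include <cstdint>

// CAS one byte inside a 32-bit word using only word-sized atomics.
uint8_t casByteViaWord(std::atomic<uint32_t>* word, unsigned byteShift,
                       uint8_t expected, uint8_t replacement) {
  const uint32_t mask = 0xffu << byteShift;              // "maskTemp"
  const uint32_t exp = uint32_t(expected) << byteShift;  // "valueTemp"
  const uint32_t rep = uint32_t(replacement) << byteShift;
  uint32_t old = word->load(std::memory_order_relaxed);
  for (;;) {
    if ((old & mask) != exp) {
      // The byte differs: fail, returning the byte currently in memory.
      return uint8_t((old & mask) >> byteShift);
    }
    // Splice the replacement byte into the word, leaving neighbors intact.
    uint32_t desired = (old & ~mask) | rep;
    if (word->compare_exchange_weak(old, desired,
                                    std::memory_order_seq_cst)) {
      return expected;  // success
    }
    // compare_exchange_weak refreshed `old`; retry.
  }
}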

@@ -453,19 +491,29 @@ static uint32_t GenExchange(MacroAssembler& masm, Scalar::Type size,
ArgIterator iter;
uint32_t start = GenPrologue(masm, &iter);
GenGprArg(masm, MIRType::Pointer, &iter, AtomicPtrReg);

Address addr(AtomicPtrReg, 0);
switch (size) {
case SIZE8:
case SIZE16:
+#if defined(JS_CODEGEN_PPC64)
+ masm.atomicExchange(size, sync, addr, AtomicValReg,
+ AtomicTemp, AtomicTemp2, AtomicTemp3, ReturnReg);
+ break;
+#endif
case SIZE32:
GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+#if defined(JS_CODEGEN_PPC64)
+ masm.atomicExchange(size, sync, addr, AtomicValReg,
+ InvalidReg, InvalidReg, InvalidReg, ReturnReg);
+#else
masm.atomicExchange(size, sync, addr, AtomicValReg, ReturnReg);
+#endif
break;
case SIZE64:
#if defined(JS_64BIT)
GenGpr64Arg(masm, &iter, AtomicValReg64);
masm.atomicExchange64(sync, addr, AtomicValReg64, AtomicReturnReg64);
break;
#else
MOZ_CRASH("64-bit atomic exchange not available on this platform");
@@ -492,17 +540,22 @@ static uint32_t GenFetchOp(MacroAssembler& masm, Scalar::Type size, AtomicOp op,
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
Register tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
? Register::Invalid()
: AtomicTemp;
#else
Register tmp = AtomicTemp;
#endif
GenGprArg(masm, MIRType::Int32, &iter, AtomicValReg);
+#if defined(JS_CODEGEN_PPC64)
+ masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, AtomicTemp2,
+ AtomicTemp3, ReturnReg);
+#else
masm.atomicFetchOp(size, sync, op, AtomicValReg, addr, tmp, ReturnReg);
+#endif
break;
}
case SIZE64: {
#if defined(JS_64BIT)
# if defined(JS_CODEGEN_X64)
Register64 tmp = op == AtomicFetchAddOp || op == AtomicFetchSubOp
? Register64::Invalid()
: AtomicTemp64;
@@ -636,16 +689,19 @@ static bool UnalignedAccessesAreOK() {
#endif
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
return true;
#elif defined(JS_CODEGEN_ARM)
return !HasAlignmentFault();
#elif defined(JS_CODEGEN_ARM64)
// This is not necessarily true but it's the best guess right now.
return true;
+#elif defined(JS_CODEGEN_PPC64)
+ // We'd sure like to avoid it, even though it works.
+ return false;
#else
# error "Unsupported platform"
#endif
}

void AtomicMemcpyDownUnsynchronized(uint8_t* dest, const uint8_t* src,
size_t nbytes) {
const uint8_t* lim = src + nbytes;
diff --git a/js/src/jsapi-tests/testJitABIcalls.cpp b/js/src/jsapi-tests/testJitABIcalls.cpp
index 02b67da3ca..bd45389b21 100644
--- a/js/src/jsapi-tests/testJitABIcalls.cpp
+++ b/js/src/jsapi-tests/testJitABIcalls.cpp
@@ -653,16 +653,19 @@ class JitABICall final : public JSAPITest, public DefineCheckArgs<Sig> {
Register base = r8;
regs.take(base);
#elif defined(JS_CODEGEN_MIPS32)
Register base = t1;
regs.take(base);
#elif defined(JS_CODEGEN_MIPS64)
Register base = t1;
regs.take(base);
+#elif defined(JS_CODEGEN_PPC64)
+ Register base = r0;
+ regs.take(base);
#else
# error "Unknown architecture!"
#endif

Register setup = regs.takeAny();

this->generateCalls(masm, base, setup);

diff --git a/js/src/jsapi-tests/testsJit.cpp b/js/src/jsapi-tests/testsJit.cpp
index 069eef43fe..705609df2c 100644
--- a/js/src/jsapi-tests/testsJit.cpp
+++ b/js/src/jsapi-tests/testsJit.cpp
@@ -20,16 +20,21 @@ void PrepareJit(js::jit::MacroAssembler& masm) {
#endif
AllocatableRegisterSet regs(RegisterSet::All());
LiveRegisterSet save(regs.asLiveSet());
#if defined(JS_CODEGEN_ARM)
save.add(js::jit::d15);
#endif
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
save.add(js::jit::ra);
+#elif defined(JS_CODEGEN_PPC64)
+ // XXX
+ // Push the link register separately, since it's not a GPR.
+ masm.xs_mflr(ScratchRegister);
+ masm.as_stdu(ScratchRegister, StackPointer, -8);
#elif defined(JS_USE_LINK_REGISTER)
save.add(js::jit::lr);
#endif
masm.PushRegsInMask(save);
}

// Generate the exit path of the JIT code, which restores every register. Then,
// make it executable and run it.
@@ -37,26 +42,35 @@ bool ExecuteJit(JSContext* cx, js::jit::MacroAssembler& masm) {
using namespace js::jit;
AllocatableRegisterSet regs(RegisterSet::All());
LiveRegisterSet save(regs.asLiveSet());
#if defined(JS_CODEGEN_ARM)
save.add(js::jit::d15);
#endif
#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
save.add(js::jit::ra);
+#elif defined(JS_CODEGEN_PPC64)
+ // We pop after loading the regs.
#elif defined(JS_USE_LINK_REGISTER)
save.add(js::jit::lr);
#endif
masm.PopRegsInMask(save);
#if defined(JS_CODEGEN_ARM64)
// Return using the value popped into x30.
masm.abiret();

// Reset stack pointer.
masm.SetStackPointer64(PseudoStackPointer64);
+#elif defined(JS_CODEGEN_PPC64)
+ // XXX
+ // Pop LR and exit.
+ masm.as_ld(ScratchRegister, StackPointer, 0);
+ masm.xs_mtlr(ScratchRegister);
+ masm.as_addi(StackPointer, StackPointer, 8);
+ masm.as_blr();
#else
// Exit the JIT-ed code using the ABI return style.
masm.abiret();
#endif

if (masm.oom()) {
return false;
}
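The two ppc64 stanzas above are a matched pair: PrepareJit moves LR into a GPR and pushes it before PushRegsInMask, and ExecuteJit reloads it after PopRegsInMask so the final blr really returns to the C++ test harness. Grouped for readability (illustrative helpers wrapping the exact emitter calls used above; stdu stores and decrements the stack pointer in one instruction):

static void spillLRSketch(js::jit::MacroAssembler& masm) {
  masm.xs_mflr(ScratchRegister);                    // mflr: copy LR to a GPR
  masm.as_stdu(ScratchRegister, StackPointer, -8);  // push it, SP -= 8
}

static void reloadLRAndReturnSketch(js::jit::MacroAssembler& masm) {
  masm.as_ld(ScratchRegister, StackPointer, 0);     // fetch the saved LR
  masm.xs_mtlr(ScratchRegister);                    // mtlr: put it back
  masm.as_addi(StackPointer, StackPointer, 8);      // pop the 8-byte slot
  masm.as_blr();                                    // branch to LR
}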
diff --git a/js/src/util/Poison.h b/js/src/util/Poison.h
index 8356ca1f00..5eeb111cf8 100644
--- a/js/src/util/Poison.h
+++ b/js/src/util/Poison.h
@@ -88,16 +88,18 @@ const uint8_t JS_SCOPE_DATA_TRAILING_NAMES_PATTERN = 0xCC;
*/
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64) || \
defined(JS_CODEGEN_NONE)
# define JS_SWEPT_CODE_PATTERN 0xED // IN instruction, crashes in user mode.
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
# define JS_SWEPT_CODE_PATTERN 0xA3 // undefined instruction
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
# define JS_SWEPT_CODE_PATTERN 0x01 // undefined instruction
+#elif defined(JS_CODEGEN_PPC64) || defined(JS_CODEGEN_PPC)
+# define JS_SWEPT_CODE_PATTERN 0x00 // architecturally defined as illegal
#else
# error "JS_SWEPT_CODE_PATTERN not defined for this platform"
#endif

enum class MemCheckKind : uint8_t {
// Marks a region as poisoned. Memory sanitizers like ASan will crash when
// accessing it (both reads and writes).
MakeNoAccess,
diff --git a/js/src/wasm/WasmBaselineCompile.cpp b/js/src/wasm/WasmBaselineCompile.cpp
index 156f3cbbba..ab29f44713 100644
--- a/js/src/wasm/WasmBaselineCompile.cpp
+++ b/js/src/wasm/WasmBaselineCompile.cpp
@@ -138,16 +138,19 @@
#if defined(JS_CODEGEN_MIPS32)
# include "jit/mips-shared/Assembler-mips-shared.h"
# include "jit/mips32/Assembler-mips32.h"
#endif
#if defined(JS_CODEGEN_MIPS64)
# include "jit/mips-shared/Assembler-mips-shared.h"
# include "jit/mips64/Assembler-mips64.h"
#endif
+#if defined(JS_CODEGEN_PPC64)
+# include "jit/ppc64/Assembler-ppc64.h"
+#endif
#include "js/ScalarType.h" // js::Scalar::Type
#include "util/Memory.h"
#include "wasm/TypedObject.h"
#include "wasm/WasmGC.h"
#include "wasm/WasmGenerator.h"
#include "wasm/WasmInstance.h"
#include "wasm/WasmOpIter.h"
#include "wasm/WasmSignalHandlers.h"
@@ -288,16 +291,23 @@ static constexpr Register RabaldrScratchI32 = CallTempReg2;
#endif

#ifdef RABALDR_SCRATCH_F32_ALIASES_F64
# if !defined(RABALDR_SCRATCH_F32) || !defined(RABALDR_SCRATCH_F64)
# error "Bad configuration"
# endif
#endif

+#ifdef JS_CODEGEN_PPC64
+# define RABALDR_SCRATCH_I32
+// We can use all the argregs up, and we don't want the JIT using our own
+// private scratch registers, so this is the best option of what's left.
+static constexpr Register RabaldrScratchI32 = r19;
+#endif
+
template <MIRType t>
struct RegTypeOf {
#ifdef ENABLE_WASM_SIMD
static_assert(t == MIRType::Float32 || t == MIRType::Double ||
t == MIRType::Simd128,
"Float mask type");
#else
static_assert(t == MIRType::Float32 || t == MIRType::Double,
@@ -550,16 +560,18 @@ struct SpecificRegs {};
#elif defined(JS_CODEGEN_MIPS32)
struct SpecificRegs {
RegI64 abiReturnRegI64;

SpecificRegs() : abiReturnRegI64(ReturnReg64) {}
};
#elif defined(JS_CODEGEN_MIPS64)
struct SpecificRegs {};
+#elif defined(JS_CODEGEN_PPC64)
+struct SpecificRegs {};
#else
struct SpecificRegs {
# ifndef JS_64BIT
RegI64 abiReturnRegI64;
# endif

SpecificRegs() { MOZ_CRASH("BaseCompiler porting interface: SpecificRegs"); }
};
@@ -6038,16 +6050,25 @@ class BaseCompiler final : public BaseCompilerInterface {
ABIArg argLoc = call->abi.next(MIRType::Int32);
if (argLoc.kind() == ABIArg::Stack) {
ScratchI32 scratch(*this);
loadI32(arg, scratch);
masm.store32(scratch, Address(masm.getStackPointer(),
argLoc.offsetFromArgBase()));
} else {
loadI32(arg, RegI32(argLoc.gpr()));
+#if JS_CODEGEN_PPC64
+ // If this is a call to compiled C++, we must ensure that the
+ // upper 32 bits are clear: addi can sign-extend, which yields
+ // difficult-to-diagnose bugs when the function expects a uint32_t
+ // but the register it gets has a residual 64-bit value.
+ if (call->usesSystemAbi) {
+ masm.as_rldicl(argLoc.gpr(), argLoc.gpr(), 0, 32);
+ }
+#endif
}
break;
}
case ValType::I64: {
ABIArg argLoc = call->abi.next(MIRType::Int64);
if (argLoc.kind() == ABIArg::Stack) {
ScratchI32 scratch(*this);
#ifdef JS_PUNBOX64
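The rldicl in the hunk above guards a subtle ABI hazard: ppc64 32-bit arithmetic such as addi leaves a sign-extended value in the 64-bit register, while a C++ callee taking uint32_t assumes the upper half is zero. rldicl with a rotate of 0 and a mask beginning at bit 32 keeps only the low word (the clrldi idiom). A self-contained illustration of the bug class and the fix, with plain C++ standing in for the register:

#include <cassert>
#include <cstdint>

int main() {
  // What a sign-extending 32-bit op leaves in a 64-bit register for -1:
  uint64_t reg = uint64_t(int64_t(int32_t(-1)));  // 0xFFFFFFFFFFFFFFFF
  // A callee with uint32_t semantics expects only the low word set:
  uint64_t wanted = 0xFFFFFFFFu;                  // 0x00000000FFFFFFFF
  // rldicl reg, reg, 0, 32 ("clrldi") clears the top 32 bits:
  reg &= 0xFFFFFFFFu;
  assert(reg == wanted);
  return 0;
}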
@@ -6324,17 +6345,18 @@ class BaseCompiler final : public BaseCompilerInterface {

// Compute the absolute table base pointer into `scratch`, offset by 8
// to account for the fact that ma_mov read PC+8.
masm.ma_sub(Imm32(offset + 8), scratch, arm_scratch);

// Jump indirect via table element.
masm.ma_ldr(DTRAddr(scratch, DtrRegImmShift(switchValue, LSL, 2)), pc,
Offset, Assembler::Always);
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
ScratchI32 scratch(*this);
CodeLabel tableCl;

masm.ma_li(scratch, &tableCl);

tableCl.target()->bind(theTable->offset());
masm.addCodeLabel(tableCl);

@@ -6493,16 +6515,22 @@ class BaseCompiler final : public BaseCompilerInterface {
# elif defined(JS_CODEGEN_ARM64)
ARMRegister sd(srcDest.reg, 64);
ARMRegister r(rhs.reg, 64);
if (isUnsigned) {
masm.Udiv(sd, sd, r);
} else {
masm.Sdiv(sd, sd, r);
}
+# elif defined(JS_CODEGEN_PPC64)
+ if (isUnsigned) {
+ masm.as_divdu(srcDest.reg, srcDest.reg, rhs.reg);
+ } else {
+ masm.as_divd(srcDest.reg, srcDest.reg, rhs.reg);
+ }
# else
MOZ_CRASH("BaseCompiler platform hook: quotientI64");
# endif
masm.bind(&done);
}

void remainderI64(RegI64 rhs, RegI64 srcDest, RegI64 reserved,
IsUnsigned isUnsigned, bool isConst, int64_t c) {
@@ -6544,29 +6572,46 @@ class BaseCompiler final : public BaseCompilerInterface {
ARMRegister t(temp, 64);
if (isUnsigned) {
masm.Udiv(t, sd, r);
} else {
masm.Sdiv(t, sd, r);
}
masm.Mul(t, t, r);
masm.Sub(sd, sd, t);
+# elif defined(JS_CODEGEN_PPC64)
+ if (js::jit::HasPPCISA3()) {
+ if (isUnsigned) {
+ masm.as_modud(srcDest.reg, srcDest.reg, rhs.reg);
+ } else {
+ masm.as_modsd(srcDest.reg, srcDest.reg, rhs.reg);
+ }
+ } else {
+ ScratchI32 temp(*this);
+ if (isUnsigned) {
+ masm.as_divdu(temp, srcDest.reg, rhs.reg);
+ } else {
+ masm.as_divd(temp, srcDest.reg, rhs.reg);
+ }
+ masm.as_mulld(temp, temp, rhs.reg);
+ masm.as_subf(srcDest.reg, temp, srcDest.reg); // T = B - A
+ }
# else
MOZ_CRASH("BaseCompiler platform hook: remainderI64");
# endif
masm.bind(&done);
}
#endif // RABALDR_INT_DIV_I64_CALLOUT
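The pre-POWER9 fallback in remainderI64 above leans on the identity rem(a,b) = a - (a/b)*b; subf computes srcDest = srcDest - temp using PowerPC's subtract-from operand order, which is what the "T = B - A" note records, while ISA 3.0's modud/modsd do the whole thing in one instruction. A quick self-contained check of the identity for both signednesses (C++ integer division truncates toward zero, matching divd/divdu):

#include <cassert>
#include <cstdint>

int main() {
  int64_t a = -7, b = 3;
  assert(a % b == a - (a / b) * b);        // signed: divd/mulld/subf path
  uint64_t ua = 7, ub = 3;
  assert(ua % ub == ua - (ua / ub) * ub);  // unsigned: divdu path
  return 0;
}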

RegI32 needRotate64Temp() {
#if defined(JS_CODEGEN_X86)
return needI32();
#elif defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
- defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
return RegI32::Invalid();
#else
MOZ_CRASH("BaseCompiler platform hook: needRotate64Temp");
#endif
}

class OutOfLineTruncateCheckF32OrF64ToI32 : public OutOfLineCode {
AnyReg src;
@@ -6869,30 +6914,35 @@ class BaseCompiler final : public BaseCompilerInterface {
RegI64 ptr64 = fromI32(ptr);

// In principle there may be non-zero bits in the upper bits of the
// register; clear them.
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
// The canonical value is zero-extended (see comment block "64-bit GPRs
// carrying 32-bit values" in MacroAssembler.h); we already have that.
masm.assertCanonicalInt32(ptr);
+# elif defined(JS_CODEGEN_PPC64)
+ // The canonical value is sign-extended.
+ masm.as_rldicl(ptr, ptr, 0, 32); // "clrldi"
# else
MOZ_CRASH("Platform code needed here");
# endif

// Any Spectre mitigation will appear to update the ptr64 register.
masm.wasmBoundsCheck64(
Assembler::Below, ptr64,
Address(tls, offsetof(TlsData, boundsCheckLimit)), &ok);

// Restore the value to the canonical form for a 32-bit value in a
// 64-bit register and/or the appropriate form for further use in the
// indexing instruction.
# if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64)
// The canonical value is zero-extended; we already have that.
+# elif defined(JS_CODEGEN_PPC64)
+ // Leave it zero-extended.
# else
MOZ_CRASH("Platform code needed here");
# endif
} else {
masm.wasmBoundsCheck32(
Assembler::Below, ptr,
Address(tls, offsetof(TlsData, boundsCheckLimit)), &ok);
}
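The hunk above pins down a cross-platform invariant: x64 and arm64 keep 32-bit values zero-extended in 64-bit registers, but the ppc64 canonical form is sign-extended, so the index must be explicitly zero-extended before the unsigned 64-bit bounds compare. A self-contained demonstration of why the compare would otherwise reject a legal index (the heap limit below is made up for the example):

#include <cassert>
#include <cstdint>

int main() {
  uint32_t index = 0x80000000u;  // a legal index into a large heap
  uint64_t limit = 0xC0000000u;  // a 3GB bounds-check limit
  // Sign-extended (the ppc64 canonical form): the unsigned compare lies.
  uint64_t signExt = uint64_t(int64_t(int32_t(index)));
  assert(!(signExt < limit));    // looks out of bounds -- spurious trap
  // Zero-extended (what rldicl/"clrldi" produces): the compare is right.
  uint64_t zeroExt = uint64_t(index);
  assert(zeroExt < limit);       // correctly in bounds
  return 0;
}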
|
|
@@ -6903,17 +6953,17 @@ class BaseCompiler final : public BaseCompilerInterface {
|
|
#endif
|
|
masm.wasmTrap(Trap::OutOfBounds, bytecodeOffset());
|
|
masm.bind(&ok);
|
|
}
|
|
}
|
|
|
|
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM) || \
|
|
defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
|
|
- defined(JS_CODEGEN_MIPS64)
|
|
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
|
|
BaseIndex prepareAtomicMemoryAccess(MemoryAccessDesc* access,
|
|
AccessCheck* check, RegI32 tls,
|
|
RegI32 ptr) {
|
|
MOZ_ASSERT(needTlsForAccess(*check) == tls.isValid());
|
|
prepareMemoryAccess(access, check, tls, ptr);
|
|
return BaseIndex(HeapReg, ptr, TimesOne, access->offset());
|
|
}
|
|
#elif defined(JS_CODEGEN_X86)
|
|
@@ -7001,17 +7051,19 @@ class BaseCompiler final : public BaseCompilerInterface {
|
|
if (dest.tag == AnyReg::I64) {
|
|
MOZ_ASSERT(dest.i64() == specific_.abiReturnRegI64);
|
|
masm.wasmLoadI64(*access, srcAddr, dest.i64());
|
|
} else {
|
|
// For 8 bit loads, this will generate movsbl or movzbl, so
|
|
// there's no constraint on what the output register may be.
|
|
masm.wasmLoad(*access, srcAddr, dest.any());
|
|
}
|
|
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
|
|
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
|
|
+ defined(JS_CODEGEN_PPC64)
|
|
+// XXX: We don't really need this anymore
|
|
if (IsUnaligned(*access)) {
|
|
switch (dest.tag) {
|
|
case AnyReg::I64:
|
|
masm.wasmUnalignedLoadI64(*access, HeapReg, ptr, ptr, dest.i64(),
|
|
temp1);
|
|
break;
|
|
case AnyReg::F32:
|
|
masm.wasmUnalignedLoadFP(*access, HeapReg, ptr, ptr, dest.f32(),
|
|
@@ -7102,17 +7154,19 @@ class BaseCompiler final : public BaseCompilerInterface {
|
|
MOZ_ASSERT(temp.isInvalid());
|
|
if (access->type() == Scalar::Int64) {
|
|
masm.wasmStoreI64(*access, src.i64(), HeapReg, ptr, ptr);
|
|
} else if (src.tag == AnyReg::I64) {
|
|
masm.wasmStore(*access, AnyRegister(src.i64().low), HeapReg, ptr, ptr);
|
|
} else {
|
|
masm.wasmStore(*access, src.any(), HeapReg, ptr, ptr);
|
|
}
|
|
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
|
|
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
|
|
+ defined(JS_CODEGEN_PPC64)
|
|
+// XXX: We don't really need this anymore
|
|
if (IsUnaligned(*access)) {
switch (src.tag) {
case AnyReg::I64:
masm.wasmUnalignedStoreI64(*access, src.i64(), HeapReg, ptr, ptr,
temp);
break;
case AnyReg::F32:
masm.wasmUnalignedStoreFP(*access, src.f32(), HeapReg, ptr, ptr,
@@ -7160,17 +7214,18 @@ class BaseCompiler final : public BaseCompilerInterface {
}
void maybeFree(BaseCompiler* bc) {
for (size_t i = 0; i < Count; ++i) {
bc->maybeFree(this->operator[](i));
}
}
};

-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
using AtomicRMW32Temps = Atomic32Temps<3>;
#else
using AtomicRMW32Temps = Atomic32Temps<1>;
#endif

template <typename T>
void atomicRMW32(const MemoryAccessDesc& access, T srcAddr, AtomicOp op,
RegI32 rv, RegI32 rd, const AtomicRMW32Temps& temps) {
@@ -7187,17 +7242,18 @@ class BaseCompiler final : public BaseCompilerInterface {
}
masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temp, rd);
break;
}
#endif
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], temps[1],
temps[2], rd);
#else
masm.wasmAtomicFetchOp(access, op, rv, srcAddr, temps[0], rd);
#endif
break;
default: {
MOZ_CRASH("Bad type for atomic operation");
@@ -7208,17 +7264,18 @@ class BaseCompiler final : public BaseCompilerInterface {
// On x86, V is Address. On other platforms, it is Register64.
// T is BaseIndex or Address.
template <typename T, typename V>
void atomicRMW64(const MemoryAccessDesc& access, const T& srcAddr,
AtomicOp op, V value, Register64 temp, Register64 rd) {
masm.wasmAtomicFetchOp64(access, op, value, srcAddr, temp, rd);
}

-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
using AtomicCmpXchg32Temps = Atomic32Temps<3>;
#else
using AtomicCmpXchg32Temps = Atomic32Temps<0>;
#endif

template <typename T>
void atomicCmpXchg32(const MemoryAccessDesc& access, T srcAddr,
RegI32 rexpect, RegI32 rnew, RegI32 rd,
@@ -7236,29 +7293,31 @@ class BaseCompiler final : public BaseCompilerInterface {
}
masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
break;
}
#endif
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, temps[0],
temps[1], temps[2], rd);
#else
masm.wasmCompareExchange(access, srcAddr, rexpect, rnew, rd);
#endif
break;
default:
MOZ_CRASH("Bad type for atomic operation");
}
}

-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
using AtomicXchg32Temps = Atomic32Temps<3>;
#else
using AtomicXchg32Temps = Atomic32Temps<0>;
#endif

template <typename T>
void atomicXchg32(const MemoryAccessDesc& access, T srcAddr, RegI32 rv,
RegI32 rd, const AtomicXchg32Temps& temps) {
@@ -7275,17 +7334,18 @@ class BaseCompiler final : public BaseCompilerInterface {
masm.wasmAtomicExchange(access, srcAddr, rv, rd);
}
break;
}
#endif
case Scalar::Uint16:
case Scalar::Int32:
case Scalar::Uint32:
-#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#if defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
masm.wasmAtomicExchange(access, srcAddr, rv, temps[0], temps[1],
temps[2], rd);
#else
masm.wasmAtomicExchange(access, srcAddr, rv, rd);
#endif
break;
default:
MOZ_CRASH("Bad type for atomic operation");
@@ -7342,16 +7402,18 @@ class BaseCompiler final : public BaseCompilerInterface {
#elif defined(JS_CODEGEN_MIPS32)
pop2xI64(r0, r1);
*temp = needI32();
#elif defined(JS_CODEGEN_ARM)
pop2xI64(r0, r1);
*temp = needI32();
#elif defined(JS_CODEGEN_ARM64)
pop2xI64(r0, r1);
+#elif defined(JS_CODEGEN_PPC64)
+ pop2xI64(r0, r1);
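+ // (No temp is needed here, presumably because 64-bit multiply is a
+ // single mulld on PPC64, as in the ARM64 case above.)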
#else
MOZ_CRASH("BaseCompiler porting interface: pop2xI64ForMulI64");
#endif
}

void pop2xI64ForDivI64(RegI64* r0, RegI64* r1, RegI64* reserved) {
#if defined(JS_CODEGEN_X64)
// r0 must be rax, and rdx will be clobbered.
@@ -7529,17 +7591,18 @@ class BaseCompiler final : public BaseCompilerInterface {
rexpect = bc->popI32();
}
setRd(bc->needI32());
}
~PopAtomicCmpXchg32Regs() {
bc->freeI32(rnew);
bc->freeI32(rexpect);
}
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
explicit PopAtomicCmpXchg32Regs(BaseCompiler* bc, ValType type,
Scalar::Type viewType)
: Base(bc) {
if (type == ValType::I64) {
rnew = bc->popI64ToI32();
rexpect = bc->popI64ToI32();
} else {
rnew = bc->popI32();
@@ -7606,17 +7669,17 @@ class BaseCompiler final : public BaseCompilerInterface {
rexpect = bc->popI64();
setRd(bc->needI64Pair());
}
~PopAtomicCmpXchg64Regs() {
bc->freeI64(rexpect);
bc->freeI64(rnew);
}
#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
- defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
explicit PopAtomicCmpXchg64Regs(BaseCompiler* bc) : Base(bc) {
rnew = bc->popI64();
rexpect = bc->popI64();
setRd(bc->needI64());
}
~PopAtomicCmpXchg64Regs() {
bc->freeI64(rexpect);
bc->freeI64(rnew);
@@ -7658,17 +7721,18 @@ class BaseCompiler final : public BaseCompilerInterface {
bc->needI64(bc->specific_.edx_eax);
setRd(bc->specific_.edx_eax);
}
~PopAtomicLoad64Regs() { bc->freeI32(bc->specific_.ecx); }
# elif defined(JS_CODEGEN_ARM)
explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
setRd(bc->needI64Pair());
}
-# elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+# elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
setRd(bc->needI64());
}
# else
explicit PopAtomicLoad64Regs(BaseCompiler* bc) : Base(bc) {
MOZ_CRASH("BaseCompiler porting interface: PopAtomicLoad64Regs");
}
# endif
@@ -7745,17 +7809,18 @@ class BaseCompiler final : public BaseCompilerInterface {
rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
temps.allocate(bc);
setRd(bc->needI32());
}
~PopAtomicRMW32Regs() {
bc->freeI32(rv);
temps.maybeFree(bc);
}
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
explicit PopAtomicRMW32Regs(BaseCompiler* bc, ValType type,
Scalar::Type viewType, AtomicOp op)
: Base(bc) {
rv = type == ValType::I64 ? bc->popI64ToI32() : bc->popI32();
if (Scalar::byteSize(viewType) < 4) {
temps.allocate(bc);
}

@@ -7833,17 +7898,17 @@ class BaseCompiler final : public BaseCompilerInterface {
temp = bc->needI64Pair();
setRd(bc->needI64Pair());
}
~PopAtomicRMW64Regs() {
bc->freeI64(rv);
bc->freeI64(temp);
}
#elif defined(JS_CODEGEN_ARM64) || defined(JS_CODEGEN_MIPS32) || \
- defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
explicit PopAtomicRMW64Regs(BaseCompiler* bc, AtomicOp) : Base(bc) {
rv = bc->popI64();
temp = bc->needI64();
setRd(bc->needI64());
}
~PopAtomicRMW64Regs() {
bc->freeI64(rv);
bc->freeI64(temp);
@@ -7888,17 +7953,18 @@ class BaseCompiler final : public BaseCompilerInterface {
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64)
explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
Scalar::Type viewType)
: Base(bc) {
rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
setRd(bc->needI32());
}
~PopAtomicXchg32Regs() { bc->freeI32(rv); }
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
explicit PopAtomicXchg32Regs(BaseCompiler* bc, ValType type,
Scalar::Type viewType)
: Base(bc) {
rv = (type == ValType::I64) ? bc->popI64ToI32() : bc->popI32();
if (Scalar::byteSize(viewType) < 4) {
temps.allocate(bc);
}
setRd(bc->needI32());
@@ -7954,17 +8020,18 @@ class BaseCompiler final : public BaseCompilerInterface {
~PopAtomicXchg64Regs() { bc->freeI64(rv); }
#elif defined(JS_CODEGEN_ARM)
// Both rv and rd must be odd/even pairs.
explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
rv = bc->popI64ToSpecific(bc->needI64Pair());
setRd(bc->needI64Pair());
}
~PopAtomicXchg64Regs() { bc->freeI64(rv); }
-#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
rv = bc->popI64ToSpecific(bc->needI64());
setRd(bc->needI64());
}
~PopAtomicXchg64Regs() { bc->freeI64(rv); }
#else
explicit PopAtomicXchg64Regs(BaseCompiler* bc) : Base(bc) {
MOZ_CRASH("BaseCompiler porting interface: xchg64");
@@ -8968,16 +9035,18 @@ static void CtzI32(MacroAssembler& masm, RegI32 rsd) {

// Currently common to PopcntI32 and PopcntI64
static RegI32 PopcntTemp(BaseCompiler& bc) {
#if defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
return AssemblerX86Shared::HasPOPCNT() ? RegI32::Invalid() : bc.needI32();
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
return bc.needI32();
+#elif defined(JS_CODEGEN_PPC64)
+ return RegI32::Invalid(); // No temp needed; PPC64 has native popcnt.
#else
MOZ_CRASH("BaseCompiler platform hook: PopcntTemp");
#endif
}

static void PopcntI32(BaseCompiler& bc, RegI32 rsd, RegI32 temp) {
bc.masm.popcnt32(rsd, rsd, temp);
}
@@ -11982,17 +12051,17 @@ RegI32 BaseCompiler::popMemory32Access(MemoryAccessDesc* access,
bceCheckLocal(access, check, local);
}

return popI32();
}

void BaseCompiler::pushHeapBase() {
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_ARM64) || \
- defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
RegI64 heapBase = needI64();
moveI64(RegI64(Register64(HeapReg)), heapBase);
pushI64(heapBase);
#elif defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32)
RegI32 heapBase = needI32();
moveI32(RegI32(HeapReg), heapBase);
pushI32(heapBase);
#elif defined(JS_CODEGEN_X86)
@@ -17244,17 +17313,19 @@ bool js::wasm::BaselinePlatformSupport() {
// they are definitely implemented on the Cortex-A7 and Cortex-A15
// and on all ARMv8 systems.
if (!HasIDIV()) {
return false;
}
#endif
#if defined(JS_CODEGEN_X64) || defined(JS_CODEGEN_X86) || \
defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_ARM64) || \
- defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64) || \
+ defined(JS_CODEGEN_PPC64)
+ // PPC64 support is additionally gated on prerequisites checked elsewhere.
return true;
#else
return false;
#endif
}

bool js::wasm::BaselineCompileFunctions(const ModuleEnvironment& moduleEnv,
const CompilerEnvironment& compilerEnv,
diff --git a/js/src/wasm/WasmCompile.cpp b/js/src/wasm/WasmCompile.cpp
index 0f456aaaa5..f0694f1b9e 100644
--- a/js/src/wasm/WasmCompile.cpp
+++ b/js/src/wasm/WasmCompile.cpp
@@ -45,16 +45,17 @@ using namespace js::wasm;
uint32_t wasm::ObservedCPUFeatures() {
enum Arch {
X86 = 0x1,
X64 = 0x2,
ARM = 0x3,
MIPS = 0x4,
MIPS64 = 0x5,
ARM64 = 0x6,
+ PPC64 = 0x7,
ARCH_BITS = 3
};

#if defined(JS_CODEGEN_X86)
MOZ_ASSERT(uint32_t(jit::CPUInfo::GetSSEVersion()) <=
(UINT32_MAX >> ARCH_BITS));
return X86 | (uint32_t(jit::CPUInfo::GetSSEVersion()) << ARCH_BITS);
#elif defined(JS_CODEGEN_X64)
@@ -68,16 +69,19 @@ uint32_t wasm::ObservedCPUFeatures() {
MOZ_ASSERT(jit::GetARM64Flags() <= (UINT32_MAX >> ARCH_BITS));
return ARM64 | (jit::GetARM64Flags() << ARCH_BITS);
#elif defined(JS_CODEGEN_MIPS32)
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
return MIPS | (jit::GetMIPSFlags() << ARCH_BITS);
#elif defined(JS_CODEGEN_MIPS64)
MOZ_ASSERT(jit::GetMIPSFlags() <= (UINT32_MAX >> ARCH_BITS));
return MIPS64 | (jit::GetMIPSFlags() << ARCH_BITS);
+#elif defined(JS_CODEGEN_PPC64)
+ MOZ_ASSERT(jit::GetPPC64Flags() <= (UINT32_MAX >> ARCH_BITS));
+ return PPC64 | (jit::GetPPC64Flags() << ARCH_BITS);
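+ // As for the other architectures: the low ARCH_BITS identify the
+ // architecture and the remaining bits carry the CPU feature flags.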
#elif defined(JS_CODEGEN_NONE)
return 0;
#else
# error "unknown architecture"
#endif
}

FeatureArgs FeatureArgs::build(JSContext* cx, const FeatureOptions& options) {
diff --git a/js/src/wasm/WasmFrame.h b/js/src/wasm/WasmFrame.h
index 85f2612d14..9919205739 100644
--- a/js/src/wasm/WasmFrame.h
+++ b/js/src/wasm/WasmFrame.h
@@ -53,16 +53,25 @@ constexpr uintptr_t ExitOrJitEntryFPTag = 0x1;
// before the function has made its stack reservation, the stack alignment is
// sizeof(Frame) % WasmStackAlignment.
//
// During MacroAssembler code generation, the bytes pushed after the wasm::Frame
// are counted by masm.framePushed. Thus, the stack alignment at any point in
// time is (sizeof(wasm::Frame) + masm.framePushed) % WasmStackAlignment.

class Frame {
+#if defined(JS_CODEGEN_PPC64)
+ // Since Wasm can call directly to ABI-compliant routines, the Frame must
+ // have an ABI-compliant linkage area. We allocate four doublewords, the
+ // minimum size.
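+ // (Under the ELFv2 ABI these four doublewords are the back chain, the
+ // CR save word, the LR save doubleword and the TOC pointer: 32 bytes.)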
+ void *_ppc_sp_;
+ void *_ppc_cr_;
+ void *_ppc_lr_;
+ void *_ppc_toc_;
+#endif
// See GenerateCallableEpilogue for why this must be
// the first field of wasm::Frame (in a downward-growing stack).
// It's either the caller's Frame*, for wasm callers, or the JIT caller frame
// plus a tag otherwise.
uint8_t* callerFP_;

// The return address pushed by the call (in the case of ARM/MIPS the return
// address is pushed by the first instruction of the prologue).
@@ -115,18 +124,21 @@ class Frame {
static uint8_t* addExitOrJitEntryFPTag(const Frame* fp) {
MOZ_ASSERT(!isExitOrJitEntryFP(fp));
return reinterpret_cast<uint8_t*>(reinterpret_cast<uintptr_t>(fp) |
ExitOrJitEntryFPTag);
}
};

static_assert(!std::is_polymorphic_v<Frame>, "Frame doesn't need a vtable.");
+#if !defined(JS_CODEGEN_PPC64)
+// Not on PowerPC: there the Frame also embeds the 32-byte ABI linkage area.
static_assert(sizeof(Frame) == 2 * sizeof(void*),
"Frame is a two pointer structure");
+#endif

class FrameWithTls : public Frame {
TlsData* calleeTls_;
TlsData* callerTls_;

public:
TlsData* calleeTls() { return calleeTls_; }
TlsData* callerTls() { return callerTls_; }
diff --git a/js/src/wasm/WasmFrameIter.cpp b/js/src/wasm/WasmFrameIter.cpp
index dffab53940..5da8d6c730 100644
--- a/js/src/wasm/WasmFrameIter.cpp
+++ b/js/src/wasm/WasmFrameIter.cpp
@@ -358,16 +358,21 @@ static const unsigned SetFP = 16;
static const unsigned PoppedFP = 4;
static_assert(BeforePushRetAddr == 0, "Required by StartUnwinding");
static_assert(PushedFP > PushedRetAddr, "Required by StartUnwinding");
#elif defined(JS_CODEGEN_MIPS32) || defined(JS_CODEGEN_MIPS64)
static const unsigned PushedRetAddr = 8;
static const unsigned PushedFP = 12;
static const unsigned SetFP = 16;
static const unsigned PoppedFP = 4;
+#elif defined(JS_CODEGEN_PPC64)
+static const unsigned PushedRetAddr = 12;
+static const unsigned PushedFP = 16;
+static const unsigned SetFP = 20;
+static const unsigned PoppedFP = 8;
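+// These offsets correspond to the fixed 4-byte instruction slots emitted by
+// the PPC64 prologue and epilogue below.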
#elif defined(JS_CODEGEN_NONE)
// Synthetic values to satisfy asserts and avoid compiler warnings.
static const unsigned PushedRetAddr = 0;
static const unsigned PushedFP = 1;
static const unsigned SetFP = 2;
static const unsigned PoppedFP = 3;
#else
# error "Unknown architecture!"
@@ -453,16 +458,38 @@ static void GenerateCallablePrologue(MacroAssembler& masm, uint32_t* entry) {
MemOperand(sp, Frame::callerFPOffset()));
MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
masm.Mov(ARMRegister(FramePointer, 64), sp);
MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);

// And restore the SP-reg setting, per comment above.
masm.SetStackPointer64(stashedSPreg);
}
+#elif defined(JS_CODEGEN_PPC64)
+ {
+ *entry = masm.currentOffset();
+
+ // These must be in this precise order. Fortunately we can subsume the
+ // SPR load into the initial "verse" since it is treated atomically.
+ // The linkage area required for ABI compliance is baked into the Frame.
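+ // Layout, at 4 bytes per instruction: mflr @0, addi @4, std(lr) @8,
+ // std(fp) @12, mr @16, matching PushedRetAddr/PushedFP/SetFP above.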
+ masm.xs_mflr(ScratchRegister);
+ masm.as_addi(StackPointer, StackPointer, -(sizeof(Frame)));
+ masm.as_std(ScratchRegister, StackPointer, Frame::returnAddressOffset());
+ MOZ_ASSERT_IF(!masm.oom(), PushedRetAddr == masm.currentOffset() - *entry);
+ masm.as_std(FramePointer, StackPointer, Frame::callerFPOffset());
+ MOZ_ASSERT_IF(!masm.oom(), PushedFP == masm.currentOffset() - *entry);
+ masm.xs_mr(FramePointer, StackPointer);
+ MOZ_ASSERT_IF(!masm.oom(), SetFP == masm.currentOffset() - *entry);
+
+ // Burn nops to pad the prologue out to a multiple of 16 bytes; the
+ // extra mfspr above leaves us at 20 otherwise.
+ masm.as_nop(); // 24
+ masm.as_nop(); // 28
+ masm.as_nop(); // 32 // trap point
+ }
#else
{
# if defined(JS_CODEGEN_ARM)
AutoForbidPoolsAndNops afp(&masm,
/* number of instructions in scope = */ 3);

*entry = masm.currentOffset();

@@ -527,16 +554,28 @@ static void GenerateCallableEpilogue(MacroAssembler& masm, unsigned framePushed,
// use it. Hence we have to do it "by hand".
masm.Mov(PseudoStackPointer64, vixl::sp);

masm.Ret(ARMRegister(lr, 64));

// See comment at equivalent place in |GenerateCallablePrologue| above.
masm.SetStackPointer64(stashedSPreg);

+#elif defined(JS_CODEGEN_PPC64)
+
+ masm.as_ld(FramePointer, StackPointer, Frame::callerFPOffset());
+ poppedFP = masm.currentOffset();
+ // This is suboptimal since we get serialized, but has to be in this order.
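+ // (The mtlr cannot issue until the ld of the saved return address
+ // completes, hence the serialization.)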
+ masm.as_ld(ScratchRegister, StackPointer, Frame::returnAddressOffset());
+ masm.xs_mtlr(ScratchRegister);
+ *ret = masm.currentOffset();
+
+ masm.as_addi(StackPointer, StackPointer, sizeof(Frame));
+ masm.as_blr();
+
#else
// Forbid pools for the same reason as described in GenerateCallablePrologue.
# if defined(JS_CODEGEN_ARM)
AutoForbidPoolsAndNops afp(&masm, /* number of instructions in scope = */ 6);
# endif

// There is an important ordering constraint here: fp must be repointed to
// the caller's frame before any field of the frame currently pointed to by
@@ -773,16 +812,23 @@ void wasm::GenerateJitEntryPrologue(MacroAssembler& masm, Offsets* offsets) {
AutoForbidPoolsAndNops afp(&masm,
/* number of instructions in scope = */ 3);
offsets->begin = masm.currentOffset();
static_assert(BeforePushRetAddr == 0);
// Subtract from SP first as SP must be aligned before offsetting.
masm.Sub(sp, sp, 8);
masm.storePtr(lr, Address(masm.getStackPointer(), 0));
masm.adjustFrame(8);
+#elif defined(JS_CODEGEN_PPC64)
+ offsets->begin = masm.currentOffset();
+
+ // We have to burn a nop here to match the other prologue length.
+ masm.xs_mflr(ScratchRegister);
+ masm.as_nop(); // might as well explicitly wait for the mfspr to complete
+ masm.as_stdu(ScratchRegister, StackPointer, -8);
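+ // mflr + nop + stdu is 12 bytes, matching PushedRetAddr (asserted below).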
#else
// The x86/x64 call instruction pushes the return address.
offsets->begin = masm.currentOffset();
#endif
MOZ_ASSERT_IF(!masm.oom(),
PushedRetAddr == masm.currentOffset() - offsets->begin);

// Save jit frame pointer, so unwinding from wasm to jit frames is trivial.
diff --git a/js/src/wasm/WasmGC.cpp b/js/src/wasm/WasmGC.cpp
index 4eb77a81a2..3f00cbb632 100644
--- a/js/src/wasm/WasmGC.cpp
+++ b/js/src/wasm/WasmGC.cpp
@@ -284,16 +284,33 @@ bool IsValidStackMapKey(bool debugEnabled, const uint8_t* nextPC) {
(insn[-1] & 0xfffffc1f) == 0xd63f0000 || // blr reg
(insn[-1] & 0xfc000000) == 0x94000000 || // bl simm26
(debugEnabled && insn[-1] == 0xd503201f)); // nop

# elif defined(JS_CODEGEN_MIPS64)
// TODO (bug 1699696): Implement this. As for the platforms above, we need to
// enumerate all code sequences that can precede the stackmap location.
return true;
+# elif defined(JS_CODEGEN_PPC64)
+ // XXX: we should just be able to use inst[0]
+ const uint32_t* insn = (const uint32_t*)nextPC;
+ js::jit::Instruction* inst = (js::jit::Instruction*)nextPC;
+ //fprintf(stderr, "IsValidStackMapKey: 0x%lx 0x%08x\n", (uint64_t)nextPC, insn[0]);
+ return (((uintptr_t(insn) & 3) == 0) &&
+ (inst[0].extractOpcode() == js::jit::PPC_addi || // stack allocate
+ inst[0].extractOpcode() == js::jit::PPC_addis || // load immediate
+ inst[0].extractOpcode() == js::jit::PPC_cmpwi || // test after bl
+ inst[0].extractOpcode() == js::jit::PPC_cmpw || // (extsw, same)
+ inst[0].extractOpcode() == js::jit::PPC_lfd || // load FPR
+ inst[0].extractOpcode() == js::jit::PPC_lfs || // load FPR
+ inst[0].extractOpcode() == js::jit::PPC_lwz || // load GPR
+ inst[0].extractOpcode() == js::jit::PPC_ld || // load GPR
+ inst[0].extractOpcode() == js::jit::PPC_b || // branch
+ inst[0].encode() == js::jit::PPC_nop || // nop (padding)
+ inst[0].encode() == js::jit::PPC_stop)); // designated throw
# else
MOZ_CRASH("IsValidStackMapKey: requires implementation on this platform");
# endif
}
#endif

} // namespace wasm
} // namespace js
diff --git a/js/src/wasm/WasmSignalHandlers.cpp b/js/src/wasm/WasmSignalHandlers.cpp
index 4ab2a44192..1a51061a12 100644
--- a/js/src/wasm/WasmSignalHandlers.cpp
+++ b/js/src/wasm/WasmSignalHandlers.cpp
@@ -101,16 +101,17 @@ using mozilla::DebugOnly;
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->sc_pc)
# define RFP_sig(p) ((p)->sc_regs[30])
# endif
# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
defined(__PPC64LE__)
# define R01_sig(p) ((p)->sc_frame.fixreg[1])
+# define R31_sig(p) ((p)->sc_frame.fixreg[31])
# define R32_sig(p) ((p)->sc_frame.srr0)
# endif
#elif defined(__linux__) || defined(__sun)
# if defined(__linux__)
# define EIP_sig(p) ((p)->uc_mcontext.gregs[REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.gregs[REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.gregs[REG_ESP])
# else
@@ -147,16 +148,17 @@ using mozilla::DebugOnly;
# if defined(__linux__) && (defined(__sparc__) && defined(__arch64__))
# define PC_sig(p) ((p)->uc_mcontext.mc_gregs[MC_PC])
# define FP_sig(p) ((p)->uc_mcontext.mc_fp)
# define SP_sig(p) ((p)->uc_mcontext.mc_i7)
# endif
# if defined(__linux__) && (defined(__ppc64__) || defined(__PPC64__) || \
defined(__ppc64le__) || defined(__PPC64LE__))
# define R01_sig(p) ((p)->uc_mcontext.gp_regs[1])
+# define R31_sig(p) ((p)->uc_mcontext.gp_regs[31])
# define R32_sig(p) ((p)->uc_mcontext.gp_regs[32])
# endif
#elif defined(__NetBSD__)
# define EIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EIP])
# define EBP_sig(p) ((p)->uc_mcontext.__gregs[_REG_EBP])
# define ESP_sig(p) ((p)->uc_mcontext.__gregs[_REG_ESP])
# define RIP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RIP])
# define RSP_sig(p) ((p)->uc_mcontext.__gregs[_REG_RSP])
@@ -173,16 +175,17 @@ using mozilla::DebugOnly;
# endif
# if defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.__gregs[_REG_EPC])
# define RFP_sig(p) ((p)->uc_mcontext.__gregs[_REG_S8])
# endif
# if defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
defined(__PPC64LE__)
# define R01_sig(p) ((p)->uc_mcontext.__gregs[_REG_R1])
+# define R31_sig(p) ((p)->uc_mcontext.__gregs[_REG_R31])
# define R32_sig(p) ((p)->uc_mcontext.__gregs[_REG_PC])
# endif
#elif defined(__DragonFly__) || defined(__FreeBSD__) || \
defined(__FreeBSD_kernel__)
# define EIP_sig(p) ((p)->uc_mcontext.mc_eip)
# define EBP_sig(p) ((p)->uc_mcontext.mc_ebp)
# define ESP_sig(p) ((p)->uc_mcontext.mc_esp)
# define RIP_sig(p) ((p)->uc_mcontext.mc_rip)
@@ -207,16 +210,17 @@ using mozilla::DebugOnly;
# endif
# if defined(__FreeBSD__) && defined(__mips__)
# define EPC_sig(p) ((p)->uc_mcontext.mc_pc)
# define RFP_sig(p) ((p)->uc_mcontext.mc_regs[30])
# endif
# if defined(__FreeBSD__) && (defined(__ppc64__) || defined(__PPC64__) || \
defined(__ppc64le__) || defined(__PPC64LE__))
# define R01_sig(p) ((p)->uc_mcontext.mc_gpr[1])
+# define R31_sig(p) ((p)->uc_mcontext.mc_gpr[31])
# define R32_sig(p) ((p)->uc_mcontext.mc_srr0)
# endif
#elif defined(XP_DARWIN)
# define EIP_sig(p) ((p)->thread.uts.ts32.__eip)
# define EBP_sig(p) ((p)->thread.uts.ts32.__ebp)
# define ESP_sig(p) ((p)->thread.uts.ts32.__esp)
# define RIP_sig(p) ((p)->thread.__rip)
# define RBP_sig(p) ((p)->thread.__rbp)
@@ -367,17 +371,17 @@ struct macos_aarch64_context {
# define PC_sig(p) EPC_sig(p)
# define FP_sig(p) RFP_sig(p)
# define SP_sig(p) RSP_sig(p)
# define LR_sig(p) R31_sig(p)
#elif defined(__ppc64__) || defined(__PPC64__) || defined(__ppc64le__) || \
defined(__PPC64LE__)
# define PC_sig(p) R32_sig(p)
# define SP_sig(p) R01_sig(p)
-# define FP_sig(p) R01_sig(p)
+# define FP_sig(p) R31_sig(p)
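+// (r1 is the stack pointer on ppc64; the frame pointer lives in r31, so
+// FP_sig must not alias SP_sig.)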
#endif

static void SetContextPC(CONTEXT* context, uint8_t* pc) {
#ifdef PC_sig
*reinterpret_cast<uint8_t**>(&PC_sig(context)) = pc;
#else
MOZ_CRASH();
#endif
diff --git a/js/src/wasm/WasmStubs.cpp b/js/src/wasm/WasmStubs.cpp
index 59a5cf18bf..dbc10c6e2c 100644
--- a/js/src/wasm/WasmStubs.cpp
+++ b/js/src/wasm/WasmStubs.cpp
@@ -719,17 +719,17 @@ static bool GenerateInterpEntry(MacroAssembler& masm, const FuncExport& fe,
AssertExpectedSP(masm);
masm.haltingAlign(CodeAlignment);

offsets->begin = masm.currentOffset();

// Save the return address if it wasn't already saved by the call insn.
#ifdef JS_USE_LINK_REGISTER
# if defined(JS_CODEGEN_ARM) || defined(JS_CODEGEN_MIPS32) || \
- defined(JS_CODEGEN_MIPS64)
+ defined(JS_CODEGEN_MIPS64) || defined(JS_CODEGEN_PPC64)
masm.pushReturnAddress();
# elif defined(JS_CODEGEN_ARM64)
// WasmPush updates framePushed() unlike pushReturnAddress(), but that's
// cancelled by the setFramePushed() below.
WasmPush(masm, lr);
# else
MOZ_CRASH("Implement this");
# endif
@@ -2111,17 +2111,26 @@ static bool GenerateImportInterpExit(MacroAssembler& masm, const FuncImport& fi,
masm.storePtr(scratch,
Address(masm.getStackPointer(), i->offsetFromArgBase()));
}
i++;
MOZ_ASSERT(i.done());

// Make the call, test whether it succeeded, and extract the return value.
AssertStackAlignment(masm, ABIStackAlignment);
+#ifdef JS_CODEGEN_PPC64
+ // Because this is calling an ABI-compliant function, we have to pull down
+ // a dummy linkage area or the values on the stack will be stomped on. The
+ // minimum size is sufficient.
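+ // (32 bytes: the four-doubleword ELFv2 linkage area described in
+ // WasmFrame.h.)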
+ masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), -32);
+#endif
masm.call(SymbolicAddress::CallImport_General);
+#ifdef JS_CODEGEN_PPC64
+ masm.as_addi(masm.getStackPointer(), masm.getStackPointer(), 32);
+#endif
masm.branchTest32(Assembler::Zero, ReturnReg, ReturnReg, throwLabel);

ResultType resultType = ResultType::Vector(fi.funcType().results());
ValType registerResultType;
for (ABIResultIter iter(resultType); !iter.done(); iter.next()) {
if (iter.cur().inRegister()) {
MOZ_ASSERT(!registerResultType.isValid());
registerResultType = iter.cur().type();
@@ -2673,16 +2682,21 @@ static const LiveRegisterSet RegsToPreserve(
#elif defined(JS_CODEGEN_X86) || defined(JS_CODEGEN_X64)
// It's correct to use FloatRegisters::AllMask even when SIMD is not enabled;
// PushRegsInMask strips out the high lanes of the XMM registers in this case,
// while the singles will be stripped as they are aliased by the larger doubles.
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(Registers::AllMask &
~(Registers::SetType(1) << Registers::StackPointer)),
FloatRegisterSet(FloatRegisters::AllMask));
+#elif defined(JS_CODEGEN_PPC64)
+// Note that this includes no SPRs, since the JIT is unaware of them.
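+// (LR is saved and restored explicitly where needed; see the prologue and
+// epilogue in WasmFrameIter.cpp.)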
+static const LiveRegisterSet RegsToPreserve(
+ GeneralRegisterSet(Registers::AllMask),
+ FloatRegisterSet(FloatRegisters::AllMask));
#else
static const LiveRegisterSet RegsToPreserve(
GeneralRegisterSet(0), FloatRegisterSet(FloatRegisters::AllDoubleMask));
# ifdef ENABLE_WASM_SIMD
# error "no SIMD support"
# endif
#endif

diff --git a/modules/libpref/init/StaticPrefList.yaml b/modules/libpref/init/StaticPrefList.yaml
index d81025b282..43b75c6ae0 100644
--- a/modules/libpref/init/StaticPrefList.yaml
+++ b/modules/libpref/init/StaticPrefList.yaml
@@ -5729,17 +5729,17 @@
- name: javascript.options.baselinejit
type: bool
value: true
mirror: always # LoadStartupJSPrefs
do_not_use_directly: true

- name: javascript.options.ion
type: bool
- value: true
+ value: false
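+ # Presumably disabled because Ion has not been ported to ppc64 yet.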
mirror: always # LoadStartupJSPrefs
do_not_use_directly: true

# The irregexp JIT for regex evaluation.
- name: javascript.options.native_regexp
type: bool
value: true
mirror: always # LoadStartupJSPrefs
@@ -5968,17 +5968,17 @@
value: 6 * 1024 * 1024
#else
value: 2 * 1024 * 1024
#endif
mirror: always

- name: javascript.options.wasm_optimizingjit
type: bool
- value: true
+ value: false
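+ # Presumably disabled for the same reason as javascript.options.ion above.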
mirror: always

#if defined(ENABLE_WASM_SIMD)
- name: javascript.options.wasm_simd
type: bool
value: true
mirror: always
#endif // defined(ENABLE_WASM_SIMD)