@annulen
Last active March 21, 2016 09:35
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
index 7c4bd08..d2ace0f 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerMIPS.h
@@ -2499,6 +2499,12 @@ public:
             m_assembler.movd(dest, src);
     }
 
+    void moveZeroToDouble(FPRegisterID reg)
+    {
+        //m_assembler.mtc1(MIPSRegisters::zero, reg);
+        convertInt32ToDouble(MIPSRegisters::zero, reg);
+    }
+
     void swapDouble(FPRegisterID fr1, FPRegisterID fr2)
     {
         moveDouble(fr1, fpTempRegister);
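
The helper materializes +0.0 in an FP register without a memory load. The commented-out mtc1 would write only one 32-bit half of the double register pair on MIPS32, leaving the high word undefined, which is presumably why it goes through convertInt32ToDouble (mtc1 followed by cvt.d.w) instead. A minimal host-side sketch of the invariant the thunks below rely on (not part of the patch):

// Sketch: converting integer 0 gives a double bit-identical to the literal +0.0
// that loadDouble(&zeroConstant, ...) used to provide, so the DoubleEqual
// comparisons in the thunks are unaffected by the new helper.
#include <cstdint>
#include <cstdio>
#include <cstring>

int main()
{
    double fromInt = static_cast<double>(0); // what convertInt32ToDouble($zero, reg) produces
    double fromConst = 0.0;                  // what loadDouble(&zeroConstant, reg) produced
    uint64_t a;
    uint64_t b;
    std::memcpy(&a, &fromInt, sizeof a);
    std::memcpy(&b, &fromConst, sizeof b);
    std::printf("%016llx %016llx\n", static_cast<unsigned long long>(a), static_cast<unsigned long long>(b));
    return a == b ? 0 : 1; // expect identical bit patterns, exit code 0
}
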
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 6a2da6d..059c54b 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -176,8 +176,6 @@ namespace JSC {
             return FINALIZE_CODE(patchBuffer, ("Specialized thunk for %s", thunkKind));
         }
 
-        // Assumes that the target function uses fpRegister0 as the first argument
-        // and return value. Like any sensible architecture would.
        void callDoubleToDouble(FunctionPtr function)
         {
             m_calls.append(std::make_pair(call(), function));
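
The removed comment encoded the assumption that the first FP argument register is also the FP return register. That holds on x86-64 (xmm0) and ARM64 (d0), but not on MIPS o32, where a double argument arrives in $f12 and the result comes back in $f0, which is why the thunk generators below start distinguishing argumentFPR0 from returnValueFPR. A small sketch of the distinction (illustrative only; callFloor and main are hypothetical helpers, not part of the patch):

// Sketch of the dropped assumption: for a double -> double call, x86-64 and
// ARM64 keep the value in one register (xmm0 / d0) across the call, while
// MIPS o32 takes the argument in $f12 and returns the result in $f0, so
// "argument FPR" and "return value FPR" are different registers.
#include <cmath>

double callFloor(double x)
{
    // MIPS o32: x is in $f12 here, floor()'s result arrives in $f0.
    // x86-64 / ARM64: both sides of the call use xmm0 / d0.
    return std::floor(x);
}

int main()
{
    return callFloor(2.5) == 2.0 ? 0 : 1;
}
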
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 4a71dfe..71f00b9 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -735,6 +735,29 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
     } \
     static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
 
+#elif CPU(MIPS)
+
+#define defineUnaryDoubleOpWrapper(function) \
+    asm( \
+        ".text\n" \
+        ".align 2\n" \
+        ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+        HIDE_SYMBOL(function##Thunk) "\n" \
+        SYMBOL_STRING(function##Thunk) ":" "\n" \
+        ".set noreorder\n" \
+        ".cpload $t9\n" \
+        ".set nomacro\n" \
+        "lw $t9, %call16(" #function ")($gp)\n" \
+        "jr $t9\n" \
+        "nop\n" \
+        ".set macro\n" \
+        ".set reorder\n" \
+    ); \
+    extern "C" { \
+        MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+    } \
+    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
 #elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
 
 // MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
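
The MIPS wrapper above is a position-independent tail-call trampoline: .cpload $t9 recomputes $gp from the thunk's own entry address (passed in $t9 per the o32 PIC convention), the lw with %call16 fetches the target function's address from the GOT, and jr $t9 jumps there with an explicit nop in the delay slot. Since it sets up no frame and never touches the FP registers, the double argument reaches the libm function still in $f12 and the result returns to the JIT caller in $f0. Roughly equivalent C++, as a sketch only (floorThunkSketch and main are hypothetical names, not part of the patch):

// Rough equivalent of a generated thunk: a plain tail call, so the argument
// and result registers are exactly those of the underlying libm function on
// the target ABI.
#include <cmath>

extern "C" double floorThunkSketch(double x)
{
    return std::floor(x); // the real thunk does this via "lw $t9, %call16(floor)($gp)" / "jr $t9"
}

int main()
{
    return floorThunkSketch(-1.5) == -2.0 ? 0 : 1;
}
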
@@ -766,15 +789,26 @@ static double (_cdecl *jsRoundFunction)(double) = jsRound;
     static MathThunk UnaryDoubleOpWrapper(function) = 0
 #endif
 
+extern "C" {
+double floor1(double x)
+{
+    double res = floor(x);
+    fprintf(stderr, "floor1(%f) -> %f\n", x, res);
+    return res;
+}
+}
+
 defineUnaryDoubleOpWrapper(jsRound);
 defineUnaryDoubleOpWrapper(exp);
 defineUnaryDoubleOpWrapper(log);
 defineUnaryDoubleOpWrapper(floor);
+defineUnaryDoubleOpWrapper(floor1);
 defineUnaryDoubleOpWrapper(ceil);
 
 static const double oneConstant = 1.0;
 static const double negativeHalfConstant = -0.5;
 static const double halfConstant = 0.5;
+static const double zeroConstant = 0.0;
 
 MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
 {
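
floor1 is a temporary tracing wrapper: it goes through the same defineUnaryDoubleOpWrapper machinery as floor, so a thunk's slow-path call can be pointed at it while debugging, and the stderr line shows whether the argument and the result really travel through the expected registers (a mis-routed FPR shows up as a garbage value in the trace). zeroConstant mirrors the existing half constants and appears to back the commented-out loadDouble experiment in floorThunkGenerator below. A standalone sketch of the tracing idea (the main() here is only for illustration, not part of the patch):

// Standalone sketch of the floor1 tracing idea, mirroring the wrapper added
// above: if the JIT hands over or reads back the wrong FP register, the
// logged values will not match what the test script passed in.
#include <cmath>
#include <cstdio>

extern "C" double floor1(double x)
{
    double res = std::floor(x);
    std::fprintf(stderr, "floor1(%f) -> %f\n", x, res);
    return res;
}

int main()
{
    floor1(3.7);  // expected trace: floor1(3.700000) -> 3.000000
    floor1(-2.5); // expected trace: floor1(-2.500000) -> -3.000000
    return 0;
}
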
@@ -785,7 +819,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     nonIntJump.link(&jit);
-    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+    jit.loadDoubleArgument(0, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::regT0);
 #if CPU(ARM64)
     SpecializedThunkJIT::JumpList doubleResult;
     jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
@@ -798,21 +832,28 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
     SpecializedThunkJIT::JumpList doubleResult;
     if (jit.supportsFloatingPointTruncate()) {
         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
-        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+        //jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
+        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::fpRegT1));
         SpecializedThunkJIT::JumpList slowPath;
         // Handle the negative doubles in the slow path for now.
-        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
-        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0));
+        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::fpRegT1));
+        slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::regT0));
         intResult = jit.jump();
         slowPath.link(&jit);
     }
     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(floor));
-    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::returnValueFPR, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
     if (jit.supportsFloatingPointTruncate())
         intResult.link(&jit);
     jit.returnInt32(SpecializedThunkJIT::regT0);
+#if !CPU(MIPS)
     doubleResult.link(&jit);
-    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+#endif
+    jit.returnDouble(SpecializedThunkJIT::returnValueFPR);
+#if CPU(MIPS)
+    doubleResult.link(&jit);
+    jit.returnDouble(SpecializedThunkJIT::argumentFPR0);
+#endif
 #endif // CPU(ARM64)
     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
 }
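
The conditional epilogue is the MIPS-specific part of this hunk: on the other ports both ways of reaching doubleResult leave the double in the same register, so a single returnDouble serves both. On MIPS the fast path's value is still sitting in argumentFPR0 (the argument that compared equal to zero), while the slow path's value is whatever the floor() call left in returnValueFPR, so this work-in-progress version gives the MIPS build its own doubleResult return site. A hypothetical host-side model of the register routing being expressed, not of the generated code itself (floorFastOrSlow and main are invented names):

// Model: the fast exit hands back the untouched argument register, the slow
// exit hands back whatever the floor() call returned (in $f0 on MIPS o32).
#include <cmath>
#include <cstdio>

double floorFastOrSlow(double argumentFPR0, bool& usedSlowPath)
{
    if (argumentFPR0 == 0.0) {
        usedSlowPath = false;
        return argumentFPR0;          // fast path: value never left the argument register
    }
    usedSlowPath = true;
    double returnValueFPR = std::floor(argumentFPR0);
    return returnValueFPR;            // slow path: value produced by the call
}

int main()
{
    bool slow = false;
    double fast = floorFastOrSlow(0.0, slow);
    std::printf("%f slow=%d\n", fast, slow);
    double slowResult = floorFastOrSlow(2.5, slow);
    std::printf("%f slow=%d\n", slowResult, slow);
    return 0;
}
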
@@ -826,17 +867,17 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     nonIntJump.link(&jit);
-    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+    jit.loadDoubleArgument(0, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::regT0);
     if (jit.supportsFloatingPointRounding())
-        jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+        jit.ceilDouble(SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::returnValueFPR);
     else
         jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
 
     SpecializedThunkJIT::JumpList doubleResult;
-    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::returnValueFPR, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     doubleResult.link(&jit);
-    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+    jit.returnDouble(SpecializedThunkJIT::returnValueFPR);
     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
 }
 
@@ -849,28 +890,34 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
     jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
     jit.returnInt32(SpecializedThunkJIT::regT0);
     nonIntJump.link(&jit);
-    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+    jit.loadDoubleArgument(0, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::regT0);
     SpecializedThunkJIT::Jump intResult;
     SpecializedThunkJIT::JumpList doubleResult;
     if (jit.supportsFloatingPointTruncate()) {
         jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
-        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+        doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::fpRegT1));
         SpecializedThunkJIT::JumpList slowPath;
         // Handle the negative doubles in the slow path for now.
-        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
+        slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::fpRegT1));
         jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
-        jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
+        jit.addDouble(SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::fpRegT1);
         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
         intResult = jit.jump();
         slowPath.link(&jit);
     }
     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(jsRound));
-    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+    jit.branchConvertDoubleToInt32(SpecializedThunkJIT::returnValueFPR, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
     if (jit.supportsFloatingPointTruncate())
         intResult.link(&jit);
     jit.returnInt32(SpecializedThunkJIT::regT0);
+#if !CPU(MIPS)
     doubleResult.link(&jit);
-    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+#endif
+    jit.returnDouble(SpecializedThunkJIT::returnValueFPR);
+#if CPU(MIPS)
+    doubleResult.link(&jit);
+    jit.returnDouble(SpecializedThunkJIT::argumentFPR0);
+#endif
     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
 }
 
@@ -894,9 +941,9 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm)
     SpecializedThunkJIT jit(vm, 1);
     if (!jit.supportsFloatingPoint())
         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
-    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+    jit.loadDoubleArgument(0, SpecializedThunkJIT::argumentFPR0, SpecializedThunkJIT::regT0);
     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
-    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+    jit.returnDouble(SpecializedThunkJIT::returnValueFPR);
     return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
 }
 