@pgavlin
Created November 7, 2016 23:01
diff --git a/src/jit/importer.cpp b/src/jit/importer.cpp
index 5c21f08..2555e1f 100644
--- a/src/jit/importer.cpp
+++ b/src/jit/importer.cpp
@@ -1389,202 +1389,202 @@ GenTreePtr Compiler::impAssignStructPtr(GenTreePtr destAddr,
/*****************************************************************************
   Given a struct value, and the class handle for that structure, return
   the expression for the address of that struct value.

   willDeref - does the caller guarantee to dereference the pointer.
*/
GenTreePtr Compiler::impGetStructAddr(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      bool                 willDeref)
{
    assert(varTypeIsStruct(structVal) || eeIsValueClass(structHnd));

    var_types  type = structVal->TypeGet();
    genTreeOps oper = structVal->gtOper;

    if (oper == GT_OBJ && willDeref)
    {
        assert(structVal->gtObj.gtClass == structHnd);
        return (structVal->gtObj.Addr());
    }
    else if (oper == GT_CALL || oper == GT_RET_EXPR || oper == GT_OBJ || oper == GT_MKREFANY)
    {
        unsigned tmpNum = lvaGrabTemp(true DEBUGARG("struct address for call/obj"));

        impAssignTempGen(tmpNum, structVal, structHnd, curLevel);

        // The 'return value' is now the temp itself
        type = genActualType(lvaTable[tmpNum].TypeGet());

        GenTreePtr temp = gtNewLclvNode(tmpNum, type);
        temp            = gtNewOperNode(GT_ADDR, TYP_BYREF, temp);
        return temp;
    }
    else if (oper == GT_COMMA)
    {
        assert(structVal->gtOp.gtOp2->gtType == type); // Second thing is the struct

        GenTreePtr oldTreeLast = impTreeLast;
        structVal->gtOp.gtOp2  = impGetStructAddr(structVal->gtOp.gtOp2, structHnd, curLevel, willDeref);
        structVal->gtType      = TYP_BYREF;

        if (oldTreeLast != impTreeLast)
        {
            // Some temp assignment statement was placed on the statement list
            // for Op2, but that would be out of order with op1, so we need to
            // spill op1 onto the statement list after whatever was last
            // before we recursed on Op2 (i.e. before whatever Op2 appended).
            impInsertTreeBefore(structVal->gtOp.gtOp1, impCurStmtOffs, oldTreeLast->gtNext);
            structVal->gtOp.gtOp1 = gtNewNothingNode();
        }

        return (structVal);
    }

    return (gtNewOperNode(GT_ADDR, TYP_BYREF, structVal));
}
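A note on the GT_CALL/GT_RET_EXPR path above: a call's return value has no address of its own, so the importer first spills it to a fresh local (lvaGrabTemp plus impAssignTempGen) and then returns the address of that local. A minimal standalone C++ analogy of the same constraint; Pair and makePair are hypothetical illustrations, not JIT code:

#include <cstdio>

struct Pair
{
    int a, b;
};

Pair makePair()
{
    return Pair{1, 2}; // an rvalue: it has no stable address of its own
}

int main()
{
    // &makePair() would not compile; the result must first be materialized
    // in a named local, which is what the temp-spill above does in IR form.
    Pair  tmp  = makePair();
    Pair* addr = &tmp;
    printf("%d %d\n", addr->a, addr->b);
    return 0;
}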
//------------------------------------------------------------------------
// impNormStructType: Given a (known to be) struct class handle structHnd, normalize its type,
// and optionally determine the GC layout of the struct.
//
// Arguments:
// structHnd - The class handle for the struct type of interest.
// gcLayout - (optional, default nullptr) - a BYTE pointer, allocated by the caller,
// into which the gcLayout will be written.
// pNumGCVars - (optional, default nullptr) - if non-null, a pointer to an unsigned,
// which will be set to the number of GC fields in the struct.
// pSimdBaseType - (optional, default nullptr) - if non-null, and the struct is a SIMD
// type, set to the SIMD base type
//
// Return Value:
// The JIT type for the struct (e.g. TYP_STRUCT, or TYP_SIMD*).
// The gcLayout will be returned using the pointers provided by the caller, if non-null.
// It may also modify the compFloatingPointUsed flag if the type is a SIMD type.
//
// Assumptions:
// The caller must set gcLayout to nullptr OR ensure that it is large enough
// (see ICorStaticInfo::getClassGClayout in corinfo.h).
//
// Notes:
// Normalizing the type involves examining the struct type to determine if it should
// be modified to one that is handled specially by the JIT, possibly being a candidate
// for full enregistration, e.g. TYP_SIMD16.
var_types Compiler::impNormStructType(CORINFO_CLASS_HANDLE structHnd,
                                      BYTE*                gcLayout,
                                      unsigned*            pNumGCVars,
                                      var_types*           pSimdBaseType)
{
    assert(structHnd != NO_CLASS_HANDLE);

    const DWORD structFlags = info.compCompHnd->getClassAttribs(structHnd);
    var_types   structType  = TYP_STRUCT;

#ifdef FEATURE_CORECLR
    const bool hasGCPtrs = (structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0;
#else
    // Desktop CLR won't report FLG_CONTAINS_GC_PTR for RefAnyClass - need to check explicitly.
-    const bool isRefAny  = (structHnd == impGetRefAnyClass());
-    const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
+    const bool isRefAny  = (structHnd == impGetRefAnyClass());
+    const bool hasGCPtrs = isRefAny || ((structFlags & CORINFO_FLG_CONTAINS_GC_PTR) != 0);
#endif

#ifdef FEATURE_SIMD
    // Check to see if this is a SIMD type.
    if (featureSIMD && !hasGCPtrs)
    {
        unsigned originalSize = info.compCompHnd->getClassSize(structHnd);

        if ((originalSize >= minSIMDStructBytes()) && (originalSize <= maxSIMDStructBytes()))
        {
            unsigned int sizeBytes;
            var_types    simdBaseType = getBaseTypeAndSizeOfSIMDType(structHnd, &sizeBytes);
            if (simdBaseType != TYP_UNKNOWN)
            {
                assert(sizeBytes == originalSize);
                structType = getSIMDTypeForSize(sizeBytes);
                if (pSimdBaseType != nullptr)
                {
                    *pSimdBaseType = simdBaseType;
                }
#ifdef _TARGET_AMD64_
                // Amd64: also indicate that we use floating point registers
                compFloatingPointUsed = true;
#endif
            }
        }
    }
#endif // FEATURE_SIMD

    // Fetch GC layout info if requested
    if (gcLayout != nullptr)
    {
        unsigned numGCVars = info.compCompHnd->getClassGClayout(structHnd, gcLayout);

        // Verify that the quick test up above via the class attributes gave a
        // safe view of the type's GCness.
        //
        // Note there are cases where hasGCPtrs is true but getClassGClayout
        // does not report any gc fields.
        assert(hasGCPtrs || (numGCVars == 0));

        if (pNumGCVars != nullptr)
        {
            *pNumGCVars = numGCVars;
        }
    }
    else
    {
        // Can't safely ask for number of GC pointers without also
        // asking for layout.
        assert(pNumGCVars == nullptr);
    }

    return structType;
}
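The Assumptions note above is easy to violate: getClassGClayout writes one byte per pointer-sized slot of the struct, so a non-null gcLayout buffer must be sized from the class size. A minimal standalone sketch of that sizing arithmetic, assuming a pointer-aligned struct; Example is a hypothetical stand-in, and the JIT would use info.compCompHnd->getClassSize rather than sizeof:

#include <cstddef>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-in for a managed struct with three pointer-sized slots.
struct Example
{
    void*   objRef; // GC reference: its layout byte would be nonzero
    int64_t scalar; // no GC reference: its layout byte would be zero
    void*   byRef;  // GC reference: its layout byte would be nonzero
};

int main()
{
    // One layout byte per pointer-sized slot, per the getClassGClayout contract.
    constexpr size_t slots = sizeof(Example) / sizeof(void*);
    uint8_t          gcLayout[slots] = {}; // caller-allocated, as the Assumptions note requires
    printf("gcLayout needs %zu bytes\n", slots);
    return 0;
}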
//****************************************************************************
// Given TYP_STRUCT value 'structVal', make sure it is 'canonical', that is
// it is either an OBJ or a MKREFANY node, or a node (e.g. GT_INDEX) that will be morphed.
//
GenTreePtr Compiler::impNormStructVal(GenTreePtr           structVal,
                                      CORINFO_CLASS_HANDLE structHnd,
                                      unsigned             curLevel,
                                      bool                 forceNormalization /*=false*/)
{
    assert(forceNormalization || varTypeIsStruct(structVal));
    assert(structHnd != NO_CLASS_HANDLE);

    var_types structType = structVal->TypeGet();
    bool      makeTemp   = false;

    if (structType == TYP_STRUCT)
    {
        structType = impNormStructType(structHnd);
    }

    bool                 alreadyNormalized = false;
    GenTreeLclVarCommon* structLcl         = nullptr;

    genTreeOps oper = structVal->OperGet();
    switch (oper)
    {
        // GT_RETURN and GT_MKREFANY don't capture the handle.
        case GT_RETURN:
            break;

        case GT_MKREFANY:
            alreadyNormalized = true;
            break;

        case GT_CALL:
            structVal->gtCall.gtRetClsHnd = structHnd;
            makeTemp                      = true;
            break;

        case GT_RET_EXPR:
            structVal->gtRetExpr.gtRetClsHnd = structHnd;
            makeTemp                         = true;
            break;

        case GT_ARGPLACE:
            structVal->gtArgPlace.gtArgPlaceClsHnd = structHnd;
            break;
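The switch above records the class handle in a different field depending on the node's operator, and notes which shapes (call results) must later be spilled to a temp. A minimal standalone miniature of that dispatch, with hypothetical NodeKind/Node types standing in for the GenTree hierarchy; this is an illustration, not JIT code:

// Hypothetical miniature of the normalization dispatch above.
enum class NodeKind
{
    Call,
    RetExpr,
    ArgPlace,
    MkRefAny
};

struct Node
{
    NodeKind    kind;
    const void* clsHnd = nullptr; // stand-in for CORINFO_CLASS_HANDLE
};

// Returns true when the value must be spilled to a temp, mirroring makeTemp.
static bool recordHandle(Node& n, const void* structHnd)
{
    switch (n.kind)
    {
        case NodeKind::Call:
        case NodeKind::RetExpr:
            n.clsHnd = structHnd; // call results capture the handle...
            return true;          // ...and also require a temp
        case NodeKind::ArgPlace:
            n.clsHnd = structHnd; // captures the handle, no temp needed
            return false;
        case NodeKind::MkRefAny:
            return false; // already canonical; handle not captured here
    }
    return false;
}

int main()
{
    Node call{NodeKind::Call};
    return recordHandle(call, &call) ? 0 : 1;
}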
@@ -3169,201 +3169,201 @@ GenTreePtr Compiler::impInitializeArrayIntrinsic(CORINFO_SIG_INFO* sig)
    }
    else
    {
        //
        // Make sure that the number of elements looks valid.
        //
        if (arrayLengthNode->gtOper != GT_CNS_INT)
        {
            return nullptr;
        }

        numElements = S_SIZE_T(arrayLengthNode->gtIntCon.gtIconVal);

        if (!info.compCompHnd->isSDArray(arrayClsHnd))
        {
            return nullptr;
        }
    }

    CORINFO_CLASS_HANDLE elemClsHnd;
    var_types elementType = JITtype2varType(info.compCompHnd->getChildType(arrayClsHnd, &elemClsHnd));

    //
    // Note that genTypeSize will return zero for non-primitive types, which is exactly
    // what we want (size will then be 0, and we will catch this in the conditional below).
    // Note that we don't expect this to fail for valid binaries, so we assert in the
    // non-verification case (the verification case should not assert but rather correctly
    // handle bad binaries). This assert is not guarding any specific invariant, but rather
    // saying that we don't expect this to happen, and if it is hit, we need to investigate
    // why.
    //
    S_UINT32 elemSize(genTypeSize(elementType));
    S_UINT32 size = elemSize * S_UINT32(numElements);

    if (size.IsOverflow())
    {
        return nullptr;
    }

    if ((size.Value() == 0) || (varTypeIsGC(elementType)))
    {
        assert(verNeedsVerification());
        return nullptr;
    }

    void* initData = info.compCompHnd->getArrayInitializationData(fieldToken, size.Value());
    if (!initData)
    {
        return nullptr;
    }

    //
    // At this point we are ready to commit to implementing the InitializeArray
    // intrinsic using a struct assignment. Pop the arguments from the stack and
    // return the struct assignment node.
    //
    impPopStack();
    impPopStack();

    const unsigned blkSize = size.Value();
    GenTreePtr     dst;

    if (isMDArray)
    {
        unsigned dataOffset = eeGetMDArrayDataOffset(elementType, rank);

        dst = gtNewOperNode(GT_ADD, TYP_BYREF, arrayLocalNode, gtNewIconNode(dataOffset, TYP_I_IMPL));
    }
    else
    {
        dst = gtNewOperNode(GT_ADDR, TYP_BYREF, gtNewIndexRef(elementType, arrayLocalNode, gtNewIconNode(0)));
    }

    GenTreePtr blk     = gtNewBlockVal(dst, blkSize);
    GenTreePtr srcAddr = gtNewIconHandleNode((size_t)initData, GTF_ICON_STATIC_HDL);
    GenTreePtr src     = gtNewOperNode(GT_IND, TYP_STRUCT, srcAddr);

    return gtNewBlkOpNode(blk,     // dst
                          src,     // src
                          blkSize, // size
                          false,   // volatil
                          true);   // copyBlock
}
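The S_UINT32 arithmetic above is what keeps elemSize * numElements from silently wrapping before the block size is trusted. A standalone sketch of the same overflow-checked multiply using plain integers; checkedArraySize is a hypothetical helper, not the JIT's S_UINT32 type:

#include <cstdint>
#include <limits>

// Hypothetical helper mirroring the S_UINT32 check: refuse the computation
// if elemSize * numElements would wrap around uint32_t.
static bool checkedArraySize(uint32_t elemSize, uint32_t numElements, uint32_t* outSize)
{
    if (elemSize != 0 && numElements > std::numeric_limits<uint32_t>::max() / elemSize)
    {
        return false; // overflow: the importer bails out and keeps the plain call
    }
    *outSize = elemSize * numElements;
    return true;
}

int main()
{
    uint32_t size;
    // 0x10000 elements of 0x10000 bytes each overflows uint32_t, so this fails.
    return checkedArraySize(0x10000u, 0x10000u, &size) ? 1 : 0;
}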
/*****************************************************************************/
// Returns the GenTree that should be used to do the intrinsic instead of the call.
// Returns NULL if an intrinsic cannot be used
GenTreePtr Compiler::impIntrinsic(CORINFO_CLASS_HANDLE  clsHnd,
                                  CORINFO_METHOD_HANDLE method,
                                  CORINFO_SIG_INFO*     sig,
                                  int                   memberRef,
                                  bool                  readonlyCall,
                                  bool                  tailCall,
                                  CorInfoIntrinsics*    pIntrinsicID)
{
    bool mustExpand = false;
#if COR_JIT_EE_VERSION > 460
    CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method, &mustExpand);
#else
-    CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
+    CorInfoIntrinsics intrinsicID = info.compCompHnd->getIntrinsicID(method);
#endif
    *pIntrinsicID = intrinsicID;

#ifndef _TARGET_ARM_
    genTreeOps interlockedOperator;
#endif

    if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContext)
    {
        // must be done regardless of DbgCode and MinOpts
        return gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL);
    }
#ifdef _TARGET_64BIT_
    if (intrinsicID == CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr)
    {
        // must be done regardless of DbgCode and MinOpts
        return gtNewOperNode(GT_ADDR, TYP_I_IMPL, gtNewLclvNode(lvaStubArgumentVar, TYP_I_IMPL));
    }
#else
    assert(intrinsicID != CORINFO_INTRINSIC_StubHelpers_GetStubContextAddr);
#endif

    GenTreePtr retNode = nullptr;

    //
    // We disable the inlining of intrinsics for MinOpts.
    //
    if (!mustExpand && (opts.compDbgCode || opts.MinOpts()))
    {
        *pIntrinsicID = CORINFO_INTRINSIC_Illegal;
        return retNode;
    }

    // Currently we don't have CORINFO_INTRINSIC_Exp because it does not
    // seem to work properly for Infinity values, and we don't do
    // CORINFO_INTRINSIC_Pow because it needs a helper which we currently don't have.

    var_types callType = JITtype2varType(sig->retType);

    /* First do the intrinsics which are always smaller than a call */

    switch (intrinsicID)
    {
        GenTreePtr op1, op2;

        case CORINFO_INTRINSIC_Sin:
        case CORINFO_INTRINSIC_Sqrt:
        case CORINFO_INTRINSIC_Abs:
        case CORINFO_INTRINSIC_Cos:
        case CORINFO_INTRINSIC_Round:
        case CORINFO_INTRINSIC_Cosh:
        case CORINFO_INTRINSIC_Sinh:
        case CORINFO_INTRINSIC_Tan:
        case CORINFO_INTRINSIC_Tanh:
        case CORINFO_INTRINSIC_Asin:
        case CORINFO_INTRINSIC_Acos:
        case CORINFO_INTRINSIC_Atan:
        case CORINFO_INTRINSIC_Atan2:
        case CORINFO_INTRINSIC_Log10:
        case CORINFO_INTRINSIC_Pow:
        case CORINFO_INTRINSIC_Exp:
        case CORINFO_INTRINSIC_Ceiling:
        case CORINFO_INTRINSIC_Floor:

            // These are math intrinsics
            assert(callType != TYP_STRUCT);

            op1 = nullptr;

#ifdef LEGACY_BACKEND
            if (IsTargetIntrinsic(intrinsicID))
#else
            // Intrinsics that are not implemented directly by target instructions will
            // be re-materialized as user calls in the rationalizer. For tail-prefixed
            // calls, don't do this optimization, because
            //  a) it would break backward compatibility with desktop .NET 4.6 / 4.6.1, and
            //  b) it would be a non-trivial task, or too late, to re-materialize a surviving
            //     tail-prefixed GT_INTRINSIC as a tail call in the rationalizer.
            if (!IsIntrinsicImplementedByUserCall(intrinsicID) || !tailCall)
#endif
            {
                switch (sig->numArgs)
                {
                    case 1:
                        op1 = impPopStack().val;

#if FEATURE_X87_DOUBLES
                        // The X87 stack doesn't differentiate between float/double,
                        // so it doesn't need a cast, but everybody else does.
                        // Just double-check that it is at least an FP type.
                        noway_assert(varTypeIsFloating(op1));
#else  // FEATURE_X87_DOUBLES
                        if (op1->TypeGet() != callType)
                        {
                            op1 = gtNewCastNode(callType, op1, callType);
                        }
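On the non-x87 path just above, an operand whose type differs from the call's return type gets an explicit cast node, because the IR (unlike C++ source) has no implicit conversions. A standalone illustration of the widening that gtNewCastNode models; the values are arbitrary:

#include <cmath>
#include <cstdio>

int main()
{
    float f = 0.5f;
    // The IR equivalent of this cast is the GT_CAST node created above:
    // the float operand is widened to the intrinsic's return type (double)
    // before the math operation consumes it.
    double widened = static_cast<double>(f);
    printf("%f\n", std::sqrt(widened));
    return 0;
}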