Commit 19d68e76 authored by Michelle McDaniel

Reformat jit sources with clang-tidy and format

This change is the result of running clang-tidy and clang-format on jit
sources.


Commit migrated from https://github.com/dotnet/coreclr/commit/36a2b906c008cd3693a9ab5aef7b4402addd6c74
Parent 2027080e
......@@ -33,10 +33,10 @@ BraceWrapping:
BreakBeforeBinaryOperators: None
BreakBeforeBraces: Allman
BreakBeforeTernaryOperators: true
BreakConstructorInitializersBeforeComma: false
BreakConstructorInitializersBeforeComma: true
ColumnLimit: 120
CommentPragmas: '^ IWYU pragma:'
ConstructorInitializerAllOnOneLineOrOnePerLine: false
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
......
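For reference, the two settings flipped above change how clang-format lays out constructor initializer lists: with BreakConstructorInitializersBeforeComma and ConstructorInitializerAllOnOneLineOrOnePerLine both true, a list that no longer fits on one line gets one initializer per line, with each continuation line leading with the comma. An illustrative sketch (hypothetical class, not taken from this diff):

    Widget::Widget(int size, IEEMemoryManager* memoryManager)
        : m_size(size)
        , m_memoryManager(memoryManager) // continuation lines lead with ','
        , m_firstPage(nullptr)
    {
    }

This matches the initializer style visible in the reformatted EHSuccessorIter constructor later in this commit.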
This diff is collapsed.
......@@ -20,15 +20,17 @@ private:
enum
{
POOLED_ALLOCATOR_NOTINITIALIZED = 0,
POOLED_ALLOCATOR_IN_USE = 1,
POOLED_ALLOCATOR_AVAILABLE = 2,
POOLED_ALLOCATOR_SHUTDOWN = 3,
POOLED_ALLOCATOR_IN_USE = 1,
POOLED_ALLOCATOR_AVAILABLE = 2,
POOLED_ALLOCATOR_SHUTDOWN = 3,
};
static PooledAllocator s_pooledAllocator;
static LONG s_pooledAllocatorState;
static LONG s_pooledAllocatorState;
PooledAllocator() : ArenaAllocator() {}
PooledAllocator() : ArenaAllocator()
{
}
PooledAllocator(IEEMemoryManager* memoryManager);
PooledAllocator(const PooledAllocator& other) = delete;
......@@ -61,7 +63,7 @@ bool ArenaAllocator::bypassHostAllocator()
// knobs for ensuring that we do not have buffer overruns in the JIT.
return JitConfig.JitDirectAlloc() != 0;
#else // defined(DEBUG)
#else // defined(DEBUG)
return false;
#endif // !defined(DEBUG)
}
......@@ -115,16 +117,16 @@ ArenaAllocator& ArenaAllocator::operator=(ArenaAllocator&& other)
assert(!isInitialized());
m_memoryManager = other.m_memoryManager;
m_firstPage = other.m_firstPage;
m_lastPage = other.m_lastPage;
m_nextFreeByte = other.m_nextFreeByte;
m_lastFreeByte = other.m_lastFreeByte;
m_firstPage = other.m_firstPage;
m_lastPage = other.m_lastPage;
m_nextFreeByte = other.m_nextFreeByte;
m_lastFreeByte = other.m_lastFreeByte;
other.m_memoryManager = nullptr;
other.m_firstPage = nullptr;
other.m_lastPage = nullptr;
other.m_nextFreeByte = nullptr;
other.m_lastFreeByte = nullptr;
other.m_firstPage = nullptr;
other.m_lastPage = nullptr;
other.m_nextFreeByte = nullptr;
other.m_lastFreeByte = nullptr;
return *this;
}
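The move-assignment above follows the usual steal-and-null pattern: the destination takes over the source's page list, then the source's pointers are cleared so destroying it cannot free pages it no longer owns. A minimal standalone sketch of the same idea (illustrative type, not the JIT's ArenaAllocator):

    struct PageList
    {
        void* m_firstPage = nullptr;

        PageList& operator=(PageList&& other)
        {
            m_firstPage       = other.m_firstPage; // take ownership
            other.m_firstPage = nullptr;           // source owns nothing now
            return *this;
        }
    };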
......@@ -196,12 +198,12 @@ void* ArenaAllocator::allocateNewPage(size_t size, bool canThrow)
}
// Append the new page to the end of the list
newPage->m_next = nullptr;
newPage->m_next = nullptr;
newPage->m_pageBytes = pageSize;
newPage->m_previous = m_lastPage;
newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
// Instead of letting it contain garbage (so to confuse us),
// set it to zero.
newPage->m_previous = m_lastPage;
newPage->m_usedBytes = 0; // m_usedBytes is meaningless until a new page is allocated.
// Instead of letting it contain garbage (so to confuse us),
// set it to zero.
if (m_lastPage != nullptr)
{
......@@ -230,7 +232,7 @@ void ArenaAllocator::destroy()
assert(isInitialized());
// Free all of the allocated pages
for (PageDescriptor* page = m_firstPage, *next; page != nullptr; page = next)
for (PageDescriptor *page = m_firstPage, *next; page != nullptr; page = next)
{
next = page->m_next;
freeHostMemory(page);
......@@ -238,10 +240,10 @@ void ArenaAllocator::destroy()
// Clear out the allocator's fields
m_memoryManager = nullptr;
m_firstPage = nullptr;
m_lastPage = nullptr;
m_nextFreeByte = nullptr;
m_lastFreeByte = nullptr;
m_firstPage = nullptr;
m_lastPage = nullptr;
m_nextFreeByte = nullptr;
m_lastFreeByte = nullptr;
}
// The debug version of the allocator may allocate directly from the
......@@ -277,7 +279,7 @@ void* ArenaAllocator::allocateHostMemory(size_t size)
{
return ClrAllocInProcessHeap(0, S_SIZE_T(size));
}
#else // defined(DEBUG)
#else // defined(DEBUG)
return m_memoryManager->ClrVirtualAlloc(nullptr, size, MEM_COMMIT, PAGE_READWRITE);
#endif // !defined(DEBUG)
}
......@@ -301,7 +303,7 @@ void ArenaAllocator::freeHostMemory(void* block)
{
ClrFreeInProcessHeap(0, block);
}
#else // defined(DEBUG)
#else // defined(DEBUG)
m_memoryManager->ClrVirtualFree(block, 0, MEM_RELEASE);
#endif // !defined(DEBUG)
}
......@@ -334,16 +336,16 @@ void* ArenaAllocator::allocateMemory(size_t size)
if (JitConfig.ShouldInjectFault() != 0)
{
// Force the underlying memory allocator (either the OS or the CLR hoster)
// Force the underlying memory allocator (either the OS or the CLR hoster)
// to allocate the memory. Any fault injection will kick in.
void* p = ClrAllocInProcessHeap(0, S_SIZE_T(1));
if (p != nullptr)
{
ClrFreeInProcessHeap(0, p);
}
else
else
{
NOMEM(); // Throw!
NOMEM(); // Throw!
}
}
......@@ -419,9 +421,7 @@ size_t ArenaAllocator::getTotalBytesUsed()
// subsystem.
void ArenaAllocator::startup()
{
s_defaultPageSize = bypassHostAllocator()
? (size_t)MIN_PAGE_SIZE
: (size_t)DEFAULT_PAGE_SIZE;
s_defaultPageSize = bypassHostAllocator() ? (size_t)MIN_PAGE_SIZE : (size_t)DEFAULT_PAGE_SIZE;
}
//------------------------------------------------------------------------
......@@ -433,13 +433,12 @@ void ArenaAllocator::shutdown()
}
PooledAllocator PooledAllocator::s_pooledAllocator;
LONG PooledAllocator::s_pooledAllocatorState = POOLED_ALLOCATOR_NOTINITIALIZED;
LONG PooledAllocator::s_pooledAllocatorState = POOLED_ALLOCATOR_NOTINITIALIZED;
//------------------------------------------------------------------------
// PooledAllocator::PooledAllocator:
// Constructs a `PooledAllocator`.
PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager)
: ArenaAllocator(memoryManager)
PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager) : ArenaAllocator(memoryManager)
{
}
......@@ -448,7 +447,7 @@ PooledAllocator::PooledAllocator(IEEMemoryManager* memoryManager)
// Move-assigns a `PooledAllocator`.
PooledAllocator& PooledAllocator::operator=(PooledAllocator&& other)
{
*((ArenaAllocator*)this) = std::move((ArenaAllocator&&)other);
*((ArenaAllocator*)this) = std::move((ArenaAllocator &&)other);
return *this;
}
......@@ -514,18 +513,18 @@ ArenaAllocator* PooledAllocator::getPooledAllocator(IEEMemoryManager* memoryMana
return &s_pooledAllocator;
case POOLED_ALLOCATOR_NOTINITIALIZED:
{
PooledAllocator allocator(memoryManager);
if (allocator.allocateNewPage(0, false) == nullptr)
{
PooledAllocator allocator(memoryManager);
if (allocator.allocateNewPage(0, false) == nullptr)
{
// Failed to grab the initial memory page.
InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_NOTINITIALIZED);
return nullptr;
}
s_pooledAllocator = std::move(allocator);
// Failed to grab the initial memory page.
InterlockedExchange(&s_pooledAllocatorState, POOLED_ALLOCATOR_NOTINITIALIZED);
return nullptr;
}
s_pooledAllocator = std::move(allocator);
}
return &s_pooledAllocator;
default:
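The hunk is cut off here, but the surrounding switch implements a claim-then-publish state machine over s_pooledAllocatorState: the first caller builds the pooled instance and publishes it, later callers atomically claim it, and a failed first-page allocation rolls the state back to POOLED_ALLOCATOR_NOTINITIALIZED (as shown above). A hedged sketch of the claim step using the Win32 interlocked API (simplified; the exact transitions live in the elided code):

    LONG oldState = InterlockedCompareExchange(&s_pooledAllocatorState,
                                               POOLED_ALLOCATOR_IN_USE,
                                               POOLED_ALLOCATOR_AVAILABLE);
    if (oldState == POOLED_ALLOCATOR_AVAILABLE)
    {
        return &s_pooledAllocator; // successfully claimed the pooled instance
    }
    // Otherwise fall through to the NOTINITIALIZED / default handling above.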
......@@ -546,7 +545,7 @@ void PooledAllocator::destroy()
assert(m_firstPage != nullptr);
// Free all but the first allocated page
for (PageDescriptor* page = m_firstPage->m_next, *next; page != nullptr; page = next)
for (PageDescriptor *page = m_firstPage->m_next, *next; page != nullptr; page = next)
{
next = page->m_next;
freeHostMemory(page);
......@@ -554,9 +553,9 @@ void PooledAllocator::destroy()
// Reset the relevant state to point back to the first byte of the first page
m_firstPage->m_next = nullptr;
m_lastPage = m_firstPage;
m_nextFreeByte = m_firstPage->m_contents;
m_lastFreeByte = (BYTE*)m_firstPage + m_firstPage->m_pageBytes;
m_lastPage = m_firstPage;
m_nextFreeByte = m_firstPage->m_contents;
m_lastFreeByte = (BYTE*)m_firstPage + m_firstPage->m_pageBytes;
assert(getTotalBytesAllocated() == s_defaultPageSize);
......
......@@ -33,7 +33,7 @@ protected:
enum
{
DEFAULT_PAGE_SIZE = 16 * OS_page_size,
MIN_PAGE_SIZE = sizeof(PageDescriptor)
MIN_PAGE_SIZE = sizeof(PageDescriptor)
};
static size_t s_defaultPageSize;
......@@ -44,7 +44,7 @@ protected:
PageDescriptor* m_lastPage;
// These two pointers (when non-null) will always point into 'm_lastPage'.
BYTE* m_nextFreeByte;
BYTE* m_nextFreeByte;
BYTE* m_lastFreeByte;
bool isInitialized();
......@@ -69,7 +69,7 @@ public:
#if defined(DEBUG)
void* allocateMemory(size_t sz);
#else // defined(DEBUG)
#else // defined(DEBUG)
inline void* allocateMemory(size_t size)
{
void* block = m_nextFreeByte;
......@@ -87,7 +87,7 @@ public:
size_t getTotalBytesAllocated();
size_t getTotalBytesUsed();
static bool bypassHostAllocator();
static bool bypassHostAllocator();
static size_t getDefaultPageSize();
static void startup();
......
......@@ -9,21 +9,21 @@ template <class T>
class ArrayStack
{
static const int builtinSize = 8;
public:
ArrayStack(Compiler *comp, int initialSize = builtinSize)
ArrayStack(Compiler* comp, int initialSize = builtinSize)
{
compiler = comp;
if (initialSize > builtinSize)
{
maxIndex = initialSize;
data = new(compiler, CMK_ArrayStack) T[initialSize];
data = new (compiler, CMK_ArrayStack) T[initialSize];
}
else
{
maxIndex = builtinSize;
data = builtinData;
data = builtinData;
}
tosIndex = 0;
......@@ -32,8 +32,10 @@ public:
void Push(T item)
{
if (tosIndex == maxIndex)
{
Realloc();
}
data[tosIndex] = item;
tosIndex++;
}
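Since Push() grows the backing store on demand, callers never size an ArrayStack up front. Illustrative usage (the GenTree* element type is an assumption; any T works):

    ArrayStack<GenTree*> stack(compiler); // starts on the 8-slot builtin buffer
    stack.Push(tree);                     // Realloc() doubles capacity when full
    GenTree* top = stack.Top();           // inspect without popping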
......@@ -43,9 +45,9 @@ public:
// get a new chunk 2x the size of the old one
// and copy over
T* oldData = data;
noway_assert(maxIndex*2 > maxIndex);
data = new(compiler, CMK_ArrayStack) T[maxIndex*2];
for (int i=0; i<maxIndex; i++)
noway_assert(maxIndex * 2 > maxIndex);
data = new (compiler, CMK_ArrayStack) T[maxIndex * 2];
for (int i = 0; i < maxIndex; i++)
{
data[i] = oldData[i];
}
......@@ -56,19 +58,21 @@ public:
void ReverseTop(int number)
{
if (number < 2)
{
return;
}
assert(number <= tosIndex);
int start = tosIndex - number;
int start = tosIndex - number;
int offset = 0;
while (offset < number/2)
while (offset < number / 2)
{
T temp;
int index = start+offset;
int otherIndex = tosIndex - 1 - offset;
temp = data[index];
data[index] = data[otherIndex];
T temp;
int index = start + offset;
int otherIndex = tosIndex - 1 - offset;
temp = data[index];
data[index] = data[otherIndex];
data[otherIndex] = temp;
offset++;
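(The hunk ends mid-loop.) As a worked example of the swap logic above: ReverseTop(3) on a stack holding 1, 2, 3, 4, 5 (5 on top) has start = 2 and performs a single swap of data[2] and data[4], leaving 1, 2, 5, 4, 3 with 3 now on top; the middle element stays put because offset only runs up to number / 2.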
......@@ -85,13 +89,13 @@ public:
T Top()
{
assert(tosIndex > 0);
return data[tosIndex-1];
return data[tosIndex - 1];
}
T& TopRef()
{
assert(tosIndex > 0);
return data[tosIndex-1];
return data[tosIndex - 1];
}
// return the i'th from the top
......@@ -133,13 +137,10 @@ public:
}
private:
Compiler *compiler; // needed for allocation
int tosIndex; // first free location
int maxIndex;
T* data;
Compiler* compiler; // needed for allocation
int tosIndex; // first free location
int maxIndex;
T* data;
// initial allocation
T builtinData[builtinSize];
T builtinData[builtinSize];
};
......@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#include "jitpch.h"
#ifdef _MSC_VER
#pragma hdrstop
......@@ -20,32 +19,26 @@ unsigned BitSetSupport::BitCountTable[16] = { 0, 1, 1, 2,
// clang-format on
#ifdef DEBUG
template<typename BitSetType,
unsigned Uniq,
typename Env,
typename BitSetTraits>
template <typename BitSetType, unsigned Uniq, typename Env, typename BitSetTraits>
void BitSetSupport::RunTests(Env env)
{
typedef BitSetOps<BitSetType,
Uniq,
Env,
BitSetTraits> LclBitSetOps;
typedef BitSetOps<BitSetType, Uniq, Env, BitSetTraits> LclBitSetOps;
// The tests require that the Size is at least 52...
assert(BitSetTraits::GetSize(env) > 51);
BitSetType bs1;
LclBitSetOps::AssignNoCopy(env, bs1, LclBitSetOps::MakeEmpty(env));
unsigned bs1bits[] = { 0, 10, 44, 45 };
unsigned bs1bits[] = {0, 10, 44, 45};
LclBitSetOps::AddElemD(env, bs1, bs1bits[0]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[1]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[2]);
LclBitSetOps::AddElemD(env, bs1, bs1bits[3]);
typename LclBitSetOps::Iter bsi(env, bs1);
unsigned bitNum = 0;
unsigned k = 0;
unsigned bitNum = 0;
unsigned k = 0;
while (bsi.NextElem(env, &bitNum))
{
assert(bitNum == bs1bits[k]);
......@@ -59,17 +52,17 @@ void BitSetSupport::RunTests(Env env)
BitSetType bs2;
LclBitSetOps::AssignNoCopy(env, bs2, LclBitSetOps::MakeEmpty(env));
unsigned bs2bits[] = { 0, 10, 50, 51 };
unsigned bs2bits[] = {0, 10, 50, 51};
LclBitSetOps::AddElemD(env, bs2, bs2bits[0]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[1]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[2]);
LclBitSetOps::AddElemD(env, bs2, bs2bits[3]);
unsigned unionBits[] = { 0, 10, 44, 45, 50, 51 };
unsigned unionBits[] = {0, 10, 44, 45, 50, 51};
BitSetType bsU12;
LclBitSetOps::AssignNoCopy(env, bsU12, LclBitSetOps::Union(env, bs1, bs2));
k = 0;
bsi = typename LclBitSetOps::Iter(env, bsU12);
k = 0;
bsi = typename LclBitSetOps::Iter(env, bsU12);
bitNum = 0;
while (bsi.NextElem(env, &bitNum))
{
......@@ -78,9 +71,9 @@ void BitSetSupport::RunTests(Env env)
}
assert(k == 6);
k = 0;
k = 0;
typename LclBitSetOps::Iter bsiL = typename LclBitSetOps::Iter(env, bsU12);
bitNum = 0;
bitNum = 0;
while (bsiL.NextElem(env, &bitNum))
{
assert(bitNum == unionBits[k]);
......@@ -88,11 +81,11 @@ void BitSetSupport::RunTests(Env env)
}
assert(k == 6);
unsigned intersectionBits[] = { 0, 10 };
unsigned intersectionBits[] = {0, 10};
BitSetType bsI12;
LclBitSetOps::AssignNoCopy(env, bsI12, LclBitSetOps::Intersection(env, bs1, bs2));
k = 0;
bsi = typename LclBitSetOps::Iter(env, bsI12);
k = 0;
bsi = typename LclBitSetOps::Iter(env, bsI12);
bitNum = 0;
while (bsi.NextElem(env, &bitNum))
{
......@@ -105,65 +98,81 @@ void BitSetSupport::RunTests(Env env)
class TestBitSetTraits
{
public:
static IAllocator* GetAllocator(IAllocator* alloc) { return alloc; }
static unsigned GetSize(IAllocator* alloc) { return 64; }
static IAllocator* GetAllocator(IAllocator* alloc)
{
return alloc;
}
static unsigned GetSize(IAllocator* alloc)
{
return 64;
}
static unsigned GetArrSize(IAllocator* alloc, unsigned elemSize)
{
assert(elemSize == sizeof(size_t));
return (64/8)/sizeof(size_t);
return (64 / 8) / sizeof(size_t);
}
static unsigned GetEpoch(IAllocator* alloc)
{
return 0;
}
static unsigned GetEpoch(IAllocator* alloc) { return 0; }
};
void BitSetSupport::TestSuite(IAllocator* env)
{
BitSetSupport::RunTests<UINT64, BSUInt64, IAllocator*, TestBitSetTraits>(env);
BitSetSupport::RunTests<BitSetShortLongRep, BSShortLong, IAllocator*, TestBitSetTraits>(env);
BitSetSupport::RunTests<BitSetUint64<IAllocator*, TestBitSetTraits>, BSUInt64Class, IAllocator*, TestBitSetTraits>(env);
BitSetSupport::RunTests<BitSetUint64<IAllocator*, TestBitSetTraits>, BSUInt64Class, IAllocator*, TestBitSetTraits>(
env);
}
#endif
const char* BitSetSupport::OpNames[BitSetSupport::BSOP_NUMOPS] =
{
const char* BitSetSupport::OpNames[BitSetSupport::BSOP_NUMOPS] = {
#define BSOPNAME(x) #x,
#include "bitsetops.h"
#undef BSOPNAME
};
};
void BitSetSupport::BitSetOpCounter::RecordOp(BitSetSupport::Operation op)
{
OpCounts[op]++; TotalOps++;
OpCounts[op]++;
TotalOps++;
if ((TotalOps % 1000000) == 0)
{
if (OpOutputFile == NULL)
if (OpOutputFile == nullptr)
{
OpOutputFile = fopen(m_fileName, "a");
}
fprintf(OpOutputFile, "@ %d total ops.\n", TotalOps);
unsigned OpOrder[BSOP_NUMOPS];
bool OpOrdered[BSOP_NUMOPS];
bool OpOrdered[BSOP_NUMOPS];
// First sort by total operations (into an index permutation array, using a simple n^2 sort).
for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++) OpOrdered[k] = false;
for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++)
{
bool candSet = false;
unsigned cand = 0;
OpOrdered[k] = false;
}
for (unsigned k = 0; k < BitSetSupport::BSOP_NUMOPS; k++)
{
bool candSet = false;
unsigned cand = 0;
unsigned candInd = 0;
for (unsigned j = 0; j < BitSetSupport::BSOP_NUMOPS; j++)
{
if (OpOrdered[j]) continue;
if (OpOrdered[j])
{
continue;
}
if (!candSet || OpCounts[j] > cand)
{
candInd = j;
cand = OpCounts[j];
cand = OpCounts[j];
candSet = true;
}
}
assert(candSet);
OpOrder[k] = candInd;
OpOrder[k] = candInd;
OpOrdered[candInd] = true;
}
......
......@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
// A set of integers in the range [0..N], for some given N.
/*****************************************************************************/
......@@ -15,10 +14,7 @@
class BitSetSupport
{
#ifdef DEBUG
template<typename BitSetType,
unsigned Brand,
typename Env,
typename BitSetTraits>
template <typename BitSetType, unsigned Brand, typename Env, typename BitSetTraits>
static void RunTests(Env env);
#endif
......@@ -29,12 +25,12 @@ public:
static unsigned BitCountTable[16];
// Returns the number of 1 bits in the binary representation of "u".
template<typename T>
template <typename T>
static unsigned CountBitsInIntegral(T u)
{
unsigned res = 0;
// We process "u" in 4-bit nibbles, hence the "*2" below.
for (int i = 0; i < sizeof(T)*2; i++)
for (int i = 0; i < sizeof(T) * 2; i++)
{
res += BitCountTable[u & 0xf];
u >>= 4;
......@@ -58,12 +54,13 @@ public:
class BitSetOpCounter
{
unsigned TotalOps;
unsigned OpCounts[BSOP_NUMOPS];
unsigned TotalOps;
unsigned OpCounts[BSOP_NUMOPS];
const char* m_fileName;
FILE* OpOutputFile;
public:
BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(NULL)
FILE* OpOutputFile;
public:
BitSetOpCounter(const char* fileName) : TotalOps(0), m_fileName(fileName), OpOutputFile(nullptr)
{
for (unsigned i = 0; i < BSOP_NUMOPS; i++)
{
......@@ -75,15 +72,15 @@ public:
};
};
template <> FORCEINLINE
unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
template <>
FORCEINLINE unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
{
// Make sure we're 32 bit.
assert(sizeof(unsigned) == 4);
c = (c & 0x55555555) + ((c >> 1) & 0x55555555);
c = (c & 0x33333333) + ((c >> 2) & 0x33333333);
c = (c & 0x0f0f0f0f) + ((c >> 4) & 0x0f0f0f0f);
c = (c & 0x00ff00ff) + ((c >> 8) & 0x00ff00ff);
c = (c & 0x55555555) + ((c >> 1) & 0x55555555);
c = (c & 0x33333333) + ((c >> 2) & 0x33333333);
c = (c & 0x0f0f0f0f) + ((c >> 4) & 0x0f0f0f0f);
c = (c & 0x00ff00ff) + ((c >> 8) & 0x00ff00ff);
c = (c & 0x0000ffff) + ((c >> 16) & 0x0000ffff);
return c;
}
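A worked check of the pairwise summation above, for c = 0xF0 (four set bits): the first line leaves each 2-bit field holding its pair's population count (0xF0 becomes 0xA0, i.e. fields 2,2,0,0), the second folds those into 4-bit fields (0xA0 becomes 0x40, i.e. 4,0), and the remaining lines accumulate the total into the low bits, ending at 0x00000004. As an assertion (illustrative):

    assert(BitSetSupport::CountBitsInIntegral<unsigned>(0xF0u) == 4);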
......@@ -112,14 +109,14 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// function, which makes a copy of the referent data structure in the indirect case, and an
// "AssignNoCopy" version, which does not, and instead introduces sharing in the indirect case.
// Obviously, the latter should be used with care.
//
//
// (Orthogonally, there are also further versions of assignment that differ in whether the "rhs"
// argument may be uninitialized. The normal assignment operation requires the "rhs" argument not be
// uninitialized; "AssignNoCopy" has the same requirement. The "AssignAllowUninitRhs" version allows
// the "rhs" to be the uninit value, and sets the "lhs" to be uninitialized in that case.)
// This class has static methods that provide the operations on BitSets.
//
//
// An instantiation requires:
// typename BitSetType: the representation type of this kind of BitSet.
//
......@@ -141,7 +138,7 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// "GetSize". A given BitSet implementation must call
// this with only one constant value. Thus, and "Env"
// may compute this result when GetSize changes.
//
//
// static unsigned GetEpoch(Env): the current epoch.
//
// (For many instantiations, BitSetValueArgType and BitSetValueRetType will be the same as BitSetType; in cases where
......@@ -154,18 +151,15 @@ unsigned BitSetSupport::CountBitsInIntegral<unsigned>(unsigned c)
// yielded member.
//
// Finally, it should export two further types:
//
//
// ValArgType: the type used to pass a BitSet as a by-value argument.
// RetValType: the type that should be used to return a BitSet.
//
//
// For many instantiations, these can be identical to BitSetTypes. When the representation type is a class,
// however, ValArgType may need to be "const BitSetType&", and RetValArg may need to be a helper class, if the
// class hides default copy constructors and assignment operators to detect erroneous usage.
//
template<typename BitSetType,
unsigned Brand,
typename Env,
typename BitSetTraits>
template <typename BitSetType, unsigned Brand, typename Env, typename BitSetTraits>
class BitSetOps
{
#if 0
......@@ -278,25 +272,22 @@ class BitSetOps
typename ValArgType;
typename RetValType;
#endif // 0 -- the above is #if'd out, since it's really just an extended comment on what an instantiation
#endif // 0 -- the above is #if'd out, since it's really just an extended comment on what an instantiation
// should provide.
};
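As a concrete anchor for the instantiation requirements described above, the pairing this commit's own test code uses combines the plain UINT64 representation with an allocator-only environment:

    // Mirrors RunTests<UINT64, BSUInt64, IAllocator*, TestBitSetTraits>(env)
    // from bitset.cpp earlier in this commit; the typedef name is illustrative.
    typedef BitSetOps<UINT64, BSUInt64, IAllocator*, TestBitSetTraits> SmallSetOps;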
template<typename BitSetType,
unsigned Brand,
typename Env,
typename BitSetTraits,
typename BitSetValueArgType,
typename BitSetValueRetType,
typename BaseIter>
template <typename BitSetType,
unsigned Brand,
typename Env,
typename BitSetTraits,
typename BitSetValueArgType,
typename BitSetValueRetType,
typename BaseIter>
class BitSetOpsWithCounter
{
typedef BitSetOps<BitSetType,
Brand,
Env,
BitSetTraits> BSO;
typedef BitSetOps<BitSetType, Brand, Env, BitSetTraits> BSO;
public:
public:
static BitSetValueRetType UninitVal()
{
return BSO::UninitVal();
......@@ -433,13 +424,17 @@ class BitSetOpsWithCounter
}
#endif
class Iter {
class Iter
{
BaseIter m_iter;
public:
Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs) {}
public:
Iter(Env env, BitSetValueArgType bs) : m_iter(env, bs)
{
}
bool NextElem(Env env, unsigned* pElem) {
bool NextElem(Env env, unsigned* pElem)
{
BitSetTraits::GetOpCounter(env)->RecordOp(BitSetSupport::BSOP_NextBit);
return m_iter.NextElem(env, pElem);
}
......@@ -448,12 +443,10 @@ class BitSetOpsWithCounter
// We define symbolic names for the various bitset implementations available, to allow choices between them.
#define BSUInt64 0
#define BSShortLong 1
#define BSUInt64 0
#define BSShortLong 1
#define BSUInt64Class 2
/*****************************************************************************/
#endif // _BITSET_H_
/*****************************************************************************/
......@@ -2,29 +2,28 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#ifndef bitSetAsUint64_DEFINED
#define bitSetAsUint64_DEFINED 1
#include "bitset.h"
template<typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/UINT64,
/*Brand*/BSUInt64,
/*Env*/Env,
/*BitSetTraits*/BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/ UINT64,
/*Brand*/ BSUInt64,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>
{
public:
typedef UINT64 Rep;
private:
static UINT64 Singleton(unsigned bitNum)
{
assert(bitNum < sizeof(UINT64)*BitSetSupport::BitsInByte);
assert(bitNum < sizeof(UINT64) * BitSetSupport::BitsInByte);
return (UINT64)1 << bitNum;
}
public:
static void Assign(Env env, UINT64& lhs, UINT64 rhs)
{
lhs = rhs;
......@@ -99,7 +98,7 @@ public:
static UINT64 RemoveElem(Env env, UINT64 bs1, unsigned i)
{
return bs1 & ~Singleton(i);
return bs1 & ~Singleton(i);
}
static void AddElemD(Env env, UINT64& bs1, unsigned i)
......@@ -153,7 +152,7 @@ public:
static UINT64 MakeFull(Env env)
{
unsigned sz = BitSetTraits::GetSize(env);
if (sz == sizeof(UINT64)*8)
if (sz == sizeof(UINT64) * 8)
{
return UINT64(-1);
}
......@@ -166,23 +165,25 @@ public:
#ifdef DEBUG
static const char* ToString(Env env, UINT64 bs)
{
IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
const int CharsForUINT64 = sizeof(UINT64)*2;
char * res = NULL;
const int AllocSize = CharsForUINT64 + 4;
res = (char*)alloc->Alloc(AllocSize);
UINT64 bits = bs;
unsigned remaining = AllocSize;
char* ptr = res;
IAllocator* alloc = BitSetTraits::GetDebugOnlyAllocator(env);
const int CharsForUINT64 = sizeof(UINT64) * 2;
char* res = NULL;
const int AllocSize = CharsForUINT64 + 4;
res = (char*)alloc->Alloc(AllocSize);
UINT64 bits = bs;
unsigned remaining = AllocSize;
char* ptr = res;
for (unsigned bytesDone = 0; bytesDone < sizeof(UINT64); bytesDone += sizeof(unsigned))
{
unsigned bits0 = (unsigned)bits;
sprintf_s(ptr, remaining, "%08X", bits0);
ptr += 8;
remaining -= 8;
bytesDone += 4; assert(sizeof(unsigned) == 4);
bytesDone += 4;
assert(sizeof(unsigned) == 4);
// Doing this twice by 16, rather than once by 32, avoids warnings when size_t == unsigned.
bits = bits >> 16; bits = bits >> 16;
bits = bits >> 16;
bits = bits >> 16;
}
return res;
}
......@@ -201,16 +202,22 @@ public:
class Iter
{
UINT64 m_bits;
public:
Iter(Env env, const UINT64& bits) : m_bits(bits) {}
public:
Iter(Env env, const UINT64& bits) : m_bits(bits)
{
}
bool NextElem(Env env, unsigned* pElem)
{
if (m_bits)
{
unsigned bitNum = *pElem;
while ((m_bits & 0x1) == 0) { bitNum++; m_bits >>= 1; }
while ((m_bits & 0x1) == 0)
{
bitNum++;
m_bits >>= 1;
}
*pElem = bitNum;
m_bits &= ~0x1;
return true;
......
......@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
#ifndef bitSetAsUint64InClass_DEFINED
#define bitSetAsUint64InClass_DEFINED 1
......@@ -10,21 +9,23 @@
#include "bitsetasuint64.h"
#include "stdmacros.h"
template<typename Env, typename BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType;
template<typename Env, typename BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetUint64Iter;
template<typename Env, typename BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetUint64
{
public:
typedef BitSetUint64<Env,BitSetTraits> Rep;
typedef BitSetUint64<Env, BitSetTraits> Rep;
private:
friend class BitSetOps</*BitSetType*/BitSetUint64<Env,BitSetTraits>,
/*Brand*/BSUInt64Class,
/*Env*/Env, /*BitSetTraits*/BitSetTraits>;
friend class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>;
friend class BitSetUint64ValueRetType<Env, BitSetTraits>;
friend class BitSetUint64Iter<Env, BitSetTraits>;
......@@ -46,11 +47,11 @@ private:
#ifdef DEBUG
// In debug, make sure we don't have any public assignment, by making this private.
BitSetUint64& operator=(const BitSetUint64& bs)
BitSetUint64& operator=(const BitSetUint64& bs)
{
m_bits = bs.m_bits;
m_bits = bs.m_bits;
m_epoch = bs.m_epoch;
return (*this);
return (*this);
}
#endif // DEBUG
......@@ -58,7 +59,7 @@ private:
{
return m_bits == bs.m_bits
#ifdef DEBUG
&& m_epoch == bs.m_epoch
&& m_epoch == bs.m_epoch
#endif
;
}
......@@ -71,24 +72,23 @@ private:
// int argument, and just make copy constructor defined here visible.
public:
#endif
BitSetUint64(const BitSetUint64& bs) : m_bits(bs.m_bits)
BitSetUint64(const BitSetUint64& bs)
: m_bits(bs.m_bits)
#ifdef DEBUG
, m_epoch(bs.m_epoch)
#endif
{}
{
}
#ifdef DEBUG
public:
// But we add a public constructor that's *almost* the default constructor.
BitSetUint64(const BitSetUint64& bs, int xxx) : m_bits(bs.m_bits)
, m_epoch(bs.m_epoch)
{}
BitSetUint64(const BitSetUint64& bs, int xxx) : m_bits(bs.m_bits), m_epoch(bs.m_epoch)
{
}
#endif
private:
// Return the number of bits set in the BitSet.
inline unsigned Count(Env env) const
{
......@@ -112,7 +112,6 @@ private:
return res;
}
inline void RemoveElemD(Env env, unsigned i)
{
CheckEpoch(env);
......@@ -127,7 +126,6 @@ private:
return res;
}
inline void AddElemD(Env env, unsigned i)
{
CheckEpoch(env);
......@@ -222,89 +220,105 @@ private:
return Uint64BitSetOps::ToString(env, m_bits);
}
public:
public:
// Uninint
BitSetUint64() :
m_bits(0)
BitSetUint64()
: m_bits(0)
#ifdef DEBUG
, m_epoch(UINT32_MAX) // Undefined.
#endif
{}
{
}
BitSetUint64(Env env, bool full = false) :
m_bits(0)
BitSetUint64(Env env, bool full = false)
: m_bits(0)
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
if (full) m_bits = Uint64BitSetOps::MakeFull(env);
if (full)
{
m_bits = Uint64BitSetOps::MakeFull(env);
}
}
inline BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt);
BitSetUint64(Env env, unsigned bitNum) :
m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
BitSetUint64(Env env, unsigned bitNum)
: m_bits(Uint64BitSetOps::MakeSingleton(env, bitNum))
#ifdef DEBUG
, m_epoch(BitSetTraits::GetEpoch(env))
#endif
{
assert(bitNum < BitSetTraits::GetSize(env));
}
};
template<typename Env, typename BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetUint64ValueRetType
{
friend class BitSetUint64<Env, BitSetTraits>;
BitSetUint64<Env, BitSetTraits> m_bs;
public:
BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs) {}
BitSetUint64ValueRetType(const BitSetUint64<Env, BitSetTraits>& bs) : m_bs(bs)
{
}
};
template<typename Env, typename BitSetTraits>
BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt) : m_bits(rt.m_bs.m_bits)
template <typename Env, typename BitSetTraits>
BitSetUint64<Env, BitSetTraits>::BitSetUint64(const BitSetUint64ValueRetType<Env, BitSetTraits>& rt)
: m_bits(rt.m_bs.m_bits)
#ifdef DEBUG
, m_epoch(rt.m_bs.m_epoch)
#endif
{}
{
}
// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
// You *can* clear a bit after it's been iterated. But you shouldn't otherwise mutate the
// bitset during bit iteration.
template<typename Env, typename BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetUint64Iter
{
UINT64 m_bits;
unsigned m_bitNum;
UINT64 m_bits;
unsigned m_bitNum;
public:
BitSetUint64Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs): m_bits(bs.m_bits), m_bitNum(0) {}
BitSetUint64Iter(Env env, const BitSetUint64<Env, BitSetTraits>& bs) : m_bits(bs.m_bits), m_bitNum(0)
{
}
bool NextElem(Env env, unsigned* pElem)
{
static const unsigned UINT64_SIZE = 64;
if ((m_bits & 0x1) != 0)
{
*pElem = m_bitNum; m_bitNum++; m_bits >>= 1; return true;
}
else
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
// Skip groups of 4 zeros -- an optimization for sparse bitsets.
while (m_bitNum < UINT64_SIZE && (m_bits & 0xf) == 0)
{
m_bitNum += 4; m_bits >>= 4;
m_bitNum += 4;
m_bits >>= 4;
}
while (m_bitNum < UINT64_SIZE && (m_bits & 0x1) == 0)
{
m_bitNum += 1; m_bits >>= 1;
m_bitNum += 1;
m_bits >>= 1;
}
if (m_bitNum < UINT64_SIZE)
{
*pElem = m_bitNum; m_bitNum++; m_bits >>= 1; return true;
*pElem = m_bitNum;
m_bitNum++;
m_bits >>= 1;
return true;
}
else
{
......@@ -314,19 +328,20 @@ public:
}
};
template<typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/BitSetUint64<Env,BitSetTraits>,
/*Brand*/BSUInt64Class,
/*Env*/Env, /*BitSetTraits*/BitSetTraits>
template <typename Env, typename BitSetTraits>
class BitSetOps</*BitSetType*/ BitSetUint64<Env, BitSetTraits>,
/*Brand*/ BSUInt64Class,
/*Env*/ Env,
/*BitSetTraits*/ BitSetTraits>
{
typedef BitSetUint64<Env,BitSetTraits> BST;
typedef const BitSetUint64<Env,BitSetTraits>& BSTValArg;
typedef BitSetUint64ValueRetType<Env,BitSetTraits> BSTRetVal;
typedef BitSetUint64<Env, BitSetTraits> BST;
typedef const BitSetUint64<Env, BitSetTraits>& BSTValArg;
typedef BitSetUint64ValueRetType<Env, BitSetTraits> BSTRetVal;
public:
static BSTRetVal UninitVal()
{
return BitSetUint64<Env,BitSetTraits>();
return BitSetUint64<Env, BitSetTraits>();
}
static bool MayBeUninit(BSTValArg bs)
......@@ -467,7 +482,7 @@ public:
static BSTRetVal MakeFull(Env env)
{
return BST(env, /*full*/true);
return BST(env, /*full*/ true);
}
#ifdef DEBUG
......
......@@ -2,7 +2,6 @@
// The .NET Foundation licenses this file to you under the MIT license.
// See the LICENSE file in the project root for more information.
BSOPNAME(BSOP_Assign)
BSOPNAME(BSOP_AssignAllowUninitRhs)
BSOPNAME(BSOP_AssignNocopy)
......
......@@ -14,7 +14,7 @@
//
// BitVecTraits traits(size, pCompiler);
// BitVec bitvec = BitVecOps::MakeEmpty(&traits);
//
//
// and call functions like so:
//
// BitVecOps::AddElemD(&traits, bitvec, 10);
......@@ -25,17 +25,17 @@
#include "compilerbitsettraits.h"
#include "bitsetasshortlong.h"
typedef BitSetOps</*BitSetType*/BitSetShortLongRep,
/*Brand*/BSShortLong,
/*Env*/BitVecTraits*,
/*BitSetTraits*/BitVecTraits>
BitVecOps;
typedef BitSetOps</*BitSetType*/ BitSetShortLongRep,
/*Brand*/ BSShortLong,
/*Env*/ BitVecTraits*,
/*BitSetTraits*/ BitVecTraits>
BitVecOps;
typedef BitSetShortLongRep BitVec;
typedef BitSetShortLongRep BitVec;
// These types should be used as the types for BitVec arguments and return values, respectively.
typedef BitVecOps::ValArgType BitVec_ValArg_T;
typedef BitVecOps::RetValType BitVec_ValRet_T;
typedef BitVecOps::ValArgType BitVec_ValArg_T;
typedef BitVecOps::RetValType BitVec_ValRet_T;
// Initialize "_varName" to "_initVal." Copies contents, not references; if "_varName" is uninitialized, allocates a
// set for it (using "_traits" for any necessary allocation), and copies the contents of "_initVal" into it.
......@@ -49,8 +49,8 @@ typedef BitVecOps::RetValType BitVec_ValRet_T;
// Use this to initialize an iterator "_iterName" to iterate over a BitVec "_bitVec".
// "_bitNum" will be an unsigned variable to which we assign the elements of "_bitVec".
#define BITVEC_ITER_INIT(_traits, _iterName, _bitVec, _bitNum) \
unsigned _bitNum = 0; \
#define BITVEC_ITER_INIT(_traits, _iterName, _bitVec, _bitNum) \
unsigned _bitNum = 0; \
BitVecOps::Iter _iterName(_traits, _bitVec)
#endif // _BITVEC_INCLUDED_
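Putting the header's example together with the iterator macro, a typical walk over a BitVec looks like this (sketch using only names shown above; size and pCompiler come from the caller):

    BitVecTraits traits(size, pCompiler);
    BitVec       bitvec = BitVecOps::MakeEmpty(&traits);
    BitVecOps::AddElemD(&traits, bitvec, 10);

    BITVEC_ITER_INIT(&traits, iter, bitvec, bitNum);
    while (iter.NextElem(&traits, &bitNum))
    {
        // bitNum visits each set element; here, just 10.
    }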
......@@ -20,10 +20,10 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
flowList* ShuffleHelper(unsigned hash, flowList* res)
{
flowList* head = res;
for (flowList* prev = NULL; res != NULL; prev = res, res = res->flNext)
for (flowList *prev = nullptr; res != nullptr; prev = res, res = res->flNext)
{
unsigned blkHash = (hash ^ (res->flBlock->bbNum << 16) ^ res->flBlock->bbNum);
if (((blkHash % 1879) & 1) && prev != NULL)
if (((blkHash % 1879) & 1) && prev != nullptr)
{
// Swap res with head.
prev->flNext = head;
......@@ -51,26 +51,26 @@ unsigned SsaStressHashHelper()
}
#endif
EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block) :
m_comp(comp),
m_block(block),
m_curRegSucc(NULL),
m_curTry(comp->ehGetBlockExnFlowDsc(block)),
m_remainingRegSuccs(block->NumSucc(comp))
EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block)
: m_comp(comp)
, m_block(block)
, m_curRegSucc(nullptr)
, m_curTry(comp->ehGetBlockExnFlowDsc(block))
, m_remainingRegSuccs(block->NumSucc(comp))
{
// If "block" is a "leave helper" block (the empty BBJ_ALWAYS block that pairs with a
// preceding BBJ_CALLFINALLY block to implement a "leave" IL instruction), then no exceptions
// can occur within it, so clear m_curTry if it's non-null.
if (m_curTry != NULL)
if (m_curTry != nullptr)
{
BasicBlock* beforeBlock = block->bbPrev;
if (beforeBlock != NULL && beforeBlock->isBBCallAlwaysPair())
if (beforeBlock != nullptr && beforeBlock->isBBCallAlwaysPair())
{
m_curTry = NULL;
m_curTry = nullptr;
}
}
if (m_curTry == NULL && m_remainingRegSuccs > 0)
if (m_curTry == nullptr && m_remainingRegSuccs > 0)
{
// Examine the successors to see if any are the start of try blocks.
FindNextRegSuccTry();
......@@ -79,7 +79,7 @@ EHSuccessorIter::EHSuccessorIter(Compiler* comp, BasicBlock* block) :
void EHSuccessorIter::FindNextRegSuccTry()
{
assert(m_curTry == NULL);
assert(m_curTry == nullptr);
// Must now consider the next regular successor, if any.
while (m_remainingRegSuccs > 0)
......@@ -94,7 +94,9 @@ void EHSuccessorIter::FindNextRegSuccTry()
// If the try region started by "m_curRegSucc" (represented by newTryIndex) contains m_block,
// we've already yielded its handler, as one of the EH handler successors of m_block itself.
if (m_comp->bbInExnFlowRegions(newTryIndex, m_block))
{
continue;
}
// Otherwise, consider this try.
m_curTry = m_comp->ehGetDsc(newTryIndex);
......@@ -105,24 +107,24 @@ void EHSuccessorIter::FindNextRegSuccTry()
void EHSuccessorIter::operator++(void)
{
assert(m_curTry != NULL);
assert(m_curTry != nullptr);
if (m_curTry->ebdEnclosingTryIndex != EHblkDsc::NO_ENCLOSING_INDEX)
{
m_curTry = m_comp->ehGetDsc(m_curTry->ebdEnclosingTryIndex);
// If we've gone over into considering try's containing successors,
// then the enclosing try must have the successor as its first block.
if (m_curRegSucc == NULL || m_curTry->ebdTryBeg == m_curRegSucc)
if (m_curRegSucc == nullptr || m_curTry->ebdTryBeg == m_curRegSucc)
{
return;
}
// Otherwise, give up, try the next regular successor.
m_curTry = NULL;
m_curTry = nullptr;
}
else
{
m_curTry = NULL;
m_curTry = nullptr;
}
// We've exhausted all try blocks.
......@@ -132,14 +134,14 @@ void EHSuccessorIter::operator++(void)
BasicBlock* EHSuccessorIter::operator*()
{
assert(m_curTry != NULL);
assert(m_curTry != nullptr);
return m_curTry->ExFlowBlock();
}
flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
{
BlockToFlowListMap* ehPreds = GetBlockToEHPreds();
flowList* res;
flowList* res;
if (ehPreds->Lookup(blk, &res))
{
return res;
......@@ -150,21 +152,22 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
if (bbIsExFlowBlock(blk, &tryIndex))
{
// Find the first block of the try.
EHblkDsc* ehblk = ehGetDsc(tryIndex);
EHblkDsc* ehblk = ehGetDsc(tryIndex);
BasicBlock* tryStart = ehblk->ebdTryBeg;
for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr; tryStartPreds = tryStartPreds->flNext)
for (flowList* tryStartPreds = tryStart->bbPreds; tryStartPreds != nullptr;
tryStartPreds = tryStartPreds->flNext)
{
res = new (this, CMK_FlowList) flowList(tryStartPreds->flBlock, res);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
}
// Now add all blocks handled by this handler (except for second blocks of BBJ_CALLFINALLY/BBJ_ALWAYS pairs;
// these cannot cause transfer to the handler...)
BasicBlock* prevBB = NULL;
BasicBlock* prevBB = nullptr;
// TODO-Throughput: It would be nice if we could iterate just over the blocks in the try, via
// something like:
......@@ -172,14 +175,14 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
// (plus adding in any filter blocks outside the try whose exceptions are handled here).
// That doesn't work, however: funclets have caused us to sometimes split the body of a try into
// more than one sequence of contiguous blocks. We need to find a better way to do this.
for (BasicBlock* bb = fgFirstBB; bb != NULL; prevBB = bb, bb = bb->bbNext)
for (BasicBlock *bb = fgFirstBB; bb != nullptr; prevBB = bb, bb = bb->bbNext)
{
if (bbInExnFlowRegions(tryIndex, bb) && (prevBB == NULL || !prevBB->isBBCallAlwaysPair()))
if (bbInExnFlowRegions(tryIndex, bb) && (prevBB == nullptr || !prevBB->isBBCallAlwaysPair()))
{
res = new (this, CMK_FlowList) flowList(bb, res);
#if MEASURE_BLOCK_SIZE
genFlowNodeCnt += 1;
genFlowNodeCnt += 1;
genFlowNodeSize += sizeof(flowList);
#endif // MEASURE_BLOCK_SIZE
}
......@@ -198,7 +201,6 @@ flowList* Compiler::BlockPredsWithEH(BasicBlock* blk)
return res;
}
#ifdef DEBUG
//------------------------------------------------------------------------
......@@ -212,7 +214,8 @@ void BasicBlock::dspBlockILRange()
}
else
{
printf("[???" "..");
printf("[???"
"..");
}
if (bbCodeOffsEnd != BAD_IL_OFFSET)
......@@ -223,7 +226,8 @@ void BasicBlock::dspBlockILRange()
else
{
// brace-matching editor workaround for following line: (
printf("???" ")");
printf("???"
")");
}
}
......@@ -232,38 +236,126 @@ void BasicBlock::dspBlockILRange()
//
void BasicBlock::dspFlags()
{
if (bbFlags & BBF_VISITED) printf("v ");
if (bbFlags & BBF_MARKED) printf("m ");
if (bbFlags & BBF_CHANGED) printf("! ");
if (bbFlags & BBF_REMOVED) printf("del ");
if (bbFlags & BBF_DONT_REMOVE) printf("keep ");
if (bbFlags & BBF_IMPORTED) printf("i ");
if (bbFlags & BBF_INTERNAL) printf("internal ");
if (bbFlags & BBF_FAILED_VERIFICATION) printf("failV ");
if (bbFlags & BBF_TRY_BEG) printf("try ");
if (bbFlags & BBF_NEEDS_GCPOLL) printf("poll ");
if (bbFlags & BBF_RUN_RARELY) printf("rare ");
if (bbFlags & BBF_LOOP_HEAD) printf("Loop ");
if (bbFlags & BBF_LOOP_CALL0) printf("Loop0 ");
if (bbFlags & BBF_LOOP_CALL1) printf("Loop1 ");
if (bbFlags & BBF_HAS_LABEL) printf("label ");
if (bbFlags & BBF_JMP_TARGET) printf("target ");
if (bbFlags & BBF_HAS_JMP) printf("jmp ");
if (bbFlags & BBF_GC_SAFE_POINT) printf("gcsafe ");
if (bbFlags & BBF_FUNCLET_BEG) printf("flet ");
if (bbFlags & BBF_HAS_IDX_LEN) printf("idxlen ");
if (bbFlags & BBF_HAS_NEWARRAY) printf("new[] ");
if (bbFlags & BBF_HAS_NEWOBJ) printf("newobj ");
if (bbFlags & BBF_VISITED)
{
printf("v ");
}
if (bbFlags & BBF_MARKED)
{
printf("m ");
}
if (bbFlags & BBF_CHANGED)
{
printf("! ");
}
if (bbFlags & BBF_REMOVED)
{
printf("del ");
}
if (bbFlags & BBF_DONT_REMOVE)
{
printf("keep ");
}
if (bbFlags & BBF_IMPORTED)
{
printf("i ");
}
if (bbFlags & BBF_INTERNAL)
{
printf("internal ");
}
if (bbFlags & BBF_FAILED_VERIFICATION)
{
printf("failV ");
}
if (bbFlags & BBF_TRY_BEG)
{
printf("try ");
}
if (bbFlags & BBF_NEEDS_GCPOLL)
{
printf("poll ");
}
if (bbFlags & BBF_RUN_RARELY)
{
printf("rare ");
}
if (bbFlags & BBF_LOOP_HEAD)
{
printf("Loop ");
}
if (bbFlags & BBF_LOOP_CALL0)
{
printf("Loop0 ");
}
if (bbFlags & BBF_LOOP_CALL1)
{
printf("Loop1 ");
}
if (bbFlags & BBF_HAS_LABEL)
{
printf("label ");
}
if (bbFlags & BBF_JMP_TARGET)
{
printf("target ");
}
if (bbFlags & BBF_HAS_JMP)
{
printf("jmp ");
}
if (bbFlags & BBF_GC_SAFE_POINT)
{
printf("gcsafe ");
}
if (bbFlags & BBF_FUNCLET_BEG)
{
printf("flet ");
}
if (bbFlags & BBF_HAS_IDX_LEN)
{
printf("idxlen ");
}
if (bbFlags & BBF_HAS_NEWARRAY)
{
printf("new[] ");
}
if (bbFlags & BBF_HAS_NEWOBJ)
{
printf("newobj ");
}
#if FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
if (bbFlags & BBF_FINALLY_TARGET) printf("ftarget ");
if (bbFlags & BBF_FINALLY_TARGET)
printf("ftarget ");
#endif // FEATURE_EH_FUNCLETS && defined(_TARGET_ARM_)
if (bbFlags & BBF_BACKWARD_JUMP) printf("bwd ");
if (bbFlags & BBF_RETLESS_CALL) printf("retless ");
if (bbFlags & BBF_LOOP_PREHEADER) printf("LoopPH ");
if (bbFlags & BBF_COLD) printf("cold ");
if (bbFlags & BBF_PROF_WEIGHT) printf("IBC ");
if (bbFlags & BBF_FORWARD_SWITCH) printf("fswitch ");
if (bbFlags & BBF_KEEP_BBJ_ALWAYS) printf("KEEP ");
if (bbFlags & BBF_BACKWARD_JUMP)
{
printf("bwd ");
}
if (bbFlags & BBF_RETLESS_CALL)
{
printf("retless ");
}
if (bbFlags & BBF_LOOP_PREHEADER)
{
printf("LoopPH ");
}
if (bbFlags & BBF_COLD)
{
printf("cold ");
}
if (bbFlags & BBF_PROF_WEIGHT)
{
printf("IBC ");
}
if (bbFlags & BBF_FORWARD_SWITCH)
{
printf("fswitch ");
}
if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
{
printf("KEEP ");
}
}
/*****************************************************************************
......@@ -272,7 +364,7 @@ void BasicBlock::dspFlags()
* Returns the number of characters printed.
*/
unsigned BasicBlock::dspPreds()
unsigned BasicBlock::dspPreds()
{
unsigned count = 0;
for (flowList* pred = bbPreds; pred != nullptr; pred = pred->flNext)
......@@ -308,7 +400,7 @@ unsigned BasicBlock::dspPreds()
* Returns the number of characters printed.
*/
unsigned BasicBlock::dspCheapPreds()
unsigned BasicBlock::dspCheapPreds()
{
unsigned count = 0;
for (BasicBlockList* pred = bbCheapPreds; pred != nullptr; pred = pred->next)
......@@ -337,13 +429,13 @@ unsigned BasicBlock::dspCheapPreds()
* Returns the count of successors.
*/
unsigned BasicBlock::dspSuccs(Compiler* compiler)
unsigned BasicBlock::dspSuccs(Compiler* compiler)
{
unsigned numSuccs = NumSucc(compiler);
unsigned count = 0;
unsigned count = 0;
for (unsigned i = 0; i < numSuccs; i++)
{
printf("%s", (count == 0) ? "" : ",");
printf("%s", (count == 0) ? "" : ",");
printf("BB%02u", GetSucc(i, compiler)->bbNum);
count++;
}
......@@ -351,83 +443,84 @@ unsigned BasicBlock::dspSuccs(Compiler* compiler)
}
// Display a compact representation of the bbJumpKind, that is, where this block branches.
// This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align things strictly.
void BasicBlock::dspJumpKind()
// This is similar to code in Compiler::fgTableDispBasicBlock(), but doesn't have that code's requirements to align
// things strictly.
void BasicBlock::dspJumpKind()
{
switch (bbJumpKind)
{
case BBJ_EHFINALLYRET:
printf(" (finret)");
break;
case BBJ_EHFINALLYRET:
printf(" (finret)");
break;
case BBJ_EHFILTERRET:
printf(" (fltret)");
break;
case BBJ_EHFILTERRET:
printf(" (fltret)");
break;
case BBJ_EHCATCHRET:
printf(" -> BB%02u (cret)", bbJumpDest->bbNum);
break;
case BBJ_EHCATCHRET:
printf(" -> BB%02u (cret)", bbJumpDest->bbNum);
break;
case BBJ_THROW:
printf(" (throw)");
break;
case BBJ_THROW:
printf(" (throw)");
break;
case BBJ_RETURN:
printf(" (return)");
break;
case BBJ_RETURN:
printf(" (return)");
break;
case BBJ_NONE:
// For fall-through blocks, print nothing.
break;
case BBJ_NONE:
// For fall-through blocks, print nothing.
break;
case BBJ_ALWAYS:
if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
{
printf(" -> BB%02u (ALWAYS)", bbJumpDest->bbNum);
}
else
{
printf(" -> BB%02u (always)", bbJumpDest->bbNum);
}
break;
case BBJ_ALWAYS:
if (bbFlags & BBF_KEEP_BBJ_ALWAYS)
{
printf(" -> BB%02u (ALWAYS)", bbJumpDest->bbNum);
}
else
{
printf(" -> BB%02u (always)", bbJumpDest->bbNum);
}
break;
case BBJ_LEAVE:
printf(" -> BB%02u (leave)", bbJumpDest->bbNum);
break;
case BBJ_LEAVE:
printf(" -> BB%02u (leave)", bbJumpDest->bbNum);
break;
case BBJ_CALLFINALLY:
printf(" -> BB%02u (callf)", bbJumpDest->bbNum);
break;
case BBJ_CALLFINALLY:
printf(" -> BB%02u (callf)", bbJumpDest->bbNum);
break;
case BBJ_COND:
printf(" -> BB%02u (cond)", bbJumpDest->bbNum);
break;
case BBJ_COND:
printf(" -> BB%02u (cond)", bbJumpDest->bbNum);
break;
case BBJ_SWITCH:
printf(" ->");
case BBJ_SWITCH:
printf(" ->");
unsigned jumpCnt;
jumpCnt = bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = bbJumpSwt->bbsDstTab;
do
{
printf("%cBB%02u",
(jumpTab == bbJumpSwt->bbsDstTab) ? ' ' : ',',
(*jumpTab)->bbNum);
}
while (++jumpTab, --jumpCnt);
unsigned jumpCnt;
jumpCnt = bbJumpSwt->bbsCount;
BasicBlock** jumpTab;
jumpTab = bbJumpSwt->bbsDstTab;
do
{
printf("%cBB%02u", (jumpTab == bbJumpSwt->bbsDstTab) ? ' ' : ',', (*jumpTab)->bbNum);
} while (++jumpTab, --jumpCnt);
printf(" (switch)");
break;
printf(" (switch)");
break;
default:
unreached();
break;
default:
unreached();
break;
}
}
void BasicBlock::dspBlockHeader(Compiler* compiler, bool showKind /*= true*/, bool showFlags /*= false*/, bool showPreds /*= true*/)
void BasicBlock::dspBlockHeader(Compiler* compiler,
bool showKind /*= true*/,
bool showFlags /*= false*/,
bool showPreds /*= true*/)
{
printf("BB%02u ", bbNum);
dspBlockILRange();
......@@ -470,8 +563,8 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
{
assert(to->bbTreeList == nullptr);
to->bbFlags = from->bbFlags;
to->bbWeight = from->bbWeight;
to->bbFlags = from->bbFlags;
to->bbWeight = from->bbWeight;
BlockSetOps::AssignAllowUninitRhs(compiler, to->bbReach, from->bbReach);
to->copyEHRegion(from);
to->bbCatchTyp = from->bbCatchTyp;
......@@ -483,17 +576,18 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
to->bbCodeOffsEnd = from->bbCodeOffsEnd;
VarSetOps::AssignAllowUninitRhs(compiler, to->bbScope, from->bbScope);
#if FEATURE_STACK_FP_X87
to->bbFPStateX87 = from->bbFPStateX87;
to->bbFPStateX87 = from->bbFPStateX87;
#endif // FEATURE_STACK_FP_X87
to->bbNatLoopNum = from->bbNatLoopNum;
to->bbNatLoopNum = from->bbNatLoopNum;
#ifdef DEBUG
to->bbLoopNum = from->bbLoopNum;
to->bbTgtStkDepth = from->bbTgtStkDepth;
#endif // DEBUG
for (GenTreePtr fromStmt = from->bbTreeList; fromStmt != NULL; fromStmt = fromStmt->gtNext)
for (GenTreePtr fromStmt = from->bbTreeList; fromStmt != nullptr; fromStmt = fromStmt->gtNext)
{
compiler->fgInsertStmtAtEnd(to, compiler->fgNewStmtFromTree(compiler->gtCloneExpr(fromStmt->gtStmt.gtStmtExpr)));
compiler->fgInsertStmtAtEnd(to,
compiler->fgNewStmtFromTree(compiler->gtCloneExpr(fromStmt->gtStmt.gtStmtExpr)));
}
}
......@@ -510,7 +604,9 @@ void BasicBlock::CloneBlockState(Compiler* compiler, BasicBlock* to, const Basic
GenTreeStmt* BasicBlock::firstStmt()
{
if (bbTreeList == nullptr)
{
return nullptr;
}
return bbTreeList->AsStmt();
}
......@@ -530,7 +626,9 @@ GenTreeStmt* BasicBlock::firstStmt()
GenTreeStmt* BasicBlock::lastStmt()
{
if (bbTreeList == nullptr)
{
return nullptr;
}
GenTree* result = bbTreeList->gtPrev;
assert(result && result->gtNext == nullptr);
......@@ -554,7 +652,9 @@ GenTreeStmt* BasicBlock::lastStmt()
GenTreeStmt* BasicBlock::lastTopLevelStmt()
{
if (bbTreeList == nullptr)
{
return nullptr;
}
GenTreePtr stmt = lastStmt();
......@@ -635,5 +735,3 @@ unsigned PtrKeyFuncs<BasicBlock>::GetHashCode(const BasicBlock* ptr)
#endif
return ptr->bbNum;
}
This diff is collapsed.
......@@ -16,7 +16,7 @@
// 0th bit to avoid having to do "bbNum - 1" calculations everywhere (at the BlockSet call
// sites). This makes reading the code easier, and avoids potential problems of forgetting
// to do a "- 1" somewhere.
//
//
// Basic blocks can be renumbered during compilation, so it is important to not mix
// BlockSets created before and after a renumbering. Every time the blocks are renumbered
// creates a different "epoch", during which the basic block numbers are stable.
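A hedged sketch of what the epoch rule implies for debug checking (names illustrative, not necessarily the actual API): a set records the epoch current when it was created, and debug-only checks compare that against the compiler's current epoch before using the set, so a stale BlockSet trips an assert rather than silently indexing renumbered blocks:

    assert(setEpoch == compiler->GetCurBasicBlockEpoch()); // illustrative check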
......@@ -25,27 +25,25 @@
#include "compilerbitsettraits.h"
#include "bitsetasshortlong.h"
class BlockSetOps : public BitSetOps</*BitSetType*/BitSetShortLongRep,
/*Brand*/BSShortLong,
/*Env*/Compiler*,
/*BitSetTraits*/BasicBlockBitSetTraits>
class BlockSetOps : public BitSetOps</*BitSetType*/ BitSetShortLongRep,
/*Brand*/ BSShortLong,
/*Env*/ Compiler*,
/*BitSetTraits*/ BasicBlockBitSetTraits>
{
public:
// Specialize BlockSetOps::MakeFull(). Since we number basic blocks from one, we remove bit zero from
// the block set. Otherwise, IsEmpty() would never return true.
static
BitSetShortLongRep
MakeFull(Compiler* env)
static BitSetShortLongRep MakeFull(Compiler* env)
{
BitSetShortLongRep retval;
// First, make a full set using the BitSetOps::MakeFull
retval = BitSetOps</*BitSetType*/BitSetShortLongRep,
/*Brand*/BSShortLong,
/*Env*/Compiler*,
/*BitSetTraits*/BasicBlockBitSetTraits>::MakeFull(env);
retval = BitSetOps</*BitSetType*/ BitSetShortLongRep,
/*Brand*/ BSShortLong,
/*Env*/ Compiler*,
/*BitSetTraits*/ BasicBlockBitSetTraits>::MakeFull(env);
// Now, remove element zero, since we number basic blocks starting at one, and index the set with the
// basic block number. If we left this, then IsEmpty() would never return true.
BlockSetOps::RemoveElemD(env, retval, 0);
......@@ -54,11 +52,11 @@ public:
}
};
typedef BitSetShortLongRep BlockSet;
typedef BitSetShortLongRep BlockSet;
// These types should be used as the types for BlockSet arguments and return values, respectively.
typedef BlockSetOps::ValArgType BlockSet_ValArg_T;
typedef BlockSetOps::RetValType BlockSet_ValRet_T;
typedef BlockSetOps::ValArgType BlockSet_ValArg_T;
typedef BlockSetOps::RetValType BlockSet_ValRet_T;
// Initialize "_varName" to "_initVal." Copies contents, not references; if "_varName" is uninitialized, allocates a
// var set for it (using "_comp" for any necessary allocation), and copies the contents of "_initVal" into it.
......@@ -72,8 +70,8 @@ typedef BlockSetOps::RetValType BlockSet_ValRet_T;
// Use this to initialize an iterator "_iterName" to iterate over a BlockSet "_blockSet".
// "_blockNum" will be an unsigned variable to which we assign the elements of "_blockSet".
#define BLOCKSET_ITER_INIT(_comp, _iterName, _blockSet, _blockNum) \
unsigned _blockNum = 0; \
#define BLOCKSET_ITER_INIT(_comp, _iterName, _blockSet, _blockNum) \
unsigned _blockNum = 0; \
BlockSetOps::Iter _iterName(_comp, _blockSet)
#endif // _BLOCKSET_INCLUDED_
This diff is collapsed.
This diff is collapsed.
......@@ -10,205 +10,210 @@
#ifndef LEGACY_BACKEND // Not necessary (it's this way in the #include location), but helpful to IntelliSense
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree);
void genSetRegToConst(regNumber targetReg, var_types targetType, GenTreePtr tree);
void genCodeForTreeNode(GenTreePtr treeNode);
void genCodeForTreeNode(GenTreePtr treeNode);
void genCodeForBinary(GenTreePtr treeNode);
void genCodeForBinary(GenTreePtr treeNode);
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForDivMod(GenTreeOp* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genCodeForMulHi(GenTreeOp* treeNode);
void genLeaInstruction(GenTreeAddrMode *lea);
void genLeaInstruction(GenTreeAddrMode* lea);
void genSetRegToCond(regNumber dstReg, GenTreePtr tree);
void genSetRegToCond(regNumber dstReg, GenTreePtr tree);
void genIntToIntCast(GenTreePtr treeNode);
void genIntToIntCast(GenTreePtr treeNode);
void genFloatToFloatCast(GenTreePtr treeNode);
void genFloatToFloatCast(GenTreePtr treeNode);
void genFloatToIntCast(GenTreePtr treeNode);
void genFloatToIntCast(GenTreePtr treeNode);
void genIntToFloatCast(GenTreePtr treeNode);
void genIntToFloatCast(GenTreePtr treeNode);
void genCkfinite(GenTreePtr treeNode);
void genCkfinite(GenTreePtr treeNode);
void genIntrinsic(GenTreePtr treeNode);
void genIntrinsic(GenTreePtr treeNode);
void genPutArgStk(GenTreePtr treeNode);
unsigned getBaseVarForPutArgStk(GenTreePtr treeNode);
void genPutArgStk(GenTreePtr treeNode);
unsigned getBaseVarForPutArgStk(GenTreePtr treeNode);
#if defined(_TARGET_XARCH_) || defined(_TARGET_ARM64_)
unsigned getFirstArgWithStackSlot();
unsigned getFirstArgWithStackSlot();
#endif // _TARGET_XARCH_ || _TARGET_ARM64_
void genCompareFloat(GenTreePtr treeNode);
void genCompareFloat(GenTreePtr treeNode);
void genCompareInt(GenTreePtr treeNode);
void genCompareInt(GenTreePtr treeNode);
#if !defined(_TARGET_64BIT_)
void genCompareLong(GenTreePtr treeNode);
void genJTrueLong(GenTreePtr treeNode);
void genCompareLong(GenTreePtr treeNode);
void genJTrueLong(GenTreePtr treeNode);
#endif
#ifdef FEATURE_SIMD
enum SIMDScalarMoveType
{
SMT_ZeroInitUpper, // zero initlaize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned *ival = nullptr);
void genSIMDScalarMove(var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitArray(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode);
void genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode);
void genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
void genSIMDCheck(GenTree* treeNode);
// TYP_SIMD12 (i.e Vector3 of size 12 bytes) is not a hardware supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genStoreLclFldTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genLoadLclFldTypeSIMD12(GenTree* treeNode);
enum SIMDScalarMoveType
{
SMT_ZeroInitUpper, // zero initlaize target upper bits
SMT_ZeroInitUpper_SrcHasUpperZeros, // zero initialize target upper bits; source upper bits are known to be zero
SMT_PreserveUpper // preserve target upper bits
};
instruction getOpForSIMDIntrinsic(SIMDIntrinsicID intrinsicId, var_types baseType, unsigned* ival = nullptr);
void genSIMDScalarMove(var_types type, regNumber target, regNumber src, SIMDScalarMoveType moveType);
void genSIMDZero(var_types targetType, var_types baseType, regNumber targetReg);
void genSIMDIntrinsicInit(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitN(GenTreeSIMD* simdNode);
void genSIMDIntrinsicInitArray(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUnOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicBinOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicRelOp(GenTreeSIMD* simdNode);
void genSIMDIntrinsicDotProduct(GenTreeSIMD* simdNode);
void genSIMDIntrinsicSetItem(GenTreeSIMD* simdNode);
void genSIMDIntrinsicGetItem(GenTreeSIMD* simdNode);
void genSIMDIntrinsicShuffleSSE2(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperSave(GenTreeSIMD* simdNode);
void genSIMDIntrinsicUpperRestore(GenTreeSIMD* simdNode);
void genSIMDIntrinsic(GenTreeSIMD* simdNode);
void genSIMDCheck(GenTree* treeNode);
// TYP_SIMD12 (i.e Vector3 of size 12 bytes) is not a hardware supported size and requires
// two reads/writes on 64-bit targets. These routines abstract reading/writing of Vector3
// values through an indirection. Note that Vector3 locals allocated on stack would have
// their size rounded to TARGET_POINTER_SIZE (which is 8 bytes on 64-bit targets) and hence
// Vector3 locals could be treated as TYP_SIMD16 while reading/writing.
void genStoreIndTypeSIMD12(GenTree* treeNode);
void genStoreLclFldTypeSIMD12(GenTree* treeNode);
void genLoadIndTypeSIMD12(GenTree* treeNode);
void genLoadLclFldTypeSIMD12(GenTree* treeNode);
#endif // FEATURE_SIMD
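The Vector3 comment above describes a concrete access-splitting pattern that is easier to see outside the JIT. Below is a minimal standalone C++ sketch of it; Vector3Bits and LoadVector3 are illustrative names invented here, not CoreCLR code, and the real genLoadIndTypeSIMD12 emits instructions rather than memcpy calls:

#include <cstdint>
#include <cstring>

struct Vector3Bits
{
    float x, y, z; // 12 bytes: no single hardware-sized load covers this
};

// Read 12 bytes through an indirection as one 8-byte and one 4-byte access,
// mirroring the two reads described in the comment above.
inline Vector3Bits LoadVector3(const void* addr)
{
    Vector3Bits   result;
    std::uint64_t lo; // bytes 0-7: x and y
    std::uint32_t hi; // bytes 8-11: z
    std::memcpy(&lo, addr, sizeof(lo));
    std::memcpy(&hi, static_cast<const unsigned char*>(addr) + 8, sizeof(hi));
    std::memcpy(&result, &lo, sizeof(lo));
    std::memcpy(reinterpret_cast<unsigned char*>(&result) + 8, &hi, sizeof(hi));
    return result;
}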
#if !defined(_TARGET_64BIT_)
// CodeGen for Long Ints
void genStoreLongLclVar(GenTree* treeNode);
#endif // !defined(_TARGET_64BIT_)
void genProduceReg(GenTree* tree);
void genUnspillRegIfNeeded(GenTree* tree);
regNumber genConsumeReg(GenTree* tree);
void genConsumeRegAndCopy(GenTree* tree, regNumber needReg);
void genConsumeIfReg(GenTreePtr tree)
{
    if (!tree->isContained())
    {
        (void)genConsumeReg(tree);
    }
}
void genRegCopy(GenTreePtr tree);
void genTransferRegGCState(regNumber dst, regNumber src);
void genConsumeAddress(GenTree* addr);
void genConsumeAddrMode(GenTreeAddrMode* mode);
void genConsumeBlockOp(GenTreeBlkOp* blkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
void genConsumePutStructArgStk(
    GenTreePutArgStk* putArgStkNode, regNumber dstReg, regNumber srcReg, regNumber sizeReg, unsigned baseVarNum);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
void genConsumeRegs(GenTree* tree);
void genConsumeOperands(GenTreeOp* tree);
void genEmitGSCookieCheck(bool pushReg);
void genSetRegToIcon(regNumber reg, ssize_t val, var_types type = TYP_INT, insFlags flags = INS_FLAGS_DONT_CARE);
void genCodeForShift(GenTreePtr tree);
#ifdef _TARGET_XARCH_
void genCodeForShiftRMW(GenTreeStoreInd* storeInd);
#endif // _TARGET_XARCH_
void genCodeForCpObj(GenTreeCpObj* cpObjNode);
void genCodeForCpBlk(GenTreeCpBlk* cpBlkNode);
void genCodeForCpBlkRepMovs(GenTreeCpBlk* cpBlkNode);
void genCodeForCpBlkUnroll(GenTreeCpBlk* cpBlkNode);
#ifdef FEATURE_UNIX_AMD64_STRUCT_PASSING
void genPutStructArgStk(GenTreePtr treeNode, unsigned baseVarNum);
void genStructPutArgRepMovs(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
void genStructPutArgUnroll(GenTreePutArgStk* putArgStkNode, unsigned baseVarNum);
#endif // FEATURE_UNIX_AMD64_STRUCT_PASSING
void genCodeForLoadOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genCodeForStoreOffset(instruction ins, emitAttr size, regNumber dst, GenTree* base, unsigned offset);
void genCodeForInitBlk(GenTreeInitBlk* initBlkNode);
void genCodeForInitBlkRepStos(GenTreeInitBlk* initBlkNode);
void genCodeForInitBlkUnroll(GenTreeInitBlk* initBlkNode);
void genJumpTable(GenTree* tree);
void genTableBasedSwitch(GenTree* tree);
void genCodeForArrIndex(GenTreeArrIndex* treeNode);
void genCodeForArrOffset(GenTreeArrOffs* treeNode);
instruction genGetInsForOper(genTreeOps oper, var_types type);
void genStoreInd(GenTreePtr node);
bool genEmitOptimizedGCWriteBarrier(GCInfo::WriteBarrierForm writeBarrierForm, GenTree* addr, GenTree* data);
void genCallInstruction(GenTreePtr call);
void genJmpMethod(GenTreePtr jmp);
void genMultiRegCallStoreToLocal(GenTreePtr treeNode);
// Deals with codegen for multi-register struct returns.
bool isStructReturn(GenTreePtr treeNode);
void genStructReturn(GenTreePtr treeNode);
// Codegen for GT_RETURN.
void genReturn(GenTreePtr treeNode);
void genLclHeap(GenTreePtr tree);
bool genIsRegCandidateLocal(GenTreePtr tree)
{
    if (!tree->IsLocal())
    {
        return false;
    }
    const LclVarDsc* varDsc = &compiler->lvaTable[tree->gtLclVarCommon.gtLclNum];
    return (varDsc->lvIsRegCandidate());
}
#ifdef DEBUG
GenTree* lastConsumedNode;
void genCheckConsumeNode(GenTree* treeNode);
#else // !DEBUG
inline void genCheckConsumeNode(GenTree* treeNode)
{
}
#endif // DEBUG
#endif // !LEGACY_BACKEND
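The genConsume*/genProduce* declarations above follow a strict discipline: a node's operand registers are consumed before the node's own result register is produced, in linear code order. The toy model below captures only that invariant; ToyNode and the register numbers are inventions for illustration, not JIT types:

#include <cassert>
#include <cstdio>

// Toy model: every value is "produced" into a register exactly once and
// "consumed" exactly once, and consumption must follow production.
struct ToyNode
{
    int  reg      = -1;
    bool produced = false;
    bool consumed = false;
};

int toyConsumeReg(ToyNode& n)
{
    assert(n.produced && !n.consumed); // consume after produce, exactly once
    n.consumed = true;
    return n.reg;
}

void toyProduceReg(ToyNode& n, int reg)
{
    assert(!n.produced); // a node produces its result only once
    n.reg      = reg;
    n.produced = true;
}

int main()
{
    ToyNode op1, op2, add;
    toyProduceReg(op1, 0); // operands were defined earlier in linear order
    toyProduceReg(op2, 1);
    int r1 = toyConsumeReg(op1); // like genConsumeOperands(...)
    int r2 = toyConsumeReg(op2);
    std::printf("add r%d, r%d\n", r1, r2);
    toyProduceReg(add, r1); // like genProduceReg(treeNode)
    return 0;
}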
......@@ -11,12 +11,12 @@
#include "bitsetasshortlong.h"
///////////////////////////////////////////////////////////////////////////////
//
// CompAllocBitSetTraits: a base class for other BitSet traits classes.
//
// The classes in this file define "BitSetTraits" arguments to the "BitSetOps" type, ones that assume that
// Compiler* is the "Env" type.
//
// This class just captures the compiler's allocator as an IAllocator.
//
class CompAllocBitSetTraits
......@@ -30,14 +30,14 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
//
// TrackedVarBitSetTraits
//
// This class customizes the bit set to represent sets of tracked local vars.
// The size of the bitset is determined by the # of tracked locals (up to some internal
// maximum), and the Compiler* tracks the tracked local epochs.
//
class TrackedVarBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
......@@ -50,16 +50,16 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
//
// AllVarBitSetTraits
//
// This class customizes the bit set to represent sets of all local vars (tracked or not) --
// at least up to some maximum index. (This index is private to the Compiler, and it is
// the responsibility of the compiler not to use indices >= this maximum.)
// We rely on the fact that variables are never deleted, and therefore use the
// total # of locals as the epoch number (up to the maximum); see the sketch after this class.
//
class AllVarBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
......@@ -72,9 +72,9 @@ public:
};
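As promised above, a small sketch of the epoch idea, under the stated assumption that locals are only ever appended: the running count of locals acts as a version stamp, and a set built at an older count is stale. EpochStampedSet and IsSetCurrent are illustrative names, not Compiler members:

struct EpochStampedSet
{
    unsigned epoch; // total # of locals when this set was allocated
};

// A set is only meaningful while no new locals have been created since it
// was built; otherwise its size (and element numbering) is out of date.
inline bool IsSetCurrent(const EpochStampedSet& s, unsigned currentLocalCount)
{
    return s.epoch == currentLocalCount;
}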
///////////////////////////////////////////////////////////////////////////////
//
// BasicBlockBitSetTraits
//
// This class customizes the bit set to represent sets of BasicBlocks.
// The size of the bitset is determined by the maximum assigned BasicBlock number
// (Compiler::fgBBNumMax) (Note that fgBBcount is not equal to this during inlining,
......@@ -83,7 +83,7 @@ public:
// Thus, if you only care about the inlinee, during inlining, this bit set will waste
// the lower numbered block bits.) The Compiler* tracks the BasicBlock epochs.
//
class BasicBlockBitSetTraits : public CompAllocBitSetTraits
{
public:
static inline unsigned GetSize(Compiler* comp);
......@@ -96,23 +96,21 @@ public:
};
///////////////////////////////////////////////////////////////////////////////
//
// BitVecTraits
//
// This class simplifies creation and usage of "ShortLong" bitsets.
//
struct BitVecTraits
{
private:
unsigned size;
Compiler* comp;
public:
BitVecTraits(unsigned size, Compiler* comp) : size(size), comp(comp)
{
}
static inline IAllocator* GetAllocator(BitVecTraits* b);
......
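For readers new to the pattern, here is a self-contained sketch of how a traits class like those above parameterizes a generic bit-set operations type. It is an assumption-laden illustration, not the JIT's BitSetOps; in particular, the real "ShortLong" representation stores small sets inline in a single word and spills large ones to an allocated array, which this sketch does not model:

#include <cstdint>
#include <vector>

// Illustrative traits: the environment directly supplies the set size.
struct ExampleTraits
{
    static unsigned GetSize(unsigned env)
    {
        return env;
    }
};

// Generic operations parameterized by a traits class, as BitSetOps is.
template <typename Traits, typename Env>
struct ExampleBitSetOps
{
    using Set = std::vector<std::uint64_t>;

    static Set MakeEmpty(Env env)
    {
        return Set((Traits::GetSize(env) + 63) / 64, 0); // one bit per element
    }
    static void AddElem(Set& s, unsigned i)
    {
        s[i / 64] |= std::uint64_t(1) << (i % 64);
    }
    static bool IsMember(const Set& s, unsigned i)
    {
        return ((s[i / 64] >> (i % 64)) & 1) != 0;
    }
};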
......@@ -4,7 +4,7 @@
/*****************************************************************************/
#ifndef CompMemKindMacro
#error Define CompMemKindMacro before including this file.
#endif
// This list of macro invocations should be used to define the CompMemKind enumeration,
......
......@@ -18,7 +18,6 @@
#include "compiler.h"
#include "jitstd.h"
class DataFlow
{
private:
......@@ -72,7 +71,7 @@ void DataFlow::ForwardAnalysis(TCallback& callback)
if (callback.EndMerge(block))
{
AllSuccessorIter succsBegin = block->GetAllSuccs(m_pCompiler).begin();
AllSuccessorIter succsEnd   = block->GetAllSuccs(m_pCompiler).end();
for (AllSuccessorIter succ = succsBegin; succ != succsEnd; ++succ)
{
worklist.insert(worklist.end(), *succ);
......@@ -80,4 +79,3 @@ void DataFlow::ForwardAnalysis(TCallback& callback)
}
}
}
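The fragment above is the propagation step of a forward data-flow worklist: when callback.EndMerge(block) reports a change, every successor of block is re-queued. The following compilable sketch shows that discipline in isolation; Block, succs, and the callback shape are assumptions for illustration, not the JIT's BasicBlock or DataFlow types:

#include <deque>
#include <vector>

struct Block
{
    std::vector<Block*> succs;
};

template <typename TCallback>
void ForwardAnalysisSketch(Block* entry, TCallback& callback)
{
    std::deque<Block*> worklist;
    worklist.push_back(entry);
    while (!worklist.empty())
    {
        Block* block = worklist.front();
        worklist.pop_front();
        if (callback.EndMerge(block)) // out-state changed: revisit successors
        {
            for (Block* succ : block->succs)
            {
                // Blocks may be re-queued; with a monotone merge over a
                // finite-height lattice the loop reaches a fixed point.
                worklist.push_back(succ);
            }
        }
    }
}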
......@@ -19,17 +19,14 @@ XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
class DecomposeLongs
{
public:
DecomposeLongs(Compiler* compiler) : m_compiler(compiler)
{
}
void PrepareForDecomposition();
void DecomposeBlock(BasicBlock* block);
private:
// Driver functions
static Compiler::fgWalkResult DecompNodeHelper(GenTree** ppTree, Compiler::fgWalkData* data);
void DecomposeStmt(GenTreeStmt* stmt);
......
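DecomposeLongs exists because a 32-bit target has no 64-bit registers: each TYP_LONG value is represented as a lo/hi pair of 32-bit values, and each long operation becomes a short sequence of 32-bit operations. As a value-level illustration only (the JIT rewrites GenTree nodes; it does not compute values), a 64-bit add decomposes like this:

#include <cstdint>

struct LongPair
{
    std::uint32_t lo;
    std::uint32_t hi;
};

// 64-bit add built from 32-bit halves: add the low words, then add the
// high words plus the carry out of the low-word add (add/adc on x86).
inline LongPair AddLongDecomposed(LongPair a, LongPair b)
{
    LongPair r;
    r.lo                = a.lo + b.lo;
    std::uint32_t carry = (r.lo < a.lo) ? 1u : 0u; // unsigned wraparound detects carry
    r.hi                = a.hi + b.hi + carry;
    return r;
}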
......@@ -8,4 +8,3 @@
#include "shimload.h"
ExternC PfnDliHook __pfnDliNotifyHook = ShimDelayLoadHook;
......@@ -14,9 +14,9 @@
#elif defined(_TARGET_ARM64_)
#include "emitarm64.h"
#else
#error Unsupported or unset target architecture
#endif
/*****************************************************************************/
#endif //_EMITDEF_H_
/*****************************************************************************/
......@@ -10,5 +10,5 @@
#elif defined(_TARGET_ARM64_)
#include "emitfmtsarm64.h"
#else
#error Unsupported or unset target architecture
#endif // target type
......@@ -9,5 +9,5 @@
#elif defined(_TARGET_ARM64_)
#include "instrsarm64.h"
#else
#error Unsupported or unset target architecture
#endif // target type