Unverified · Commit 5b933898 authored by John Doe, committed by GitHub

Fix various typos in comments (#43019)

* accross -> across

* additionaly -> additionally

* adddress -> address

* addrees -> address

* addresss -> address

* aligment -> alignment

* Alignement -> Alignment

* alredy -> already

* argment -> argument

* Argumemnts -> Arguments
Parent f1e131a4
@@ -629,7 +629,7 @@ void CodeGen::genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, i
// The caller can tell us to fold in a stack pointer adjustment, which we will do with the first instruction.
// Note that the stack pointer adjustment must be by a multiple of 16 to preserve the invariant that the
// stack pointer is always 16 byte aligned. If we are saving an odd number of callee-saved
-// registers, though, we will have an empty aligment slot somewhere. It turns out we will put
+// registers, though, we will have an empty alignment slot somewhere. It turns out we will put
// it below (at a lower address) the callee-saved registers, as that is currently how we
// do frame layout. This means that the first stack offset will be 8 and the stack pointer
// adjustment must be done by a SUB, and not folded in to a pre-indexed store.
......
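The comment above encodes a small piece of arithmetic worth spelling out: with 8-byte registers and the 16-byte stack-pointer invariant, an odd register count always leaves one empty 8-byte alignment slot, which frame layout places at the lowest address, pushing the first save to offset 8. A minimal standalone sketch of that calculation (illustrative only; the names and loop are assumptions, not the JIT's actual frame-layout code):

```cpp
#include <cstdio>

int main()
{
    const int regSize = 8; // each callee-saved register occupies 8 bytes on ARM64

    for (int savedRegs = 1; savedRegs <= 4; savedRegs++)
    {
        int rawSize     = savedRegs * regSize;
        int alignedSize = (rawSize + 15) & ~15;  // round up to the 16-byte SP invariant
        int padding     = alignedSize - rawSize; // the empty alignment slot, if any

        // With an odd register count the pad slot sits at the lowest address,
        // so the first register save lands at offset 8 rather than 0.
        printf("%d regs: raw=%d aligned=%d pad=%d first offset=%d\n",
               savedRegs, rawSize, alignedSize, padding, padding);
    }
}
```

Odd counts (1, 3) produce a pad of 8 and a first offset of 8, which is why the comment says the adjustment must be a separate SUB rather than being folded into a pre-indexed store that would write at offset 0.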
@@ -2274,7 +2274,7 @@ void Compiler::compSetProcessor()
instructionSetFlags.RemoveInstructionSet(InstructionSet_PCLMULQDQ);
}
-// We need to additionaly check that COMPlus_EnableSSE3_4 is set, as that
+// We need to additionally check that COMPlus_EnableSSE3_4 is set, as that
// is a prexisting config flag that controls the SSE3+ ISAs
if (!JitConfig.EnableSSE3() || !JitConfig.EnableSSE3_4())
{
@@ -3319,7 +3319,7 @@ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = {
//------------------------------------------------------------------------
// compStressCompile: determine if a stress mode should be enabled
//
-// Argumemnts:
+// Arguments:
// stressArea - stress mode to possibly enable
// weight - percent of time this mode should be turned on
// (range 0 to 100); weight 0 effectively disables
......
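As documented above, weight is a 0-to-100 percentage that gates how often a stress mode fires. A hedged sketch of that gating idea in isolation (purely illustrative; the real compStressCompile also consults stress mode names, hashes, and COMPlus configuration):

```cpp
#include <cstdlib>

// Illustrative percentage gate: returns true roughly `weight` percent of the
// time. A weight of 0 disables the mode entirely; 100 always enables it.
bool stressGate(unsigned weight)
{
    if (weight == 0)
    {
        return false; // weight 0 effectively disables the stress mode
    }
    return (static_cast<unsigned>(std::rand()) % 100) < weight;
}
```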
@@ -7287,7 +7287,7 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT
default:
{
-// Do we need the addrees of a static field?
+// Do we need the address of a static field?
//
if (access & CORINFO_ACCESS_ADDRESS)
{
@@ -19145,7 +19145,7 @@ void Compiler::impInlineRecordArgInfo(
// expression from some set of inlines.
// - when argument type casting is needed the necessary casts are added
// around the argument node.
-// - if an argment can be simplified by folding then the node here is the
+// - if an argument can be simplified by folding then the node here is the
// folded value.
//
// The method may make observations that lead to marking this candidate as
......
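The folding case in the list above is ordinary compile-time evaluation: when an inline argument reduces to a constant, the recorded node is the folded value rather than the original expression. A toy sketch of that simplification (illustrative only; Node and foldAdd are hypothetical stand-ins, not the JIT's GenTree machinery):

```cpp
// Toy expression node: either a constant or an add of two child nodes.
struct Node
{
    bool  isConst;
    int   value;
    Node* left;  // non-null only when isConst is false
    Node* right; // non-null only when isConst is false
};

// Collapse an add of two constants into a single constant node, the way an
// inliner might record a simplified argument; otherwise leave it unchanged.
Node* foldAdd(Node* n)
{
    if (!n->isConst && n->left->isConst && n->right->isConst)
    {
        n->isConst = true;
        n->value   = n->left->value + n->right->value;
    }
    return n;
}
```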
@@ -5826,7 +5826,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
}
// If we are an OSR method, we "inherit" the frame of the original method,
-// and the stack is already double aligned on entry (since the return adddress push
+// and the stack is already double aligned on entry (since the return address push
// and any special alignment push happened "before").
if (opts.IsOSR())
{
@@ -6783,7 +6783,7 @@ void Compiler::lvaAlignFrame()
#if DOUBLE_ALIGN
if (genDoubleAlign())
{
-// Double Frame Alignement for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals()
+// Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals()
if (compLclFrameSize == 0)
{
......
@@ -6513,7 +6513,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas
{
// A nullcheck is essentially the same as an indirection with no use.
// The difference lies in whether a target register must be allocated.
-// On XARCH we can generate a compare with no target register as long as the addresss
+// On XARCH we can generate a compare with no target register as long as the address
// is not contained.
// On ARM64 we can generate a load to REG_ZR in all cases.
// However, on ARM we must always generate a load to a register.
......
@@ -1417,7 +1417,7 @@ void Compiler::optValnumCSE_Availablity()
isUse = BitVecOps::IsMember(cseLivenessTraits, available_cses, CseAvailBit);
isDef = !isUse; // If is isn't a CSE use, it is a CSE def
// Is this a "use", that we haven't yet marked as live accross a call
// Is this a "use", that we haven't yet marked as live across a call
// and it is not available when we have calls that kill CSE's (cseAvailCrossCallBit)
// if the above is true then we will mark this the CSE as live across a call
//
......
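To unpack the two bits the comment above refers to: each CSE candidate carries a plain availability bit (CseAvailBit) and a second bit recording that it has stayed available across calls (cseAvailCrossCallBit); a call kills only the latter. A minimal sketch of that two-bit pairing (illustrative only, not RyuJIT's BitVec-based implementation):

```cpp
#include <cstdint>

// Illustrative two-bits-per-CSE availability set: bit 2k tracks plain
// availability of CSE k, bit 2k+1 tracks availability across calls.
struct CseAvail
{
    uint64_t bits = 0; // room for 32 CSE candidates in this sketch

    void define(int cse)                { bits |= 3ull << (2 * cse); }     // set both bits
    void killAtCall()                   { bits &= 0x5555555555555555ull; } // clear every cross-call bit
    bool isAvailable(int cse) const     { return (bits >> (2 * cse)) & 1; }
    bool availAcrossCall(int cse) const { return (bits >> (2 * cse + 1)) & 1; }
};
```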
@@ -385,7 +385,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
#if defined(TARGET_XARCH)
bool isVectorT256 = (SimdAsHWIntrinsicInfo::lookupClassId(intrinsic) == SimdAsHWIntrinsicClassId::VectorT256);
-// We should have alredy exited early if SSE2 isn't supported
+// We should have already exited early if SSE2 isn't supported
assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
switch (intrinsic)
......