From 5b9338980cc1d55794eacda4ad4c7a3264e7d438 Mon Sep 17 00:00:00 2001
From: John Doe
Date: Mon, 5 Oct 2020 09:05:56 -0700
Subject: [PATCH] Fix various typos in comments (#43019)

* accross -> across
* additionaly -> additionally
* adddress -> address
* addrees -> address
* addresss -> address
* aligment -> alignment
* Alignement -> Alignment
* alredy -> already
* argment -> argument
* Argumemnts -> Arguments
---
 src/coreclr/src/jit/codegenarm64.cpp      | 2 +-
 src/coreclr/src/jit/compiler.cpp          | 4 ++--
 src/coreclr/src/jit/importer.cpp          | 4 ++--
 src/coreclr/src/jit/lclvars.cpp           | 4 ++--
 src/coreclr/src/jit/lower.cpp             | 2 +-
 src/coreclr/src/jit/optcse.cpp            | 2 +-
 src/coreclr/src/jit/simdashwintrinsic.cpp | 2 +-
 7 files changed, 10 insertions(+), 10 deletions(-)

diff --git a/src/coreclr/src/jit/codegenarm64.cpp b/src/coreclr/src/jit/codegenarm64.cpp
index 453c4c58b28..a2e907db09d 100644
--- a/src/coreclr/src/jit/codegenarm64.cpp
+++ b/src/coreclr/src/jit/codegenarm64.cpp
@@ -629,7 +629,7 @@ void CodeGen::genSaveCalleeSavedRegisterGroup(regMaskTP regsMask, int spDelta, i
 // The caller can tell us to fold in a stack pointer adjustment, which we will do with the first instruction.
 // Note that the stack pointer adjustment must be by a multiple of 16 to preserve the invariant that the
 // stack pointer is always 16 byte aligned. If we are saving an odd number of callee-saved
-// registers, though, we will have an empty aligment slot somewhere. It turns out we will put
+// registers, though, we will have an empty alignment slot somewhere. It turns out we will put
 // it below (at a lower address) the callee-saved registers, as that is currently how we
 // do frame layout. This means that the first stack offset will be 8 and the stack pointer
 // adjustment must be done by a SUB, and not folded in to a pre-indexed store.
diff --git a/src/coreclr/src/jit/compiler.cpp b/src/coreclr/src/jit/compiler.cpp
index 197e2a5d8d7..22784047fcb 100644
--- a/src/coreclr/src/jit/compiler.cpp
+++ b/src/coreclr/src/jit/compiler.cpp
@@ -2274,7 +2274,7 @@ void Compiler::compSetProcessor()
         instructionSetFlags.RemoveInstructionSet(InstructionSet_PCLMULQDQ);
     }
 
-    // We need to additionaly check that COMPlus_EnableSSE3_4 is set, as that
+    // We need to additionally check that COMPlus_EnableSSE3_4 is set, as that
     // is a prexisting config flag that controls the SSE3+ ISAs
     if (!JitConfig.EnableSSE3() || !JitConfig.EnableSSE3_4())
     {
@@ -3319,7 +3319,7 @@ const LPCWSTR Compiler::s_compStressModeNames[STRESS_COUNT + 1] = {
 //------------------------------------------------------------------------
 // compStressCompile: determine if a stress mode should be enabled
 //
-// Argumemnts:
+// Arguments:
 //    stressArea - stress mode to possibly enable
 //    weight     - percent of time this mode should be turned on
 //                 (range 0 to 100); weight 0 effectively disables
diff --git a/src/coreclr/src/jit/importer.cpp b/src/coreclr/src/jit/importer.cpp
index 368d324754f..7a2826bf677 100644
--- a/src/coreclr/src/jit/importer.cpp
+++ b/src/coreclr/src/jit/importer.cpp
@@ -7287,7 +7287,7 @@ GenTree* Compiler::impImportStaticFieldAccess(CORINFO_RESOLVED_TOKEN* pResolvedT
 
         default:
         {
-            // Do we need the addrees of a static field?
+            // Do we need the address of a static field?
             //
             if (access & CORINFO_ACCESS_ADDRESS)
             {
@@ -19145,7 +19145,7 @@ void Compiler::impInlineRecordArgInfo(
 // expression from some set of inlines.
 // - when argument type casting is needed the necessary casts are added
 // around the argument node.
-// - if an argment can be simplified by folding then the node here is the
+// - if an argument can be simplified by folding then the node here is the
 // folded value.
 //
 // The method may make observations that lead to marking this candidate as
diff --git a/src/coreclr/src/jit/lclvars.cpp b/src/coreclr/src/jit/lclvars.cpp
index 975a359466c..101f31cbf52 100644
--- a/src/coreclr/src/jit/lclvars.cpp
+++ b/src/coreclr/src/jit/lclvars.cpp
@@ -5826,7 +5826,7 @@ void Compiler::lvaAssignVirtualFrameOffsetsToLocals()
     }
 
     // If we are an OSR method, we "inherit" the frame of the original method,
-    // and the stack is already double aligned on entry (since the return adddress push
+    // and the stack is already double aligned on entry (since the return address push
    // and any special alignment push happened "before").
     if (opts.IsOSR())
     {
@@ -6783,7 +6783,7 @@ void Compiler::lvaAlignFrame()
 
 #if DOUBLE_ALIGN
     if (genDoubleAlign())
     {
-        // Double Frame Alignement for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals()
+        // Double Frame Alignment for x86 is handled in Compiler::lvaAssignVirtualFrameOffsetsToLocals()
 
         if (compLclFrameSize == 0)
         {
diff --git a/src/coreclr/src/jit/lower.cpp b/src/coreclr/src/jit/lower.cpp
index 9a5f843a038..ecfe71a4355 100644
--- a/src/coreclr/src/jit/lower.cpp
+++ b/src/coreclr/src/jit/lower.cpp
@@ -6513,7 +6513,7 @@ void Lowering::TransformUnusedIndirection(GenTreeIndir* ind, Compiler* comp, Bas
 {
     // A nullcheck is essentially the same as an indirection with no use.
     // The difference lies in whether a target register must be allocated.
-    // On XARCH we can generate a compare with no target register as long as the addresss
+    // On XARCH we can generate a compare with no target register as long as the address
     // is not contained.
     // On ARM64 we can generate a load to REG_ZR in all cases.
     // However, on ARM we must always generate a load to a register.
diff --git a/src/coreclr/src/jit/optcse.cpp b/src/coreclr/src/jit/optcse.cpp
index 06339353826..469595d897b 100644
--- a/src/coreclr/src/jit/optcse.cpp
+++ b/src/coreclr/src/jit/optcse.cpp
@@ -1417,7 +1417,7 @@ void Compiler::optValnumCSE_Availablity()
                     isUse = BitVecOps::IsMember(cseLivenessTraits, available_cses, CseAvailBit);
                     isDef = !isUse; // If is isn't a CSE use, it is a CSE def
 
-                    // Is this a "use", that we haven't yet marked as live accross a call
+                    // Is this a "use", that we haven't yet marked as live across a call
                     // and it is not available when we have calls that kill CSE's (cseAvailCrossCallBit)
                     // if the above is true then we will mark this the CSE as live across a call
                     //
diff --git a/src/coreclr/src/jit/simdashwintrinsic.cpp b/src/coreclr/src/jit/simdashwintrinsic.cpp
index dec5706181a..059bf82e5c1 100644
--- a/src/coreclr/src/jit/simdashwintrinsic.cpp
+++ b/src/coreclr/src/jit/simdashwintrinsic.cpp
@@ -385,7 +385,7 @@ GenTree* Compiler::impSimdAsHWIntrinsicSpecial(NamedIntrinsic intrinsic,
 #if defined(TARGET_XARCH)
     bool isVectorT256 = (SimdAsHWIntrinsicInfo::lookupClassId(intrinsic) == SimdAsHWIntrinsicClassId::VectorT256);
 
-    // We should have alredy exited early if SSE2 isn't supported
+    // We should have already exited early if SSE2 isn't supported
     assert(compIsaSupportedDebugOnly(InstructionSet_SSE2));
 
     switch (intrinsic)
-- 
GitLab