* approrpriate -> appropriate
* allignment -> alignment
* aquire -> acquire
* aquisition -> acquisition
* arbitraty -> arbitrary
* arcance -> arcane
* archetecture -> architecture
* Archicture -> Architecture
* architecures -> architectures
* argmuent -> argument
Commit migrated from https://github.com/dotnet/coreclr/commit/1a34c72e9e719c9d3879c2a519bf70a05f42869c
{
namespace
{
- BOOL IsPlatformArchicture(PEKIND kArchitecture)
+ BOOL IsPlatformArchitecture(PEKIND kArchitecture)
{
return ((kArchitecture != peMSIL) && (kArchitecture != peNone));
}
/* static */
BOOL Assembly::IsValidArchitecture(PEKIND kArchitecture)
{
- if (!IsPlatformArchicture(kArchitecture))
+ if (!IsPlatformArchitecture(kArchitecture))
return TRUE;
return (kArchitecture == GetSystemArchitecture());
{
WRAPPER_NO_CONTRACT;
- LOG((LF_CORDB,LL_INFO10000, "D::Lock aquire attempt by 0x%x\n",
+ LOG((LF_CORDB,LL_INFO10000, "D::Lock acquire attempt by 0x%x\n",
GetCurrentThreadId()));
// Debugger lock is larger than both Controller & debugger-data locks.
if (m_mutexCount == 1)
{
- LOG((LF_CORDB,LL_INFO10000, "D::Lock aquired by 0x%x\n", m_mutexOwner));
+ LOG((LF_CORDB,LL_INFO10000, "D::Lock acquired by 0x%x\n", m_mutexOwner));
}
#endif
// The DJI gets deleted as part of the Unbind/Rebind process in MovedCode.
// This is to signal that we should not skip here.
// <NICE> under exactly what scenarios (EnC, code pitching etc.) will this apply?... </NICE>
- // <NICE> can't we be a little clearer about why we don't want to bind the patch in this arcance situation?</NICE>
+ // <NICE> can't we be a little clearer about why we don't want to bind the patch in this arcane situation?</NICE>
if (dcp->HasDJI() && !dcp->IsBreakpointPatch() && !dcp->IsStepperPatch())
{
LOG((LF_CORDB, LL_INFO10000, "Neither stepper nor BP but we have valid a DJI (i.e. the DJI hasn't been deleted as part of the Unbind/MovedCode/Rebind mess)! - getting next patch!\n"));
if (varDsc->lvType == TYP_STRUCT && varDsc->lvOtherArgReg >= MAX_REG_ARG && varDsc->lvOtherArgReg != REG_NA)
{
// This is a split struct. It will account for an extra (8 bytes)
- // of allignment.
+ // of alignment.
varDsc->lvStkOffs += TARGET_POINTER_SIZE;
argOffs += TARGET_POINTER_SIZE;
}
{
assert(arg->OperGet() == GT_LCL_VAR);
- // We need to construct a `GT_OBJ` node for the argmuent,
+ // We need to construct a `GT_OBJ` node for the argument,
// so we need to get the address of the lclVar.
lcl = arg->AsLclVarCommon();
}
else if ( FILE_MAP_WRITE == flags )
{
TRACE( "FILE_MAP_WRITE\n" );
- /* The limitation of x86 archetecture
+ /* The limitation of x86 architecture
means you cant have writable but not readable
page. In Windows maps of FILE_MAP_WRITE can still be
read from. */
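
(Illustrative aside, not part of the diff: the PAL comment above notes that x86 page protections cannot express write-without-read, which is why a FILE_MAP_WRITE mapping remains readable. A minimal sketch, assuming POSIX mmap on x86/x86-64 Linux, showing the same effect with a write-only anonymous mapping:)

    #include <cstdio>
    #include <cstring>
    #include <sys/mman.h>

    int main()
    {
        const size_t len = 4096;
        // Ask for a write-only mapping; on x86 hardware the page ends up readable anyway.
        void *mem = mmap(nullptr, len, PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (mem == MAP_FAILED)
        {
            perror("mmap");
            return 1;
        }
        char *p = static_cast<char *>(mem);
        strcpy(p, "write-only?");      // store through the mapping
        printf("%s\n", p);             // the read also succeeds on x86
        munmap(mem, len);
        return 0;
    }
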
// We cannot insert GCStress instruction at this call
// For arm64 & arm (R2R) call to jithelpers happens via a stub.
// For other architectures call does not happen via stub.
-// For other architecures we can get the target directly by calling getTargetOfCall().
+// For other architectures we can get the target directly by calling getTargetOfCall().
// This is not the case for arm64/arm so need to decode the stub
// instruction to find the actual jithelper target.
// For other architecture we detect call to JIT_RareDisableHelper
else
{
// Iterate through the methods on the interface, and if they have a slot which was filled in
- // on an equivalent interface inherited from the parent fill in the approrpriate slot.
+ // on an equivalent interface inherited from the parent fill in the appropriate slot.
// This code path is only used when there is an implicit implementation of an interface
// that was not implemented on a parent type, but there was an equivalent interface implemented
// on a parent type.
// ExecuteCodeWithGuaranteedCleanup ensures that we will call the backout code delegate even if an SO occurs. We do this by calling the
// try delegate from within an EX_TRY/EX_CATCH block that will catch any thrown exceptions and thus cause the stack to be unwound. This
// guarantees that the backout delegate is called with at least DEFAULT_ENTRY_PROBE_SIZE pages of stack. After the backout delegate is called,
-// we re-raise any exceptions that occurred inside the try delegate. Note that any CER that uses large or arbitraty amounts of stack in
+// we re-raise any exceptions that occurred inside the try delegate. Note that any CER that uses large or arbitrary amounts of stack in
// it's try block must use ExecuteCodeWithGuaranteedCleanup.
//
// ExecuteCodeWithGuaranteedCleanup also guarantees that the backount code will be run before any filters higher up on the stack. This
//
// -plus we might need some more for debugger EH dispatch, Watson, etc...
// -also need to take into account that we can lose up to 1 page of the guard region
- // -additionally, we need to provide some region to hosts to allow for lock aquisition in a hosted scenario
+ // -additionally, we need to provide some region to hosts to allow for lock acquisition in a hosted scenario
//
EXTRA_PAGES = 3;
INDEBUG(EXTRA_PAGES += 1);
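
(Aside, not part of the commit: assuming the common 4 KB OS page size, EXTRA_PAGES = 3 reserves an additional 12 KB of guard cushion, growing to 4 pages, 16 KB, in debug builds via the INDEBUG increment.)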