Add an extra space on comments to match code style
parent bbad6280b1
commit 4d4ccd6c52
37 changed files with 260 additions and 260 deletions
@@ -62,7 +62,7 @@ namespace ARMeilleure.CodeGen.Optimizations
 private static void PropagateCopy(Operation copyOp)
 {
-//Propagate copy source operand to all uses of the destination operand.
+// Propagate copy source operand to all uses of the destination operand.
 Operand dest = copyOp.Destination;
 Operand source = copyOp.GetSource(0);

@@ -82,8 +82,8 @@ namespace ARMeilleure.CodeGen.Optimizations
 private static void RemoveNode(BasicBlock block, LinkedListNode<Node> llNode)
 {
-//Remove a node from the nodes list, and also remove itself
-//from all the use lists on the operands that this node uses.
+// Remove a node from the nodes list, and also remove itself
+// from all the use lists on the operands that this node uses.
 block.Operations.Remove(llNode);

 Node node = llNode.Value;

@@ -47,9 +47,9 @@ namespace ARMeilleure.CodeGen.Optimizations
 private static void TryEliminateBitwiseAnd(Operation operation)
 {
-//Try to recognize and optimize those 3 patterns (in order):
-//x & 0xFFFFFFFF == x, 0xFFFFFFFF & y == y,
-//x & 0x00000000 == 0x00000000, 0x00000000 & y == 0x00000000
+// Try to recognize and optimize those 3 patterns (in order):
+// x & 0xFFFFFFFF == x, 0xFFFFFFFF & y == y,
+// x & 0x00000000 == 0x00000000, 0x00000000 & y == 0x00000000
 Operand x = operation.GetSource(0);
 Operand y = operation.GetSource(1);

@@ -69,9 +69,9 @@ namespace ARMeilleure.CodeGen.Optimizations
 private static void TryEliminateBitwiseOr(Operation operation)
 {
-//Try to recognize and optimize those 3 patterns (in order):
-//x | 0x00000000 == x, 0x00000000 | y == y,
-//x | 0xFFFFFFFF == 0xFFFFFFFF, 0xFFFFFFFF | y == 0xFFFFFFFF
+// Try to recognize and optimize those 3 patterns (in order):
+// x | 0x00000000 == x, 0x00000000 | y == y,
+// x | 0xFFFFFFFF == 0xFFFFFFFF, 0xFFFFFFFF | y == 0xFFFFFFFF
 Operand x = operation.GetSource(0);
 Operand y = operation.GetSource(1);

@@ -124,8 +124,8 @@ namespace ARMeilleure.CodeGen.Optimizations
 return;
 }

-//The condition is constant, we can turn it into a copy, and select
-//the source based on the condition value.
+// The condition is constant, we can turn it into a copy, and select
+// the source based on the condition value.
 int srcIndex = cond.Value != 0 ? 1 : 2;

 Operand source = operation.GetSource(srcIndex);
@@ -61,7 +61,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 foreach (Copy copy in _copies)
 {
-//If the destination is not used anywhere, we can assign it immediately.
+// If the destination is not used anywhere, we can assign it immediately.
 if (!locations.ContainsKey(copy.Dest))
 {
 readyQueue.Enqueue(copy.Dest);

@@ -109,13 +109,13 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (copyDest != locations[sources[copySource]])
 {
-//Find the other swap destination register.
-//To do that, we search all the pending registers, and pick
-//the one where the copy source register is equal to the
-//current destination register being processed (copyDest).
+// Find the other swap destination register.
+// To do that, we search all the pending registers, and pick
+// the one where the copy source register is equal to the
+// current destination register being processed (copyDest).
 foreach (Register pending in pendingQueue)
 {
-//Is this a copy of pending <- copyDest?
+// Is this a copy of pending <- copyDest?
 if (copyDest == locations[sources[pending]])
 {
 swapOther = pending;

@@ -125,10 +125,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 }

-//The value that was previously at "copyDest" now lives on
-//"copySource" thanks to the swap, now we need to update the
-//location for the next copy that is supposed to copy the value
-//that used to live on "copyDest".
+// The value that was previously at "copyDest" now lives on
+// "copySource" thanks to the swap, now we need to update the
+// location for the next copy that is supposed to copy the value
+// that used to live on "copyDest".
 locations[sources[swapOther]] = copySource;
 }
 }

@@ -173,22 +173,22 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (left.IsSpilled && !right.IsSpilled)
 {
-//Move from the stack to a register.
+// Move from the stack to a register.
 AddSplitFill(left, right, type);
 }
 else if (!left.IsSpilled && right.IsSpilled)
 {
-//Move from a register to the stack.
+// Move from a register to the stack.
 AddSplitSpill(left, right, type);
 }
 else if (!left.IsSpilled && !right.IsSpilled && left.Register != right.Register)
 {
-//Move from one register to another.
+// Move from one register to another.
 AddSplitCopy(left, right, type);
 }
 else if (left.SpillOffset != right.SpillOffset)
 {
-//This would be the stack-to-stack move case, but this is not supported.
+// This would be the stack-to-stack move case, but this is not supported.
 throw new ArgumentException("Both intervals were spilled.");
 }
 }

@@ -169,9 +169,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 else
 {
-//Spill the interval that will free the register for the longest
-//amount of time, as long there's no interference of the current
-//interval with a fixed interval using the same register.
+// Spill the interval that will free the register for the longest
+// amount of time, as long there's no interference of the current
+// interval with a fixed interval using the same register.
 bool hasRegisterSelected = false;

 RegisterType regType = current.Local.Type.ToRegisterType();

@@ -493,7 +493,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 int branchIndex = cfg.PostOrderMap[block.Index];
 int targetIndex = cfg.PostOrderMap[successor.Index];

-//Is the branch jumping backwards?
+// Is the branch jumping backwards?
 if (targetIndex >= branchIndex)
 {
 int targetPos = blockStarts[successor.Index];
@@ -125,7 +125,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 private void AllocateInterval(AllocationContext context, LiveInterval current, int cIndex)
 {
-//Check active intervals that already ended.
+// Check active intervals that already ended.
 foreach (int iIndex in context.Active)
 {
 LiveInterval interval = _intervals[iIndex];

@@ -140,7 +140,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 }

-//Check inactive intervals that already ended or were reactivated.
+// Check inactive intervals that already ended or were reactivated.
 foreach (int iIndex in context.Inactive)
 {
 LiveInterval interval = _intervals[iIndex];

@@ -206,17 +206,17 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 int selectedNextUse = freePositions[selectedReg];

-//Intervals starts and ends at odd positions, unless they span an entire
-//block, in this case they will have ranges at a even position.
-//When a interval is loaded from the stack to a register, we can only
-//do the split at a odd position, because otherwise the split interval
-//that is inserted on the list to be processed may clobber a register
-//used by the instruction at the same position as the split.
-//The problem only happens when a interval ends exactly at this instruction,
-//because otherwise they would interfere, and the register wouldn't be selected.
-//When the interval is aligned and the above happens, there's no problem as
-//the instruction that is actually with the last use is the one
-//before that position.
+// Intervals starts and ends at odd positions, unless they span an entire
+// block, in this case they will have ranges at a even position.
+// When a interval is loaded from the stack to a register, we can only
+// do the split at a odd position, because otherwise the split interval
+// that is inserted on the list to be processed may clobber a register
+// used by the instruction at the same position as the split.
+// The problem only happens when a interval ends exactly at this instruction,
+// because otherwise they would interfere, and the register wouldn't be selected.
+// When the interval is aligned and the above happens, there's no problem as
+// the instruction that is actually with the last use is the one
+// before that position.
 selectedNextUse &= ~InstructionGapMask;

 if (selectedNextUse <= current.GetStart())

@@ -352,8 +352,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (usePositions[selectedReg] < currentFirstUse)
 {
-//All intervals on inactive and active are being used before current,
-//so spill the current interval.
+// All intervals on inactive and active are being used before current,
+// so spill the current interval.
 Debug.Assert(currentFirstUse > current.GetStart(), "Trying to spill a interval currently being used.");

 LiveInterval splitChild = current.Split(currentFirstUse);

@@ -366,8 +366,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 else if (blockedPositions[selectedReg] > current.GetEnd())
 {
-//Spill made the register available for the entire current lifetime,
-//so we only need to split the intervals using the selected register.
+// Spill made the register available for the entire current lifetime,
+// so we only need to split the intervals using the selected register.
 current.Register = new Register(selectedReg, regType);

 SplitAndSpillOverlappingIntervals(context, current);

@@ -376,9 +376,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 else
 {
-//There are conflicts even after spill due to the use of fixed registers
-//that can't be spilled, so we need to also split current at the point of
-//the first fixed register use.
+// There are conflicts even after spill due to the use of fixed registers
+// that can't be spilled, so we need to also split current at the point of
+// the first fixed register use.
 current.Register = new Register(selectedReg, regType);

 int splitPosition = blockedPositions[selectedReg] & ~InstructionGapMask;

@@ -467,10 +467,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 LiveInterval current,
 LiveInterval interval)
 {
-//If there's a next use after the start of the current interval,
-//we need to split the spilled interval twice, and re-insert it
-//on the "pending" list to ensure that it will get a new register
-//on that use position.
+// If there's a next use after the start of the current interval,
+// we need to split the spilled interval twice, and re-insert it
+// on the "pending" list to ensure that it will get a new register
+// on that use position.
 int nextUse = interval.NextUseAfter(current.GetStart());

 LiveInterval splitChild;

@@ -528,9 +528,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 Debug.Assert(!interval.IsFixed, "Trying to spill a fixed interval.");
 Debug.Assert(interval.UsesCount == 0, "Trying to spill a interval with uses.");

-//We first check if any of the siblings were spilled, if so we can reuse
-//the stack offset. Otherwise, we allocate a new space on the stack.
-//This prevents stack-to-stack copies being necessary for a split interval.
+// We first check if any of the siblings were spilled, if so we can reuse
+// the stack offset. Otherwise, we allocate a new space on the stack.
+// This prevents stack-to-stack copies being necessary for a split interval.
 if (!interval.TrySpillWithSiblingOffset())
 {
 interval.Spill(context.StackAlloc.Allocate(interval.Local.Type));

@@ -618,8 +618,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 {
 int succIndex = successor.Index;

-//If the current node is a split node, then the actual successor node
-//(the successor before the split) should be right after it.
+// If the current node is a split node, then the actual successor node
+// (the successor before the split) should be right after it.
 if (IsSplitEdgeBlock(successor))
 {
 succIndex = Successors(successor).First().Index;

@@ -675,7 +675,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 }
 else
 {
-//Split the critical edge.
+// Split the critical edge.
 BasicBlock splitBlock = cfg.SplitEdge(block, successor);

 foreach (Operation operation in sequence)

@@ -773,7 +773,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (block.Operations.Count == 0)
 {
-//Pretend we have a dummy instruction on the empty block.
+// Pretend we have a dummy instruction on the empty block.
 _operationNodes.Add(null);

 _operationsCount += InstructionGap;

@@ -792,7 +792,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 BitMap[] blkLiveGen = new BitMap[cfg.Blocks.Count];
 BitMap[] blkLiveKill = new BitMap[cfg.Blocks.Count];

-//Compute local live sets.
+// Compute local live sets.
 foreach (BasicBlock block in cfg.Blocks)
 {
 BitMap liveGen = new BitMap(mapSize);

@@ -820,7 +820,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 blkLiveKill[block.Index] = liveKill;
 }

-//Compute global live sets.
+// Compute global live sets.
 BitMap[] blkLiveIn = new BitMap[cfg.Blocks.Count];
 BitMap[] blkLiveOut = new BitMap[cfg.Blocks.Count];

@@ -863,16 +863,16 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 _blockEdges = new HashSet<int>();

-//Compute lifetime intervals.
+// Compute lifetime intervals.
 int operationPos = _operationsCount;

 for (int index = 0; index < cfg.PostOrderBlocks.Length; index++)
 {
 BasicBlock block = cfg.PostOrderBlocks[index];

-//We handle empty blocks by pretending they have a dummy instruction,
-//because otherwise the block would have the same start and end position,
-//and this is not valid.
+// We handle empty blocks by pretending they have a dummy instruction,
+// because otherwise the block would have the same start and end position,
+// and this is not valid.
 int instCount = Math.Max(block.Operations.Count, 1);

 int blockStart = operationPos - instCount * InstructionGap;
@@ -114,10 +114,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (index >= 0)
 {
-//New range insersects with an existing range, we need to remove
-//all the intersecting ranges before adding the new one.
-//We also extend the new range as needed, based on the values of
-//the existing ranges being removed.
+// New range insersects with an existing range, we need to remove
+// all the intersecting ranges before adding the new one.
+// We also extend the new range as needed, based on the values of
+// the existing ranges being removed.
 int lIndex = index;
 int rIndex = index;

@@ -153,14 +153,14 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 private void InsertRange(int index, int start, int end)
 {
-//Here we insert a new range on the ranges list.
-//If possible, we extend an existing range rather than inserting a new one.
-//We can extend an existing range if any of the following conditions are true:
-//- The new range starts right after the end of the previous range on the list.
-//- The new range ends right before the start of the next range on the list.
-//If both cases are true, we can extend either one. We prefer to extend the
-//previous range, and then remove the next one, but theres no specific reason
-//for that, extending either one will do.
+// Here we insert a new range on the ranges list.
+// If possible, we extend an existing range rather than inserting a new one.
+// We can extend an existing range if any of the following conditions are true:
+// - The new range starts right after the end of the previous range on the list.
+// - The new range ends right before the start of the next range on the list.
+// If both cases are true, we can extend either one. We prefer to extend the
+// previous range, and then remove the next one, but theres no specific reason
+// for that, extending either one will do.
 int? extIndex = null;

 if (index > 0 && _ranges[index - 1].End == start)

@@ -225,9 +225,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
 if (overlapIndex >= 0)
 {
-//It's possible that we have multiple overlaps within a single interval,
-//in this case, we pick the one with the lowest start position, since
-//we return the first overlap position.
+// It's possible that we have multiple overlaps within a single interval,
+// in this case, we pick the one with the lowest start position, since
+// we return the first overlap position.
 while (overlapIndex > 0 && _ranges[overlapIndex - 1].End > range.Start)
 {
 overlapIndex--;
@@ -1044,7 +1044,7 @@ namespace ARMeilleure.CodeGen.X86
 if (memOp != null)
 {
-//Either source or destination is a memory operand.
+// Either source or destination is a memory operand.
 Register baseReg = memOp.BaseAddress.GetRegister();

 X86Register baseRegLow = (X86Register)(baseReg.Index & 0b111);

@@ -1106,7 +1106,7 @@ namespace ARMeilleure.CodeGen.X86
 }
 else
 {
-//Source and destination are registers.
+// Source and destination are registers.
 modRM |= 0xc0;
 }

@@ -1129,7 +1129,7 @@ namespace ARMeilleure.CodeGen.X86
 if ((rexPrefix & 0b1011) == 0 && opCodeHigh == 0xf)
 {
-//Two-byte form.
+// Two-byte form.
 WriteByte(0xc5);

 vexByte2 |= (~rexPrefix & 4) << 5;

@@ -1138,7 +1138,7 @@ namespace ARMeilleure.CodeGen.X86
 }
 else
 {
-//Three-byte form.
+// Three-byte form.
 WriteByte(0xc4);

 int vexByte1 = (~rexPrefix & 7) << 5;
@@ -90,8 +90,8 @@ namespace ARMeilleure.CodeGen.X86
 private int GetCallArgsRegionSize(AllocationResult allocResult, int maxCallArgs, out int xmmSaveRegionSize)
 {
-//We need to add 8 bytes to the total size, as the call to this
-//function already pushed 8 bytes (the return address).
+// We need to add 8 bytes to the total size, as the call to this
+// function already pushed 8 bytes (the return address).
 int intMask = CallingConvention.GetIntCalleeSavedRegisters() & allocResult.IntUsedRegisters;
 int vecMask = CallingConvention.GetVecCalleeSavedRegisters() & allocResult.VecUsedRegisters;

@@ -103,14 +103,14 @@ namespace ARMeilleure.CodeGen.X86
 if (argsCount < 0)
 {
-//When the function has no calls, argsCount is -1.
-//In this case, we don't need to allocate the shadow space.
+// When the function has no calls, argsCount is -1.
+// In this case, we don't need to allocate the shadow space.
 argsCount = 0;
 }
 else if (argsCount < 4)
 {
-//The ABI mandates that the space for at least 4 arguments
-//is reserved on the stack (this is called shadow space).
+// The ABI mandates that the space for at least 4 arguments
+// is reserved on the stack (this is called shadow space).
 argsCount = 4;
 }

@@ -118,7 +118,7 @@ namespace ARMeilleure.CodeGen.X86
 int callArgsAndFrameSize = frameSize + argsCount * 16; //FIXME * 16 => calc

-//Ensure that the Stack Pointer will be aligned to 16 bytes.
+// Ensure that the Stack Pointer will be aligned to 16 bytes.
 callArgsAndFrameSize = (callArgsAndFrameSize + 0xf) & ~0xf;

 return callArgsAndFrameSize - frameSize;

@@ -179,7 +179,7 @@ namespace ARMeilleure.CodeGen.X86
 public byte[] GetCode()
 {
-//Write jump relative offsets.
+// Write jump relative offsets.
 bool modified;

 do

@@ -234,14 +234,14 @@ namespace ARMeilleure.CodeGen.X86
 jump.InstSize = Assembler.GetJmpLength(offset);
 }

-//The jump is relative to the next instruction, not the current one.
-//Since we didn't know the next instruction address when calculating
-//the offset (as the size of the current jump instruction was not know),
-//we now need to compensate the offset with the jump instruction size.
-//It's also worth to note that:
-//- This is only needed for backward jumps.
-//- The GetJmpLength and GetJccLength also compensates the offset
-//internally when computing the jump instruction size.
+// The jump is relative to the next instruction, not the current one.
+// Since we didn't know the next instruction address when calculating
+// the offset (as the size of the current jump instruction was not know),
+// we now need to compensate the offset with the jump instruction size.
+// It's also worth to note that:
+// - This is only needed for backward jumps.
+// - The GetJmpLength and GetJccLength also compensates the offset
+// internally when computing the jump instruction size.
 if (offset < 0)
 {
 offset -= jump.InstSize;

@@ -259,7 +259,7 @@ namespace ARMeilleure.CodeGen.X86
 }
 while (modified);

-//Write the code, ignoring the dummy bytes after jumps, into a new stream.
+// Write the code, ignoring the dummy bytes after jumps, into a new stream.
 _stream.Seek(0, SeekOrigin.Begin);

 using (MemoryStream codeStream = new MemoryStream())
@@ -632,7 +632,7 @@ namespace ARMeilleure.CodeGen.X86
 Debug.Assert(dest.Type.IsInteger() || source.Kind != OperandKind.Constant);

-//Moves to the same register are useless.
+// Moves to the same register are useless.
 if (dest.Kind == source.Kind && dest.Value == source.Value)
 {
 return;

@@ -641,7 +641,7 @@ namespace ARMeilleure.CodeGen.X86
 if (dest.Kind == OperandKind.Register &&
 source.Kind == OperandKind.Constant && source.Value == 0)
 {
-//Assemble "mov reg, 0" as "xor reg, reg" as the later is more efficient.
+// Assemble "mov reg, 0" as "xor reg, reg" as the later is more efficient.
 context.Assembler.Xor(dest, dest, OperandType.I32);
 }
 else if (dest.Type.IsInteger())

@@ -668,20 +668,20 @@ namespace ARMeilleure.CodeGen.X86
 int operandSize = dest.Type == OperandType.I32 ? 32 : 64;
 int operandMask = operandSize - 1;

-//When the input operand is 0, the result is undefined, however the
-//ZF flag is set. We are supposed to return the operand size on that
-//case. So, add an additional jump to handle that case, by moving the
-//operand size constant to the destination register.
+// When the input operand is 0, the result is undefined, however the
+// ZF flag is set. We are supposed to return the operand size on that
+// case. So, add an additional jump to handle that case, by moving the
+// operand size constant to the destination register.
 context.JumpToNear(X86Condition.NotEqual);

 context.Assembler.Mov(dest, new Operand(operandSize | operandMask), OperandType.I32);

 context.JumpHere();

-//BSR returns the zero based index of the last bit set on the operand,
-//starting from the least significant bit. However we are supposed to
-//return the number of 0 bits on the high end. So, we invert the result
-//of the BSR using XOR to get the correct value.
+// BSR returns the zero based index of the last bit set on the operand,
+// starting from the least significant bit. However we are supposed to
+// return the number of 0 bits on the high end. So, we invert the result
+// of the BSR using XOR to get the correct value.
 context.Assembler.Xor(dest, new Operand(operandMask), OperandType.I32);
 }

@@ -1137,7 +1137,7 @@ namespace ARMeilleure.CodeGen.X86
 {
 Debug.Assert(index < (dest.Type == OperandType.FP32 ? 4 : 2));

-//Floating-point types.
+// Floating-point types.
 if ((index >= 2 && dest.Type == OperandType.FP32) ||
 (index == 1 && dest.Type == OperandType.FP64))
 {
@@ -24,8 +24,8 @@ namespace ARMeilleure.Decoders
 {
 public static Condition Invert(this Condition cond)
 {
-//Bit 0 of all conditions is basically a negation bit, so
-//inverting this bit has the effect of inverting the condition.
+// Bit 0 of all conditions is basically a negation bit, so
+// inverting this bit has the effect of inverting the condition.
 return (Condition)((int)cond ^ 1);
 }
 }

@@ -54,7 +54,7 @@ namespace ARMeilleure.Decoders
 while (workQueue.TryDequeue(out Block currBlock))
 {
-//Check if the current block is inside another block.
+// Check if the current block is inside another block.
 if (BinarySearch(blocks, currBlock.Address, out int nBlkIndex))
 {
 Block nBlock = blocks[nBlkIndex];

@@ -71,7 +71,7 @@ namespace ARMeilleure.Decoders
 continue;
 }

-//If we have a block after the current one, set the limit address.
+// If we have a block after the current one, set the limit address.
 ulong limitAddress = ulong.MaxValue;

 if (nBlkIndex != blocks.Count)

@@ -94,10 +94,10 @@ namespace ARMeilleure.Decoders
 if (currBlock.OpCodes.Count != 0)
 {
-//Set child blocks. "Branch" is the block the branch instruction
-//points to (when taken), "Next" is the block at the next address,
-//executed when the branch is not taken. For Unconditional Branches
-//(except BL/BLR that are sub calls) or end of executable, Next is null.
+// Set child blocks. "Branch" is the block the branch instruction
+// points to (when taken), "Next" is the block at the next address,
+// executed when the branch is not taken. For Unconditional Branches
+// (except BL/BLR that are sub calls) or end of executable, Next is null.
 OpCode lastOp = currBlock.GetLastOp();

 bool isCall = IsCall(lastOp);

@@ -113,7 +113,7 @@ namespace ARMeilleure.Decoders
 }
 }

-//Insert the new block on the list (sorted by address).
+// Insert the new block on the list (sorted by address).
 if (blocks.Count != 0)
 {
 Block nBlock = blocks[nBlkIndex];

@@ -211,25 +211,25 @@ namespace ARMeilleure.Decoders
 return false;
 }

-//Note: On ARM32, most instructions have conditional execution,
-//so there's no "Always" (unconditional) branch like on ARM64.
-//We need to check if the condition is "Always" instead.
+// Note: On ARM32, most instructions have conditional execution,
+// so there's no "Always" (unconditional) branch like on ARM64.
+// We need to check if the condition is "Always" instead.
 return IsAarch32Branch(op) && op.Cond >= Condition.Al;
 }

 private static bool IsAarch32Branch(OpCode opCode)
 {
-//Note: On ARM32, most ALU operations can write to R15 (PC),
-//so we must consider such operations as a branch in potential aswell.
+// Note: On ARM32, most ALU operations can write to R15 (PC),
+// so we must consider such operations as a branch in potential aswell.
 if (opCode is IOpCode32Alu opAlu && opAlu.Rd == RegisterAlias.Aarch32Pc)
 {
 return true;
 }

-//Same thing for memory operations. We have the cases where PC is a target
-//register (Rt == 15 or (mask & (1 << 15)) != 0), and cases where there is
-//a write back to PC (wback == true && Rn == 15), however the later may
-//be "undefined" depending on the CPU, so compilers should not produce that.
+// Same thing for memory operations. We have the cases where PC is a target
+// register (Rt == 15 or (mask & (1 << 15)) != 0), and cases where there is
+// a write back to PC (wback == true && Rn == 15), however the later may
+// be "undefined" depending on the CPU, so compilers should not produce that.
 if (opCode is IOpCode32Mem || opCode is IOpCode32MemMult)
 {
 int rt, rn;

@@ -243,8 +243,8 @@ namespace ARMeilleure.Decoders
 wBack = opMem.WBack;
 isLoad = opMem.IsLoad;

-//For the dual load, we also need to take into account the
-//case were Rt2 == 15 (PC).
+// For the dual load, we also need to take into account the
+// case were Rt2 == 15 (PC).
 if (rt == 14 && opMem.Instruction.Name == InstName.Ldrd)
 {
 rt = RegisterAlias.Aarch32Pc;

@@ -271,14 +271,14 @@ namespace ARMeilleure.Decoders
 }
 }

-//Explicit branch instructions.
+// Explicit branch instructions.
 return opCode is IOpCode32BImm ||
 opCode is IOpCode32BReg;
 }

 private static bool IsCall(OpCode opCode)
 {
-//TODO (CQ): ARM32 support.
+// TODO (CQ): ARM32 support.
 return opCode.Instruction.Name == InstName.Bl ||
 opCode.Instruction.Name == InstName.Blr;
 }
@@ -13,8 +13,8 @@ namespace ARMeilleure.Decoders
 public uint GetPc()
 {
-//Due to backwards compatibility and legacy behavior of ARMv4 CPUs pipeline,
-//the PC actually points 2 instructions ahead.
+// Due to backwards compatibility and legacy behavior of ARMv4 CPUs pipeline,
+// the PC actually points 2 instructions ahead.
 return (uint)Address + (uint)OpCodeSizeInBytes * 2;
 }
 }

@@ -8,7 +8,7 @@ namespace ARMeilleure.Decoders
 {
 uint pc = GetPc();

-//When the codition is never, the instruction is BLX to Thumb mode.
+// When the codition is never, the instruction is BLX to Thumb mode.
 if (Cond != Condition.Nv)
 {
 pc &= ~3u;

@@ -21,16 +21,16 @@ namespace ARMeilleure.Decoders
 Extend64 = ((opCode >> 22) & 3) == 2;
 WBack = ((opCode >> 24) & 1) == 0;

-//The type is not valid for the Unsigned Immediate 12-bits encoding,
-//because the bits 11:10 are used for the larger Immediate offset.
+// The type is not valid for the Unsigned Immediate 12-bits encoding,
+// because the bits 11:10 are used for the larger Immediate offset.
 MemOp type = WBack ? (MemOp)((opCode >> 10) & 3) : MemOp.Unsigned;

 PostIdx = type == MemOp.PostIndexed;
 Unscaled = type == MemOp.Unscaled ||
 type == MemOp.Unprivileged;

-//Unscaled and Unprivileged doesn't write back,
-//but they do use the 9-bits Signed Immediate.
+// Unscaled and Unprivileged doesn't write back,
+// but they do use the 9-bits Signed Immediate.
 if (Unscaled)
 {
 WBack = false;

@@ -38,12 +38,12 @@ namespace ARMeilleure.Decoders
 if (WBack || Unscaled)
 {
-//9-bits Signed Immediate.
+// 9-bits Signed Immediate.
 Immediate = (opCode << 11) >> 23;
 }
 else
 {
-//12-bits Unsigned Immediate.
+// 12-bits Unsigned Immediate.
 Immediate = ((opCode >> 10) & 0xfff) << Size;
 }
 }

@@ -28,14 +28,14 @@ namespace ARMeilleure.Decoders
 switch (op | (modeLow << 1))
 {
 case 0:
-//64-bits Immediate.
-//Transform abcd efgh into abcd efgh abcd efgh ...
+// 64-bits Immediate.
+// Transform abcd efgh into abcd efgh abcd efgh ...
 imm = (long)((ulong)imm * 0x0101010101010101);
 break;

 case 1:
-//64-bits Immediate.
-//Transform abcd efgh into aaaa aaaa bbbb bbbb ...
+// 64-bits Immediate.
+// Transform abcd efgh into aaaa aaaa bbbb bbbb ...
 imm = (imm & 0xf0) >> 4 | (imm & 0x0f) << 4;
 imm = (imm & 0xcc) >> 2 | (imm & 0x33) << 2;
 imm = (imm & 0xaa) >> 1 | (imm & 0x55) << 1;

@@ -50,29 +50,29 @@ namespace ARMeilleure.Decoders
 case 2:
 case 3:
-//Floating point Immediate.
+// Floating point Immediate.
 imm = DecoderHelper.DecodeImm8Float(imm, Size);
 break;
 }
 }
 else if ((modeHigh & 0b110) == 0b100)
 {
-//16-bits shifted Immediate.
+// 16-bits shifted Immediate.
 Size = 1; imm <<= (modeHigh & 1) << 3;
 }
 else if ((modeHigh & 0b100) == 0b000)
 {
-//32-bits shifted Immediate.
+// 32-bits shifted Immediate.
 Size = 2; imm <<= modeHigh << 3;
 }
 else if ((modeHigh & 0b111) == 0b110)
 {
-//32-bits shifted Immediate (fill with ones).
+// 32-bits shifted Immediate (fill with ones).
 Size = 2; imm = ShlOnes(imm, 8 << modeLow);
 }
 else
 {
-//8 bits without shift.
+// 8 bits without shift.
 Size = 0;
 }
@@ -38,7 +38,7 @@ namespace ARMeilleure.Decoders
 static OpCodeTable()
 {
 #region "OpCode Table (AArch64)"
-//Base
+// Base
 SetA64("x0011010000xxxxx000000xxxxxxxxxx", InstName.Adc, InstEmit.Adc, typeof(OpCodeAluRs));
 SetA64("x0111010000xxxxx000000xxxxxxxxxx", InstName.Adcs, InstEmit.Adcs, typeof(OpCodeAluRs));
 SetA64("x00100010xxxxxxxxxxxxxxxxxxxxxxx", InstName.Add, InstEmit.Add, typeof(OpCodeAluImm));

@@ -189,7 +189,7 @@ namespace ARMeilleure.Decoders
 SetA64("10011011101xxxxx1xxxxxxxxxxxxxxx", InstName.Umsubl, InstEmit.Umsubl, typeof(OpCodeMul));
 SetA64("10011011110xxxxx0xxxxxxxxxxxxxxx", InstName.Umulh, InstEmit.Umulh, typeof(OpCodeMul));

-//FP & SIMD
+// FP & SIMD
 SetA64("0101111011100000101110xxxxxxxxxx", InstName.Abs_S, InstEmit.Abs_S, typeof(OpCodeSimd));
 SetA64("0>001110<<100000101110xxxxxxxxxx", InstName.Abs_V, InstEmit.Abs_V, typeof(OpCodeSimd));
 SetA64("01011110111xxxxx100001xxxxxxxxxx", InstName.Add_S, InstEmit.Add_S, typeof(OpCodeSimdReg));

@@ -593,7 +593,7 @@ namespace ARMeilleure.Decoders
 #endregion

 #region "OpCode Table (AArch32)"
-//Base
+// Base
 SetA32("<<<<0010100xxxxxxxxxxxxxxxxxxxxx", InstName.Add, null, typeof(OpCode32AluImm));
 SetA32("<<<<0000100xxxxxxxxxxxxxxxx0xxxx", InstName.Add, null, typeof(OpCode32AluRsImm));
 SetA32("<<<<1010xxxxxxxxxxxxxxxxxxxxxxxx", InstName.B, null, typeof(OpCode32BImm));

@@ -684,12 +684,12 @@ namespace ARMeilleure.Decoders
 for (int index = 0; index < encoding.Length; index++, bit--)
 {
-//Note: < and > are used on special encodings.
-//The < means that we should never have ALL bits with the '<' set.
-//So, when the encoding has <<, it means that 00, 01, and 10 are valid,
-//but not 11. <<< is 000, 001, ..., 110 but NOT 111, and so on...
-//For >, the invalid value is zero. So, for >> 01, 10 and 11 are valid,
-//but 00 isn't.
+// Note: < and > are used on special encodings.
+// The < means that we should never have ALL bits with the '<' set.
+// So, when the encoding has <<, it means that 00, 01, and 10 are valid,
+// but not 11. <<< is 000, 001, ..., 110 but NOT 111, and so on...
+// For >, the invalid value is zero. So, for >> 01, 10 and 11 are valid,
+// but 00 isn't.
 char chr = encoding[index];

 if (chr == '1')
@@ -13,7 +13,7 @@ namespace ARMeilleure.Instructions
 {
 public static void EmitAdcsCCheck(ArmEmitterContext context, Operand n, Operand d)
 {
-//C = (Rd == Rn && CIn) || Rd < Rn
+// C = (Rd == Rn && CIn) || Rd < Rn
 Operand cIn = GetFlag(PState.CFlag);

 Operand cOut = context.BitwiseAnd(context.ICompareEqual(d, n), cIn);

@@ -25,13 +25,13 @@ namespace ARMeilleure.Instructions
 public static void EmitAddsCCheck(ArmEmitterContext context, Operand n, Operand d)
 {
-//C = Rd < Rn
+// C = Rd < Rn
 SetFlag(context, PState.CFlag, context.ICompareLessUI(d, n));
 }

 public static void EmitAddsVCheck(ArmEmitterContext context, Operand n, Operand m, Operand d)
 {
-//V = (Rd ^ Rn) & ~(Rn ^ Rm) < 0
+// V = (Rd ^ Rn) & ~(Rn ^ Rm) < 0
 Operand vOut = context.BitwiseExclusiveOr(d, n);

 vOut = context.BitwiseAnd(vOut, context.BitwiseNot(context.BitwiseExclusiveOr(n, m)));

@@ -43,7 +43,7 @@ namespace ARMeilleure.Instructions
 public static void EmitSbcsCCheck(ArmEmitterContext context, Operand n, Operand m)
 {
-//C = (Rn == Rm && CIn) || Rn > Rm
+// C = (Rn == Rm && CIn) || Rn > Rm
 Operand cIn = GetFlag(PState.CFlag);

 Operand cOut = context.BitwiseAnd(context.ICompareEqual(n, m), cIn);

@@ -55,13 +55,13 @@ namespace ARMeilleure.Instructions
 public static void EmitSubsCCheck(ArmEmitterContext context, Operand n, Operand m)
 {
-//C = Rn >= Rm
+// C = Rn >= Rm
 SetFlag(context, PState.CFlag, context.ICompareGreaterOrEqualUI(n, m));
 }

 public static void EmitSubsVCheck(ArmEmitterContext context, Operand n, Operand m, Operand d)
 {
-//V = (Rd ^ Rn) & (Rn ^ Rm) < 0
+// V = (Rd ^ Rn) & (Rn ^ Rm) < 0
 Operand vOut = context.BitwiseExclusiveOr(d, n);

 vOut = context.BitwiseAnd(vOut, context.BitwiseExclusiveOr(n, m));

@@ -98,7 +98,7 @@ namespace ARMeilleure.Instructions
 {
 switch (context.CurrOp)
 {
-//ARM32.
+// ARM32.
 case OpCode32AluImm op:
 {
 if (op.SetFlags && op.IsRotated)

@@ -113,7 +113,7 @@ namespace ARMeilleure.Instructions
 case OpCodeT16AluImm8 op: return Const(op.Immediate);

-//ARM64.
+// ARM64.
 case IOpCodeAluImm op:
 {
 if (op.GetOperandType() == OperandType.I32)

@@ -159,7 +159,7 @@ namespace ARMeilleure.Instructions
 return new InvalidOperationException($"Invalid OpCode type \"{opCode?.GetType().Name ?? "null"}\".");
 }

-//ARM32 helpers.
+// ARM32 helpers.
 private static Operand GetMShiftedByImmediate(ArmEmitterContext context, OpCode32AluRsImm op, bool setCarry)
 {
 Operand m = GetIntA32(context, op.Rm);

@@ -307,7 +307,7 @@ namespace ARMeilleure.Instructions
 private static Operand GetRrxC(ArmEmitterContext context, Operand m, bool setCarry)
 {
-//Rotate right by 1 with carry.
+// Rotate right by 1 with carry.
 Operand cIn = context.Copy(GetFlag(PState.CFlag));

 if (setCarry)
@@ -20,7 +20,7 @@ namespace ARMeilleure.Instructions
 if (op.Pos < op.Shift)
 {
-//BFI.
+// BFI.
 int shift = op.GetBitsCount() - op.Shift;

 int width = op.Pos + 1;

@@ -33,7 +33,7 @@ namespace ARMeilleure.Instructions
 }
 else
 {
-//BFXIL.
+// BFXIL.
 int shift = op.Shift;

 int width = op.Pos - shift + 1;

@@ -16,7 +16,7 @@ namespace ARMeilleure.Instructions
 {
 OpCodeAluBinary op = (OpCodeAluBinary)context.CurrOp;

-//If Rm == 0, Rd = 0 (division by zero).
+// If Rm == 0, Rd = 0 (division by zero).
 Operand n = GetIntOrZR(context, op.Rn);
 Operand m = GetIntOrZR(context, op.Rm);

@@ -29,7 +29,7 @@ namespace ARMeilleure.Instructions
 if (!unsigned)
 {
-//If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
+// If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
 bool is32Bits = op.RegisterSize == RegisterSize.Int32;

 Operand intMin = is32Bits ? Const(int.MinValue) : Const(long.MinValue);
@@ -160,10 +160,10 @@ namespace ARMeilleure.Instructions
 private static void EmitContinueOrReturnCheck(ArmEmitterContext context, Operand retVal)
 {
-//Note: The return value of the called method will be placed
-//at the Stack, the return value is always a Int64 with the
-//return address of the function. We check if the address is
-//correct, if it isn't we keep returning until we reach the dispatcher.
+// Note: The return value of the called method will be placed
+// at the Stack, the return value is always a Int64 with the
+// return address of the function. We check if the address is
+// correct, if it isn't we keep returning until we reach the dispatcher.
 ulong nextAddr = GetNextOpAddress(context.CurrOp);

 if (context.CurrBlock.Next != null)

@@ -49,10 +49,10 @@ namespace ARMeilleure.Instructions
 public static int GetRegisterAlias(Aarch32Mode mode, int register)
 {
-//Only registers >= 8 are banked,
-//with registers in the range [8, 12] being
-//banked for the FIQ mode, and registers
-//13 and 14 being banked for all modes.
+// Only registers >= 8 are banked,
+// with registers in the range [8, 12] being
+// banked for the FIQ mode, and registers
+// 13 and 14 being banked for all modes.
 if ((uint)register < 8)
 {
 return register;

@@ -131,7 +131,7 @@ namespace ARMeilleure.Instructions
 {
 address = context.Copy(GetIntOrSP(context, op.Rn));

-//Pre-indexing.
+// Pre-indexing.
 if (!op.PostIdx)
 {
 address = context.Add(address, Const(op.Immediate));

@@ -162,7 +162,7 @@ namespace ARMeilleure.Instructions
 private static void EmitWBackIfNeeded(ArmEmitterContext context, Operand address)
 {
-//Check whenever the current OpCode has post-indexed write back, if so write it.
+// Check whenever the current OpCode has post-indexed write back, if so write it.
 if (context.CurrOp is OpCodeMemImm op && op.WBack)
 {
 if (op.PostIdx)
@@ -60,10 +60,10 @@ namespace ARMeilleure.Instructions
 if (pair)
 {
-//Exclusive loads should be atomic. For pairwise loads, we need to
-//read all the data at once. For a 32-bits pairwise load, we do a
-//simple 64-bits load, for a 128-bits load, we need to call a special
-//method to read 128-bits atomically.
+// Exclusive loads should be atomic. For pairwise loads, we need to
+// read all the data at once. For a 32-bits pairwise load, we do a
+// simple 64-bits load, for a 128-bits load, we need to call a special
+// method to read 128-bits atomically.
 if (op.Size == 2)
 {
 Operand value = EmitLoad(context, address, exclusive, 3);

@@ -94,7 +94,7 @@ namespace ARMeilleure.Instructions
 }
 else
 {
-//8, 16, 32 or 64-bits (non-pairwise) load.
+// 8, 16, 32 or 64-bits (non-pairwise) load.
 Operand value = EmitLoad(context, address, exclusive, op.Size);

 SetIntOrZR(context, op.Rt, value);

@@ -137,7 +137,7 @@ namespace ARMeilleure.Instructions
 public static void Pfrm(ArmEmitterContext context)
 {
-//Memory Prefetch, execute as no-op.
+// Memory Prefetch, execute as no-op.
 }

 public static void Stlr(ArmEmitterContext context) => EmitStr(context, AccessType.Ordered);

@@ -201,8 +201,8 @@ namespace ARMeilleure.Instructions
 if (s != null)
 {
-//This is only needed for exclusive stores. The function returns 0
-//when the store is successful, and 1 otherwise.
+// This is only needed for exclusive stores. The function returns 0
+// when the store is successful, and 1 otherwise.
 SetIntOrZR(context, op.Rs, s);
 }
 }

@@ -253,9 +253,9 @@ namespace ARMeilleure.Instructions
 private static void EmitBarrier(ArmEmitterContext context)
 {
-//Note: This barrier is most likely not necessary, and probably
-//doesn't make any difference since we need to do a ton of stuff
-//(software MMU emulation) to read or write anything anyway.
+// Note: This barrier is most likely not necessary, and probably
+// doesn't make any difference since we need to do a ton of stuff
+// (software MMU emulation) to read or write anything anyway.
 }
 }
 }

@@ -83,7 +83,7 @@ namespace ARMeilleure.Instructions
 if (op.Replicate)
 {
-//Only loads uses the replicate mode.
+// Only loads uses the replicate mode.
 Debug.Assert(isLoad, "Replicate mode is not valid for stores.");

 int elems = op.GetBytesCount() >> op.Size;
@@ -14,12 +14,12 @@ namespace ARMeilleure.Instructions
 public static void Hint(ArmEmitterContext context)
 {
-//Execute as no-op.
+// Execute as no-op.
 }

 public static void Isb(ArmEmitterContext context)
 {
-//Execute as no-op.
+// Execute as no-op.
 }

 public static void Mrs(ArmEmitterContext context)

@@ -65,21 +65,21 @@ namespace ARMeilleure.Instructions
 public static void Nop(ArmEmitterContext context)
 {
-//Do nothing.
+// Do nothing.
 }

 public static void Sys(ArmEmitterContext context)
 {
-//This instruction is used to do some operations on the CPU like cache invalidation,
-//address translation and the like.
-//We treat it as no-op here since we don't have any cache being emulated anyway.
+// This instruction is used to do some operations on the CPU like cache invalidation,
+// address translation and the like.
+// We treat it as no-op here since we don't have any cache being emulated anyway.
 OpCodeSystem op = (OpCodeSystem)context.CurrOp;

 switch (GetPackedId(op))
 {
 case 0b11_011_0111_0100_001:
 {
-//DC ZVA
+// DC ZVA
 Operand t = GetIntOrZR(context, op.Rt);

 for (long offset = 0; offset < (4 << DczSizeLog2); offset += 8)

@@ -92,7 +92,7 @@ namespace ARMeilleure.Instructions
 break;
 }

-//No-op
+// No-op
 case 0b11_011_0111_1110_001: //DC CIVAC
 break;
 }
@@ -2,7 +2,7 @@ namespace ARMeilleure.Instructions
 {
 enum InstName
 {
-//Base (AArch64)
+// Base (AArch64)
 Adc,
 Adcs,
 Add,

@@ -102,7 +102,7 @@ namespace ARMeilleure.Instructions
 Umulh,
 Und,

-//FP & SIMD (AArch64)
+// FP & SIMD (AArch64)
 Abs_S,
 Abs_V,
 Add_S,

@@ -440,7 +440,7 @@ namespace ARMeilleure.Instructions
 Zip1_V,
 Zip2_V,

-//Base (AArch32)
+// Base (AArch32)
 Blx,
 Bx,
 Cmp,
@@ -57,9 +57,9 @@ namespace ARMeilleure.IntermediateRepresentation
 public void Append(Node node)
 {
-//If the branch block is not null, then the list of operations
-//should end with a branch instruction. We insert the new operation
-//before this branch.
+// If the branch block is not null, then the list of operations
+// should end with a branch instruction. We insert the new operation
+// before this branch.
 if (_branch != null || (Operations.Last != null && IsLeafBlock()))
 {
 Operations.AddBefore(Operations.Last, node);

@@ -96,9 +96,9 @@ namespace ARMeilleure.Memory
 IntPtr[] addresses,
 out ulong count)
 {
-//This is only supported on windows, but returning
-//false (failed) is also valid for platforms without
-//write tracking support on the OS.
+// This is only supported on windows, but returning
+// false (failed) is also valid for platforms without
+// write tracking support on the OS.
 if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
 {
 return MemoryManagementWindows.GetModifiedPages(address, size, addresses, out count);
@@ -46,12 +46,12 @@ namespace ARMeilleure.Memory
 AddressSpaceBits = addressSpaceBits;
 AddressSpaceSize = 1L << addressSpaceBits;

-//When flat page table is requested, we use a single
-//array for the mappings of the entire address space.
-//This has better performance, but also high memory usage.
-//The multi level page table uses 9 bits per level, so
-//the memory usage is lower, but the performance is also
-//lower, since each address translation requires multiple reads.
+// When flat page table is requested, we use a single
+// array for the mappings of the entire address space.
+// This has better performance, but also high memory usage.
+// The multi level page table uses 9 bits per level, so
+// the memory usage is lower, but the performance is also
+// lower, since each address translation requires multiple reads.
 if (useFlatPageTable)
 {
 PtLevelBits = addressSpaceBits - PageBits;

@@ -233,13 +233,13 @@ namespace ARMeilleure.Memory
 if (nextPtr == IntPtr.Zero)
 {
-//Entry does not yet exist, allocate a new one.
+// Entry does not yet exist, allocate a new one.
 IntPtr newPtr = Allocate((ulong)(PtLevelSize * IntPtr.Size));

-//Try to swap the current pointer (should be zero), with the allocated one.
+// Try to swap the current pointer (should be zero), with the allocated one.
 nextPtr = Interlocked.CompareExchange(ref *ptePtr, newPtr, IntPtr.Zero);

-//If the old pointer is not null, then another thread already has set it.
+// If the old pointer is not null, then another thread already has set it.
 if (nextPtr != IntPtr.Zero)
 {
 Free(newPtr);

@@ -501,7 +501,7 @@ namespace ARMeilleure.Memory
 private void AbortWithAlignmentFault(long position)
 {
-//TODO: Abort mode and exception support on the CPU.
+// TODO: Abort mode and exception support on the CPU.
 throw new InvalidOperationException($"Tried to compare exchange a misaligned address 0x{position:X16}.");
 }

@@ -614,7 +614,7 @@ namespace ARMeilleure.Memory
 public void ReadBytes(long position, byte[] data, int startIndex, int size)
 {
-//Note: This will be moved later.
+// Note: This will be moved later.
 long endAddr = position + size;

 if ((ulong)size > int.MaxValue)

@@ -748,7 +748,7 @@ namespace ARMeilleure.Memory
 public void WriteBytes(long position, byte[] data, int startIndex, int size)
 {
-//Note: This will be moved later.
+// Note: This will be moved later.
 long endAddr = position + size;

 if ((ulong)endAddr < (ulong)position)

@@ -778,7 +778,7 @@ namespace ARMeilleure.Memory
 public void CopyBytes(long src, long dst, long size)
 {
-//Note: This will be moved later.
+// Note: This will be moved later.
 if (IsContiguous(src, size) &&
 IsContiguous(dst, size))
 {
@@ -84,7 +84,7 @@ namespace ARMeilleure.Translation
 if (visited.Count < blocks.Count)
 {
-//Remove unreachable blocks and renumber.
+// Remove unreachable blocks and renumber.
 int index = 0;

 for (LinkedListNode<BasicBlock> node = blocks.First; node != null;)

@@ -129,14 +129,14 @@ namespace ARMeilleure.Translation
 throw new ArgumentException("Predecessor and successor are not connected.");
 }

-//Insert the new block on the list of blocks.
+// Insert the new block on the list of blocks.
 BasicBlock succPrev = successor.Node.Previous?.Value;

 if (succPrev != null && succPrev != predecessor && succPrev.Next == successor)
 {
-//Can't insert after the predecessor or before the successor.
-//Here, we insert it before the successor by also spliting another
-//edge (the one between the block before "successor" and "successor").
+// Can't insert after the predecessor or before the successor.
+// Here, we insert it before the successor by also spliting another
+// edge (the one between the block before "successor" and "successor").
 BasicBlock splitBlock2 = new BasicBlock(splitBlock.Index + 1);

 succPrev.Next = splitBlock2;
@@ -5,8 +5,8 @@ namespace ARMeilleure.Translation
 {
 static class Dominance
 {
-//Those methods are an implementation of the algorithms on "A Simple, Fast Dominance Algorithm".
-//https://www.cs.rice.edu/~keith/EMBED/dom.pdf
+// Those methods are an implementation of the algorithms on "A Simple, Fast Dominance Algorithm".
+// https://www.cs.rice.edu/~keith/EMBED/dom.pdf
 public static void FindDominators(ControlFlowGraph cfg)
 {
 BasicBlock Intersect(BasicBlock block1, BasicBlock block2)

@@ -80,7 +80,7 @@ namespace ARMeilleure.Translation
 public Operand Call(Delegate func, params Operand[] callArgs)
 {
-//Add the delegate to the cache to ensure it will not be garbage collected.
+// Add the delegate to the cache to ensure it will not be garbage collected.
 func = DelegateCache.GetOrAdd(func);

 IntPtr ptr = Marshal.GetFunctionPointerForDelegate<Delegate>(func);

@@ -93,7 +93,7 @@ namespace ARMeilleure.Translation
 if (!JitCache.TryFind(offset, out JitCacheEntry funcEntry))
 {
-//Not found.
+// Not found.
 return null;
 }
@@ -35,9 +35,9 @@ namespace ARMeilleure.Translation
 private Operand GetUnique(Operand operand)
 {
-//Operand is supposed to be a value or reference type based on kind.
-//We differentiate local variables by reference, but everything else
-//is supposed to be considered the same, if "Value" is the same.
+// Operand is supposed to be a value or reference type based on kind.
+// We differentiate local variables by reference, but everything else
+// is supposed to be considered the same, if "Value" is the same.
 if (operand.Kind != OperandKind.LocalVariable)
 {
 if (_uniqueOperands.TryGetValue(operand.Value, out Operand prevOperand))

@@ -70,7 +70,7 @@ namespace ARMeilleure.Translation
 public static void RunPass(ControlFlowGraph cfg)
 {
-//Computer local register inputs and outputs used inside blocks.
+// Computer local register inputs and outputs used inside blocks.
 RegisterMask[] localInputs = new RegisterMask[cfg.Blocks.Count];
 RegisterMask[] localOutputs = new RegisterMask[cfg.Blocks.Count];

@@ -101,7 +101,7 @@ namespace ARMeilleure.Translation
 }
 }

-//Compute global register inputs and outputs used across blocks.
+// Compute global register inputs and outputs used across blocks.
 RegisterMask[] globalCmnOutputs = new RegisterMask[cfg.Blocks.Count];

 RegisterMask[] globalInputs = new RegisterMask[cfg.Blocks.Count];

@@ -115,7 +115,7 @@ namespace ARMeilleure.Translation
 {
 modified = false;

-//Compute register outputs.
+// Compute register outputs.
 for (int index = cfg.PostOrderBlocks.Length - 1; index >= 0; index--)
 {
 BasicBlock block = cfg.PostOrderBlocks[index];

@@ -162,7 +162,7 @@ namespace ARMeilleure.Translation
 }
 }

-//Compute register inputs.
+// Compute register inputs.
 for (int index = 0; index < cfg.PostOrderBlocks.Length; index++)
 {
 BasicBlock block = cfg.PostOrderBlocks[index];

@@ -191,7 +191,7 @@ namespace ARMeilleure.Translation
 }
 while (modified);

-//Insert load and store context instructions where needed.
+// Insert load and store context instructions where needed.
 foreach (BasicBlock block in cfg.Blocks)
 {
 bool hasContextLoad = HasContextLoad(block);

@@ -201,8 +201,8 @@ namespace ARMeilleure.Translation
 block.Operations.RemoveFirst();
 }

-//The only block without any predecessor should be the entry block.
-//It always needs a context load as it is the first block to run.
+// The only block without any predecessor should be the entry block.
+// It always needs a context load as it is the first block to run.
 if (block.Predecessors.Count == 0 || hasContextLoad)
 {
 LoadLocals(block, globalInputs[block.Index].VecMask, RegisterType.Vector);

@@ -396,7 +396,7 @@ namespace ARMeilleure.Translation
 private static long ClearCallerSavedIntRegs(long mask)
 {
-//TODO: ARM32 support.
+// TODO: ARM32 support.
 mask &= ~(CallerSavedIntRegistersMask | PStateNzcvFlagsMask);

 return mask;

@@ -404,7 +404,7 @@ namespace ARMeilleure.Translation
 private static long ClearCallerSavedVecRegs(long mask)
 {
-//TODO: ARM32 support.
+// TODO: ARM32 support.
 mask &= ~CallerSavedVecRegistersMask;

 return mask;
@@ -54,7 +54,7 @@ namespace ARMeilleure.Translation
 Queue<BasicBlock> dfPhiBlocks = new Queue<BasicBlock>();

-//First pass, get all defs and locals uses.
+// First pass, get all defs and locals uses.
 foreach (BasicBlock block in cfg.Blocks)
 {
 Operand[] localDefs = new Operand[RegisterConsts.TotalCount];

@@ -125,7 +125,7 @@ namespace ARMeilleure.Translation
 }
 }

-//Second pass, rename variables with definitions on different blocks.
+// Second pass, rename variables with definitions on different blocks.
 foreach (BasicBlock block in cfg.Blocks)
 {
 Operand[] localDefs = new Operand[RegisterConsts.TotalCount];

@@ -213,10 +213,10 @@ namespace ARMeilleure.Translation
 private static Operand InsertPhi(DefMap[] globalDefs, BasicBlock block, Operand operand)
 {
-//This block has a Phi that has not been materialized yet, but that
-//would define a new version of the variable we're looking for. We need
-//to materialize the Phi, add all the block/operand pairs into the Phi, and
-//then use the definition from that Phi.
+// This block has a Phi that has not been materialized yet, but that
+// would define a new version of the variable we're looking for. We need
+// to materialize the Phi, add all the block/operand pairs into the Phi, and
+// then use the definition from that Phi.
 Operand local = Local(operand.Type);

 PhiNode phi = new PhiNode(local, block.Predecessors.Count);

@@ -145,11 +145,11 @@ namespace ARMeilleure.Translation
 {
 context.MarkLabel(lblPredicateSkip);

-//If this is the last op on the block, and there's no "next" block
-//after this one, then we have to return right now, with the address
-//of the next instruction to be executed (in the case that the condition
-//is false, and the branch was not taken, as all basic blocks should end
-//with some kind of branch).
+// If this is the last op on the block, and there's no "next" block
+// after this one, then we have to return right now, with the address
+// of the next instruction to be executed (in the case that the condition
+// is false, and the branch was not taken, as all basic blocks should end
+// with some kind of branch).
 if (isLastOp && block.Next == null)
 {
 context.Return(Const(opCode.Address + (ulong)opCode.OpCodeSizeInBytes));