Add an extra space on comments to match code style

gdkchan 2019-07-28 14:19:58 -03:00
commit 4d4ccd6c52
37 changed files with 260 additions and 260 deletions

@@ -62,7 +62,7 @@ namespace ARMeilleure.CodeGen.Optimizations
private static void PropagateCopy(Operation copyOp)
{
-//Propagate copy source operand to all uses of the destination operand.
+// Propagate copy source operand to all uses of the destination operand.
Operand dest = copyOp.Destination;
Operand source = copyOp.GetSource(0);
@@ -82,8 +82,8 @@ namespace ARMeilleure.CodeGen.Optimizations
private static void RemoveNode(BasicBlock block, LinkedListNode<Node> llNode)
{
-//Remove a node from the nodes list, and also remove itself
-//from all the use lists on the operands that this node uses.
+// Remove a node from the nodes list, and also remove itself
+// from all the use lists on the operands that this node uses.
block.Operations.Remove(llNode);
Node node = llNode.Value;
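
As an aside, the copy propagation this hunk documents amounts to rewiring every use of the copy's destination over to the copy's source, after which the copy itself is dead. A minimal sketch with stand-in types (not the real ARMeilleure Operand/Operation API):

    using System.Collections.Generic;

    // Stand-ins for illustration only.
    class Opnd { public List<Op> Uses = new List<Op>(); }
    class Op { public Opnd Dest; public Opnd[] Sources; }

    static class CopyPropagationSketch
    {
        // Redirect all uses of "dest" to "source"; the copy becomes dead.
        public static void Propagate(Op copy)
        {
            Opnd dest = copy.Dest;
            Opnd source = copy.Sources[0];

            foreach (Op use in dest.Uses)
            {
                for (int i = 0; i < use.Sources.Length; i++)
                {
                    if (use.Sources[i] == dest)
                    {
                        use.Sources[i] = source;
                        source.Uses.Add(use);
                    }
                }
            }

            dest.Uses.Clear();
        }
    }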

@@ -47,9 +47,9 @@ namespace ARMeilleure.CodeGen.Optimizations
private static void TryEliminateBitwiseAnd(Operation operation)
{
-//Try to recognize and optimize those 3 patterns (in order):
-//x & 0xFFFFFFFF == x, 0xFFFFFFFF & y == y,
-//x & 0x00000000 == 0x00000000, 0x00000000 & y == 0x00000000
+// Try to recognize and optimize those 3 patterns (in order):
+// x & 0xFFFFFFFF == x, 0xFFFFFFFF & y == y,
+// x & 0x00000000 == 0x00000000, 0x00000000 & y == 0x00000000
Operand x = operation.GetSource(0);
Operand y = operation.GetSource(1);
@@ -69,9 +69,9 @@ namespace ARMeilleure.CodeGen.Optimizations
private static void TryEliminateBitwiseOr(Operation operation)
{
-//Try to recognize and optimize those 3 patterns (in order):
-//x | 0x00000000 == x, 0x00000000 | y == y,
-//x | 0xFFFFFFFF == 0xFFFFFFFF, 0xFFFFFFFF | y == 0xFFFFFFFF
+// Try to recognize and optimize those 3 patterns (in order):
+// x | 0x00000000 == x, 0x00000000 | y == y,
+// x | 0xFFFFFFFF == 0xFFFFFFFF, 0xFFFFFFFF | y == 0xFFFFFFFF
Operand x = operation.GetSource(0);
Operand y = operation.GetSource(1);
@@ -124,8 +124,8 @@ namespace ARMeilleure.CodeGen.Optimizations
return;
}
-//The condition is constant, we can turn it into a copy, and select
-//the source based on the condition value.
+// The condition is constant, we can turn it into a copy, and select
+// the source based on the condition value.
int srcIndex = cond.Value != 0 ? 1 : 2;
Operand source = operation.GetSource(srcIndex);
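
The rewrites in these hunks rest on simple bitwise identities; the sketch below states them on plain 32-bit values, plus the constant-condition select folding, using illustrative names rather than the IR operand API:

    using System.Diagnostics;

    static class SimplificationSketch
    {
        public static void CheckIdentities(uint x)
        {
            Debug.Assert((x & 0xFFFFFFFF) == x);          // x & ~0 == x
            Debug.Assert((x & 0x00000000) == 0);          // x & 0  == 0
            Debug.Assert((x | 0x00000000) == x);          // x | 0  == x
            Debug.Assert((x | 0xFFFFFFFF) == 0xFFFFFFFF); // x | ~0 == ~0
        }

        // A select with a constant condition collapses to a copy of source 1
        // or source 2, mirroring srcIndex = cond.Value != 0 ? 1 : 2 above.
        public static uint FoldSelect(uint cond, uint srcTrue, uint srcFalse)
        {
            return cond != 0 ? srcTrue : srcFalse;
        }
    }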

@@ -61,7 +61,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
foreach (Copy copy in _copies)
{
-//If the destination is not used anywhere, we can assign it immediately.
+// If the destination is not used anywhere, we can assign it immediately.
if (!locations.ContainsKey(copy.Dest))
{
readyQueue.Enqueue(copy.Dest);
@@ -109,13 +109,13 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (copyDest != locations[sources[copySource]])
{
-//Find the other swap destination register.
-//To do that, we search all the pending registers, and pick
-//the one where the copy source register is equal to the
-//current destination register being processed (copyDest).
+// Find the other swap destination register.
+// To do that, we search all the pending registers, and pick
+// the one where the copy source register is equal to the
+// current destination register being processed (copyDest).
foreach (Register pending in pendingQueue)
{
-//Is this a copy of pending <- copyDest?
+// Is this a copy of pending <- copyDest?
if (copyDest == locations[sources[pending]])
{
swapOther = pending;
@@ -125,10 +125,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
}
-//The value that was previously at "copyDest" now lives on
-//"copySource" thanks to the swap, now we need to update the
-//location for the next copy that is supposed to copy the value
-//that used to live on "copyDest".
+// The value that was previously at "copyDest" now lives on
+// "copySource" thanks to the swap, now we need to update the
+// location for the next copy that is supposed to copy the value
+// that used to live on "copyDest".
locations[sources[swapOther]] = copySource;
}
}
@@ -173,22 +173,22 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (left.IsSpilled && !right.IsSpilled)
{
-//Move from the stack to a register.
+// Move from the stack to a register.
AddSplitFill(left, right, type);
}
else if (!left.IsSpilled && right.IsSpilled)
{
-//Move from a register to the stack.
+// Move from a register to the stack.
AddSplitSpill(left, right, type);
}
else if (!left.IsSpilled && !right.IsSpilled && left.Register != right.Register)
{
-//Move from one register to another.
+// Move from one register to another.
AddSplitCopy(left, right, type);
}
else if (left.SpillOffset != right.SpillOffset)
{
-//This would be the stack-to-stack move case, but this is not supported.
+// This would be the stack-to-stack move case, but this is not supported.
throw new ArgumentException("Both intervals were spilled.");
}
}

@@ -169,9 +169,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
else
{
-//Spill the interval that will free the register for the longest
-//amount of time, as long there's no interference of the current
-//interval with a fixed interval using the same register.
+// Spill the interval that will free the register for the longest
+// amount of time, as long there's no interference of the current
+// interval with a fixed interval using the same register.
bool hasRegisterSelected = false;
RegisterType regType = current.Local.Type.ToRegisterType();
@@ -493,7 +493,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
int branchIndex = cfg.PostOrderMap[block.Index];
int targetIndex = cfg.PostOrderMap[successor.Index];
-//Is the branch jumping backwards?
+// Is the branch jumping backwards?
if (targetIndex >= branchIndex)
{
int targetPos = blockStarts[successor.Index];

@@ -125,7 +125,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
private void AllocateInterval(AllocationContext context, LiveInterval current, int cIndex)
{
-//Check active intervals that already ended.
+// Check active intervals that already ended.
foreach (int iIndex in context.Active)
{
LiveInterval interval = _intervals[iIndex];
@@ -140,7 +140,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
}
-//Check inactive intervals that already ended or were reactivated.
+// Check inactive intervals that already ended or were reactivated.
foreach (int iIndex in context.Inactive)
{
LiveInterval interval = _intervals[iIndex];
@@ -206,17 +206,17 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
int selectedNextUse = freePositions[selectedReg];
-//Intervals starts and ends at odd positions, unless they span an entire
-//block, in this case they will have ranges at a even position.
-//When a interval is loaded from the stack to a register, we can only
-//do the split at a odd position, because otherwise the split interval
-//that is inserted on the list to be processed may clobber a register
-//used by the instruction at the same position as the split.
-//The problem only happens when a interval ends exactly at this instruction,
-//because otherwise they would interfere, and the register wouldn't be selected.
-//When the interval is aligned and the above happens, there's no problem as
-//the instruction that is actually with the last use is the one
-//before that position.
+// Intervals starts and ends at odd positions, unless they span an entire
+// block, in this case they will have ranges at a even position.
+// When a interval is loaded from the stack to a register, we can only
+// do the split at a odd position, because otherwise the split interval
+// that is inserted on the list to be processed may clobber a register
+// used by the instruction at the same position as the split.
+// The problem only happens when a interval ends exactly at this instruction,
+// because otherwise they would interfere, and the register wouldn't be selected.
+// When the interval is aligned and the above happens, there's no problem as
+// the instruction that is actually with the last use is the one
+// before that position.
selectedNextUse &= ~InstructionGapMask;
if (selectedNextUse <= current.GetStart())
@@ -352,8 +352,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (usePositions[selectedReg] < currentFirstUse)
{
-//All intervals on inactive and active are being used before current,
-//so spill the current interval.
+// All intervals on inactive and active are being used before current,
+// so spill the current interval.
Debug.Assert(currentFirstUse > current.GetStart(), "Trying to spill a interval currently being used.");
LiveInterval splitChild = current.Split(currentFirstUse);
@@ -366,8 +366,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
else if (blockedPositions[selectedReg] > current.GetEnd())
{
-//Spill made the register available for the entire current lifetime,
-//so we only need to split the intervals using the selected register.
+// Spill made the register available for the entire current lifetime,
+// so we only need to split the intervals using the selected register.
current.Register = new Register(selectedReg, regType);
SplitAndSpillOverlappingIntervals(context, current);
@@ -376,9 +376,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
else
{
-//There are conflicts even after spill due to the use of fixed registers
-//that can't be spilled, so we need to also split current at the point of
-//the first fixed register use.
+// There are conflicts even after spill due to the use of fixed registers
+// that can't be spilled, so we need to also split current at the point of
+// the first fixed register use.
current.Register = new Register(selectedReg, regType);
int splitPosition = blockedPositions[selectedReg] & ~InstructionGapMask;
@@ -467,10 +467,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
LiveInterval current,
LiveInterval interval)
{
-//If there's a next use after the start of the current interval,
-//we need to split the spilled interval twice, and re-insert it
-//on the "pending" list to ensure that it will get a new register
-//on that use position.
+// If there's a next use after the start of the current interval,
+// we need to split the spilled interval twice, and re-insert it
+// on the "pending" list to ensure that it will get a new register
+// on that use position.
int nextUse = interval.NextUseAfter(current.GetStart());
LiveInterval splitChild;
@@ -528,9 +528,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
Debug.Assert(!interval.IsFixed, "Trying to spill a fixed interval.");
Debug.Assert(interval.UsesCount == 0, "Trying to spill a interval with uses.");
-//We first check if any of the siblings were spilled, if so we can reuse
-//the stack offset. Otherwise, we allocate a new space on the stack.
-//This prevents stack-to-stack copies being necessary for a split interval.
+// We first check if any of the siblings were spilled, if so we can reuse
+// the stack offset. Otherwise, we allocate a new space on the stack.
+// This prevents stack-to-stack copies being necessary for a split interval.
if (!interval.TrySpillWithSiblingOffset())
{
interval.Spill(context.StackAlloc.Allocate(interval.Local.Type));
@@ -618,8 +618,8 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
{
int succIndex = successor.Index;
-//If the current node is a split node, then the actual successor node
-//(the successor before the split) should be right after it.
+// If the current node is a split node, then the actual successor node
+// (the successor before the split) should be right after it.
if (IsSplitEdgeBlock(successor))
{
succIndex = Successors(successor).First().Index;
@@ -675,7 +675,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
}
else
{
-//Split the critical edge.
+// Split the critical edge.
BasicBlock splitBlock = cfg.SplitEdge(block, successor);
foreach (Operation operation in sequence)
@@ -773,7 +773,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (block.Operations.Count == 0)
{
-//Pretend we have a dummy instruction on the empty block.
+// Pretend we have a dummy instruction on the empty block.
_operationNodes.Add(null);
_operationsCount += InstructionGap;
@@ -792,7 +792,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
BitMap[] blkLiveGen = new BitMap[cfg.Blocks.Count];
BitMap[] blkLiveKill = new BitMap[cfg.Blocks.Count];
-//Compute local live sets.
+// Compute local live sets.
foreach (BasicBlock block in cfg.Blocks)
{
BitMap liveGen = new BitMap(mapSize);
@@ -820,7 +820,7 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
blkLiveKill[block.Index] = liveKill;
}
-//Compute global live sets.
+// Compute global live sets.
BitMap[] blkLiveIn = new BitMap[cfg.Blocks.Count];
BitMap[] blkLiveOut = new BitMap[cfg.Blocks.Count];
@@ -863,16 +863,16 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
_blockEdges = new HashSet<int>();
-//Compute lifetime intervals.
+// Compute lifetime intervals.
int operationPos = _operationsCount;
for (int index = 0; index < cfg.PostOrderBlocks.Length; index++)
{
BasicBlock block = cfg.PostOrderBlocks[index];
-//We handle empty blocks by pretending they have a dummy instruction,
-//because otherwise the block would have the same start and end position,
-//and this is not valid.
+// We handle empty blocks by pretending they have a dummy instruction,
+// because otherwise the block would have the same start and end position,
+// and this is not valid.
int instCount = Math.Max(block.Operations.Count, 1);
int blockStart = operationPos - instCount * InstructionGap;

@@ -114,10 +114,10 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (index >= 0)
{
-//New range insersects with an existing range, we need to remove
-//all the intersecting ranges before adding the new one.
-//We also extend the new range as needed, based on the values of
-//the existing ranges being removed.
+// New range insersects with an existing range, we need to remove
+// all the intersecting ranges before adding the new one.
+// We also extend the new range as needed, based on the values of
+// the existing ranges being removed.
int lIndex = index;
int rIndex = index;
@@ -153,14 +153,14 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
private void InsertRange(int index, int start, int end)
{
-//Here we insert a new range on the ranges list.
-//If possible, we extend an existing range rather than inserting a new one.
-//We can extend an existing range if any of the following conditions are true:
-//- The new range starts right after the end of the previous range on the list.
-//- The new range ends right before the start of the next range on the list.
-//If both cases are true, we can extend either one. We prefer to extend the
-//previous range, and then remove the next one, but theres no specific reason
-//for that, extending either one will do.
+// Here we insert a new range on the ranges list.
+// If possible, we extend an existing range rather than inserting a new one.
+// We can extend an existing range if any of the following conditions are true:
+// - The new range starts right after the end of the previous range on the list.
+// - The new range ends right before the start of the next range on the list.
+// If both cases are true, we can extend either one. We prefer to extend the
+// previous range, and then remove the next one, but theres no specific reason
+// for that, extending either one will do.
int? extIndex = null;
if (index > 0 && _ranges[index - 1].End == start)
@@ -225,9 +225,9 @@ namespace ARMeilleure.CodeGen.RegisterAllocators
if (overlapIndex >= 0)
{
-//It's possible that we have multiple overlaps within a single interval,
-//in this case, we pick the one with the lowest start position, since
-//we return the first overlap position.
+// It's possible that we have multiple overlaps within a single interval,
+// in this case, we pick the one with the lowest start position, since
+// we return the first overlap position.
while (overlapIndex > 0 && _ranges[overlapIndex - 1].End > range.Start)
{
overlapIndex--;
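
The extend-or-insert policy described in the InsertRange hunk above can be sketched over a sorted list of half-open ranges; this simplified version extends at most one neighbour, whereas the real code also handles the case where both neighbours touch the new range (extend one, remove the other):

    using System.Collections.Generic;

    struct Rng { public int Start, End; public Rng(int s, int e) { Start = s; End = e; } }

    static class RangeListSketch
    {
        public static void InsertRange(List<Rng> ranges, int index, int start, int end)
        {
            if (index > 0 && ranges[index - 1].End == start)
            {
                // New range starts right after the previous one: extend it.
                ranges[index - 1] = new Rng(ranges[index - 1].Start, end);
            }
            else if (index < ranges.Count && ranges[index].Start == end)
            {
                // New range ends right before the next one: extend that instead.
                ranges[index] = new Rng(start, ranges[index].End);
            }
            else
            {
                ranges.Insert(index, new Rng(start, end));
            }
        }
    }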

@@ -1044,7 +1044,7 @@ namespace ARMeilleure.CodeGen.X86
if (memOp != null)
{
-//Either source or destination is a memory operand.
+// Either source or destination is a memory operand.
Register baseReg = memOp.BaseAddress.GetRegister();
X86Register baseRegLow = (X86Register)(baseReg.Index & 0b111);
@@ -1106,7 +1106,7 @@ namespace ARMeilleure.CodeGen.X86
}
else
{
-//Source and destination are registers.
+// Source and destination are registers.
modRM |= 0xc0;
}
@@ -1129,7 +1129,7 @@ namespace ARMeilleure.CodeGen.X86
if ((rexPrefix & 0b1011) == 0 && opCodeHigh == 0xf)
{
-//Two-byte form.
+// Two-byte form.
WriteByte(0xc5);
vexByte2 |= (~rexPrefix & 4) << 5;
@@ -1138,7 +1138,7 @@ namespace ARMeilleure.CodeGen.X86
}
else
{
-//Three-byte form.
+// Three-byte form.
WriteByte(0xc4);
int vexByte1 = (~rexPrefix & 7) << 5;
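
For reference, the two-byte VEX prefix (0xC5) can only express ~R, the 0F opcode map and W = 0, which is what the (rexPrefix & 0b1011) == 0 && opCodeHigh == 0xf test above checks; anything else forces the three-byte 0xC4 form. A hedged sketch of the byte layout (the method and constant names here are invented, not the assembler's API):

    using System.Collections.Generic;

    static class VexSketch
    {
        private const int RexW = 0b1000, RexR = 0b0100, RexX = 0b0010, RexB = 0b0001;

        // mapSelect: 1 = 0F, 2 = 0F38, 3 = 0F3A. vvvv is the extra source register.
        public static void Emit(List<byte> code, int rexPrefix, int mapSelect, int vvvv, int pp)
        {
            if ((rexPrefix & (RexW | RexX | RexB)) == 0 && mapSelect == 1)
            {
                // Two-byte form: C5, then ~R:~vvvv:L:pp.
                code.Add(0xc5);
                code.Add((byte)((((~rexPrefix >> 2) & 1) << 7) | ((~vvvv & 0xf) << 3) | pp));
            }
            else
            {
                // Three-byte form: C4, then ~R:~X:~B:map, then W:~vvvv:L:pp.
                code.Add(0xc4);
                code.Add((byte)(((~rexPrefix & 7) << 5) | mapSelect));
                code.Add((byte)((((rexPrefix >> 3) & 1) << 7) | ((~vvvv & 0xf) << 3) | pp));
            }
        }
    }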

@@ -90,8 +90,8 @@ namespace ARMeilleure.CodeGen.X86
private int GetCallArgsRegionSize(AllocationResult allocResult, int maxCallArgs, out int xmmSaveRegionSize)
{
-//We need to add 8 bytes to the total size, as the call to this
-//function already pushed 8 bytes (the return address).
+// We need to add 8 bytes to the total size, as the call to this
+// function already pushed 8 bytes (the return address).
int intMask = CallingConvention.GetIntCalleeSavedRegisters() & allocResult.IntUsedRegisters;
int vecMask = CallingConvention.GetVecCalleeSavedRegisters() & allocResult.VecUsedRegisters;
@@ -103,14 +103,14 @@ namespace ARMeilleure.CodeGen.X86
if (argsCount < 0)
{
-//When the function has no calls, argsCount is -1.
-//In this case, we don't need to allocate the shadow space.
+// When the function has no calls, argsCount is -1.
+// In this case, we don't need to allocate the shadow space.
argsCount = 0;
}
else if (argsCount < 4)
{
-//The ABI mandates that the space for at least 4 arguments
-//is reserved on the stack (this is called shadow space).
+// The ABI mandates that the space for at least 4 arguments
+// is reserved on the stack (this is called shadow space).
argsCount = 4;
}
@@ -118,7 +118,7 @@ namespace ARMeilleure.CodeGen.X86
int callArgsAndFrameSize = frameSize + argsCount * 16; //FIXME * 16 => calc
-//Ensure that the Stack Pointer will be aligned to 16 bytes.
+// Ensure that the Stack Pointer will be aligned to 16 bytes.
callArgsAndFrameSize = (callArgsAndFrameSize + 0xf) & ~0xf;
return callArgsAndFrameSize - frameSize;
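
Putting this hunk's pieces together, the shadow-space and alignment arithmetic is easy to sketch in isolation (the * 16 sizing carries the FIXME over from the original):

    // argsCount < 0 means the function makes no calls at all.
    static int CallArgsRegionSizeSketch(int frameSize, int argsCount)
    {
        if (argsCount < 0)
        {
            argsCount = 0; // No calls: no shadow space needed.
        }
        else if (argsCount < 4)
        {
            argsCount = 4; // Windows x64 shadow space: at least 4 slots.
        }

        int callArgsAndFrameSize = frameSize + argsCount * 16;

        // Round up so the stack pointer stays 16-byte aligned.
        callArgsAndFrameSize = (callArgsAndFrameSize + 0xf) & ~0xf;

        return callArgsAndFrameSize - frameSize;
    }
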
@@ -179,7 +179,7 @@ namespace ARMeilleure.CodeGen.X86
public byte[] GetCode()
{
-//Write jump relative offsets.
+// Write jump relative offsets.
bool modified;
do
@@ -234,14 +234,14 @@ namespace ARMeilleure.CodeGen.X86
jump.InstSize = Assembler.GetJmpLength(offset);
}
-//The jump is relative to the next instruction, not the current one.
-//Since we didn't know the next instruction address when calculating
-//the offset (as the size of the current jump instruction was not know),
-//we now need to compensate the offset with the jump instruction size.
-//It's also worth to note that:
-//- This is only needed for backward jumps.
-//- The GetJmpLength and GetJccLength also compensates the offset
-//internally when computing the jump instruction size.
+// The jump is relative to the next instruction, not the current one.
+// Since we didn't know the next instruction address when calculating
+// the offset (as the size of the current jump instruction was not know),
+// we now need to compensate the offset with the jump instruction size.
+// It's also worth to note that:
+// - This is only needed for backward jumps.
+// - The GetJmpLength and GetJccLength also compensates the offset
+// internally when computing the jump instruction size.
if (offset < 0)
{
offset -= jump.InstSize;
@@ -259,7 +259,7 @@ namespace ARMeilleure.CodeGen.X86
}
while (modified);
-//Write the code, ignoring the dummy bytes after jumps, into a new stream.
+// Write the code, ignoring the dummy bytes after jumps, into a new stream.
_stream.Seek(0, SeekOrigin.Begin);
using (MemoryStream codeStream = new MemoryStream())

@@ -632,7 +632,7 @@ namespace ARMeilleure.CodeGen.X86
Debug.Assert(dest.Type.IsInteger() || source.Kind != OperandKind.Constant);
-//Moves to the same register are useless.
+// Moves to the same register are useless.
if (dest.Kind == source.Kind && dest.Value == source.Value)
{
return;
@@ -641,7 +641,7 @@ namespace ARMeilleure.CodeGen.X86
if (dest.Kind == OperandKind.Register &&
source.Kind == OperandKind.Constant && source.Value == 0)
{
-//Assemble "mov reg, 0" as "xor reg, reg" as the later is more efficient.
+// Assemble "mov reg, 0" as "xor reg, reg" as the later is more efficient.
context.Assembler.Xor(dest, dest, OperandType.I32);
}
else if (dest.Type.IsInteger())
@@ -668,20 +668,20 @@ namespace ARMeilleure.CodeGen.X86
int operandSize = dest.Type == OperandType.I32 ? 32 : 64;
int operandMask = operandSize - 1;
-//When the input operand is 0, the result is undefined, however the
-//ZF flag is set. We are supposed to return the operand size on that
-//case. So, add an additional jump to handle that case, by moving the
-//operand size constant to the destination register.
+// When the input operand is 0, the result is undefined, however the
+// ZF flag is set. We are supposed to return the operand size on that
+// case. So, add an additional jump to handle that case, by moving the
+// operand size constant to the destination register.
context.JumpToNear(X86Condition.NotEqual);
context.Assembler.Mov(dest, new Operand(operandSize | operandMask), OperandType.I32);
context.JumpHere();
-//BSR returns the zero based index of the last bit set on the operand,
-//starting from the least significant bit. However we are supposed to
-//return the number of 0 bits on the high end. So, we invert the result
-//of the BSR using XOR to get the correct value.
+// BSR returns the zero based index of the last bit set on the operand,
+// starting from the least significant bit. However we are supposed to
+// return the number of 0 bits on the high end. So, we invert the result
+// of the BSR using XOR to get the correct value.
context.Assembler.Xor(dest, new Operand(operandMask), OperandType.I32);
}
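
What the BSR/XOR sequence above computes can be restated in plain C# (a software stand-in for BSR; the real code emits the x86 instruction instead):

    static int CountLeadingZeros32(uint value)
    {
        if (value == 0)
        {
            return 32; // The ZF path: BSR leaves its result undefined.
        }

        int msbIndex = 0; // Software BSR: index of the highest set bit.
        for (int bit = 31; bit >= 0; bit--)
        {
            if ((value & (1u << bit)) != 0)
            {
                msbIndex = bit;
                break;
            }
        }

        return msbIndex ^ 31; // Same as 31 - msbIndex, which is the CLZ count.
    }
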
@@ -1137,7 +1137,7 @@ namespace ARMeilleure.CodeGen.X86
{
Debug.Assert(index < (dest.Type == OperandType.FP32 ? 4 : 2));
-//Floating-point types.
+// Floating-point types.
if ((index >= 2 && dest.Type == OperandType.FP32) ||
(index == 1 && dest.Type == OperandType.FP64))
{

@@ -24,8 +24,8 @@ namespace ARMeilleure.Decoders
{
public static Condition Invert(this Condition cond)
{
-//Bit 0 of all conditions is basically a negation bit, so
-//inverting this bit has the effect of inverting the condition.
+// Bit 0 of all conditions is basically a negation bit, so
+// inverting this bit has the effect of inverting the condition.
return (Condition)((int)cond ^ 1);
}
}
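
The trick works because ARM condition codes come in true/false pairs that differ only in bit 0 (EQ = 0000, NE = 0001, GE = 1010, LT = 1011, and so on), so a sketch needs only an XOR:

    // Subset of the ARM condition encodings, for illustration.
    enum Cond { Eq = 0, Ne = 1, Ge = 10, Lt = 11, Gt = 12, Le = 13 }

    static class CondSketch
    {
        // Invert(Cond.Eq) == Cond.Ne, Invert(Cond.Lt) == Cond.Ge, etc.
        public static Cond Invert(Cond cond) => (Cond)((int)cond ^ 1);
    }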

@@ -54,7 +54,7 @@ namespace ARMeilleure.Decoders
while (workQueue.TryDequeue(out Block currBlock))
{
-//Check if the current block is inside another block.
+// Check if the current block is inside another block.
if (BinarySearch(blocks, currBlock.Address, out int nBlkIndex))
{
Block nBlock = blocks[nBlkIndex];
@@ -71,7 +71,7 @@ namespace ARMeilleure.Decoders
continue;
}
-//If we have a block after the current one, set the limit address.
+// If we have a block after the current one, set the limit address.
ulong limitAddress = ulong.MaxValue;
if (nBlkIndex != blocks.Count)
@@ -94,10 +94,10 @@ namespace ARMeilleure.Decoders
if (currBlock.OpCodes.Count != 0)
{
-//Set child blocks. "Branch" is the block the branch instruction
-//points to (when taken), "Next" is the block at the next address,
-//executed when the branch is not taken. For Unconditional Branches
-//(except BL/BLR that are sub calls) or end of executable, Next is null.
+// Set child blocks. "Branch" is the block the branch instruction
+// points to (when taken), "Next" is the block at the next address,
+// executed when the branch is not taken. For Unconditional Branches
+// (except BL/BLR that are sub calls) or end of executable, Next is null.
OpCode lastOp = currBlock.GetLastOp();
bool isCall = IsCall(lastOp);
@@ -113,7 +113,7 @@ namespace ARMeilleure.Decoders
}
}
-//Insert the new block on the list (sorted by address).
+// Insert the new block on the list (sorted by address).
if (blocks.Count != 0)
{
Block nBlock = blocks[nBlkIndex];
@@ -211,25 +211,25 @@ namespace ARMeilleure.Decoders
return false;
}
-//Note: On ARM32, most instructions have conditional execution,
-//so there's no "Always" (unconditional) branch like on ARM64.
-//We need to check if the condition is "Always" instead.
+// Note: On ARM32, most instructions have conditional execution,
+// so there's no "Always" (unconditional) branch like on ARM64.
+// We need to check if the condition is "Always" instead.
return IsAarch32Branch(op) && op.Cond >= Condition.Al;
}
private static bool IsAarch32Branch(OpCode opCode)
{
-//Note: On ARM32, most ALU operations can write to R15 (PC),
-//so we must consider such operations as a branch in potential aswell.
+// Note: On ARM32, most ALU operations can write to R15 (PC),
+// so we must consider such operations as a branch in potential aswell.
if (opCode is IOpCode32Alu opAlu && opAlu.Rd == RegisterAlias.Aarch32Pc)
{
return true;
}
-//Same thing for memory operations. We have the cases where PC is a target
-//register (Rt == 15 or (mask & (1 << 15)) != 0), and cases where there is
-//a write back to PC (wback == true && Rn == 15), however the later may
-//be "undefined" depending on the CPU, so compilers should not produce that.
+// Same thing for memory operations. We have the cases where PC is a target
+// register (Rt == 15 or (mask & (1 << 15)) != 0), and cases where there is
+// a write back to PC (wback == true && Rn == 15), however the later may
+// be "undefined" depending on the CPU, so compilers should not produce that.
if (opCode is IOpCode32Mem || opCode is IOpCode32MemMult)
{
int rt, rn;
@@ -243,8 +243,8 @@ namespace ARMeilleure.Decoders
wBack = opMem.WBack;
isLoad = opMem.IsLoad;
-//For the dual load, we also need to take into account the
-//case were Rt2 == 15 (PC).
+// For the dual load, we also need to take into account the
+// case were Rt2 == 15 (PC).
if (rt == 14 && opMem.Instruction.Name == InstName.Ldrd)
{
rt = RegisterAlias.Aarch32Pc;
@@ -271,14 +271,14 @@ namespace ARMeilleure.Decoders
}
}
-//Explicit branch instructions.
+// Explicit branch instructions.
return opCode is IOpCode32BImm ||
opCode is IOpCode32BReg;
}
private static bool IsCall(OpCode opCode)
{
-//TODO (CQ): ARM32 support.
+// TODO (CQ): ARM32 support.
return opCode.Instruction.Name == InstName.Bl ||
opCode.Instruction.Name == InstName.Blr;
}

@@ -13,8 +13,8 @@ namespace ARMeilleure.Decoders
public uint GetPc()
{
-//Due to backwards compatibility and legacy behavior of ARMv4 CPUs pipeline,
-//the PC actually points 2 instructions ahead.
+// Due to backwards compatibility and legacy behavior of ARMv4 CPUs pipeline,
+// the PC actually points 2 instructions ahead.
return (uint)Address + (uint)OpCodeSizeInBytes * 2;
}
}
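
In other words, a read of PC observes the current instruction's address plus two instruction widths (+8 in ARM state, +4 in Thumb state); trivially:

    // ARM state: size 4 -> PC reads address + 8; Thumb: size 2 -> address + 4.
    static uint ReadPcSketch(uint address, uint opCodeSizeInBytes)
    {
        return address + opCodeSizeInBytes * 2;
    }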

@@ -8,7 +8,7 @@ namespace ARMeilleure.Decoders
{
uint pc = GetPc();
-//When the codition is never, the instruction is BLX to Thumb mode.
+// When the codition is never, the instruction is BLX to Thumb mode.
if (Cond != Condition.Nv)
{
pc &= ~3u;

@@ -21,16 +21,16 @@ namespace ARMeilleure.Decoders
Extend64 = ((opCode >> 22) & 3) == 2;
WBack = ((opCode >> 24) & 1) == 0;
-//The type is not valid for the Unsigned Immediate 12-bits encoding,
-//because the bits 11:10 are used for the larger Immediate offset.
+// The type is not valid for the Unsigned Immediate 12-bits encoding,
+// because the bits 11:10 are used for the larger Immediate offset.
MemOp type = WBack ? (MemOp)((opCode >> 10) & 3) : MemOp.Unsigned;
PostIdx = type == MemOp.PostIndexed;
Unscaled = type == MemOp.Unscaled ||
type == MemOp.Unprivileged;
-//Unscaled and Unprivileged doesn't write back,
-//but they do use the 9-bits Signed Immediate.
+// Unscaled and Unprivileged doesn't write back,
+// but they do use the 9-bits Signed Immediate.
if (Unscaled)
{
WBack = false;
@@ -38,12 +38,12 @@ namespace ARMeilleure.Decoders
if (WBack || Unscaled)
{
-//9-bits Signed Immediate.
+// 9-bits Signed Immediate.
Immediate = (opCode << 11) >> 23;
}
else
{
-//12-bits Unsigned Immediate.
+// 12-bits Unsigned Immediate.
Immediate = ((opCode >> 10) & 0xfff) << Size;
}
}

@@ -28,14 +28,14 @@ namespace ARMeilleure.Decoders
switch (op | (modeLow << 1))
{
case 0:
-//64-bits Immediate.
-//Transform abcd efgh into abcd efgh abcd efgh ...
+// 64-bits Immediate.
+// Transform abcd efgh into abcd efgh abcd efgh ...
imm = (long)((ulong)imm * 0x0101010101010101);
break;
case 1:
-//64-bits Immediate.
-//Transform abcd efgh into aaaa aaaa bbbb bbbb ...
+// 64-bits Immediate.
+// Transform abcd efgh into aaaa aaaa bbbb bbbb ...
imm = (imm & 0xf0) >> 4 | (imm & 0x0f) << 4;
imm = (imm & 0xcc) >> 2 | (imm & 0x33) << 2;
imm = (imm & 0xaa) >> 1 | (imm & 0x55) << 1;
@@ -50,29 +50,29 @@ namespace ARMeilleure.Decoders
case 2:
case 3:
-//Floating point Immediate.
+// Floating point Immediate.
imm = DecoderHelper.DecodeImm8Float(imm, Size);
break;
}
}
else if ((modeHigh & 0b110) == 0b100)
{
-//16-bits shifted Immediate.
+// 16-bits shifted Immediate.
Size = 1; imm <<= (modeHigh & 1) << 3;
}
else if ((modeHigh & 0b100) == 0b000)
{
-//32-bits shifted Immediate.
+// 32-bits shifted Immediate.
Size = 2; imm <<= modeHigh << 3;
}
else if ((modeHigh & 0b111) == 0b110)
{
-//32-bits shifted Immediate (fill with ones).
+// 32-bits shifted Immediate (fill with ones).
Size = 2; imm = ShlOnes(imm, 8 << modeLow);
}
else
{
-//8 bits without shift.
+// 8 bits without shift.
Size = 0;
}
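
The two 64-bit expansions in this decoder are worth seeing on raw values: multiplying a byte by 0x0101010101010101 repeats it into every byte lane, while the second mode spreads each of the 8 immediate bits into a full byte of ones or zeros (the bit reversal above is part of that spreading). A sketch of both, with the bit-spread written as a plain loop rather than the reverse-and-multiply form:

    static class SimdImmSketch
    {
        // abcd efgh -> abcd efgh abcd efgh ...
        public static ulong ReplicateByte(byte imm)
        {
            return imm * 0x0101010101010101UL;
        }

        // abcd efgh -> aaaa aaaa bbbb bbbb ... (bit 7 fills the top byte).
        public static ulong SpreadBitsToBytes(byte imm)
        {
            ulong result = 0;

            for (int bit = 0; bit < 8; bit++)
            {
                if ((imm & (1 << bit)) != 0)
                {
                    result |= 0xffUL << (bit * 8);
                }
            }

            return result;
        }
    }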

@@ -38,7 +38,7 @@ namespace ARMeilleure.Decoders
static OpCodeTable()
{
#region "OpCode Table (AArch64)"
-//Base
+// Base
SetA64("x0011010000xxxxx000000xxxxxxxxxx", InstName.Adc, InstEmit.Adc, typeof(OpCodeAluRs));
SetA64("x0111010000xxxxx000000xxxxxxxxxx", InstName.Adcs, InstEmit.Adcs, typeof(OpCodeAluRs));
SetA64("x00100010xxxxxxxxxxxxxxxxxxxxxxx", InstName.Add, InstEmit.Add, typeof(OpCodeAluImm));
@@ -189,7 +189,7 @@ namespace ARMeilleure.Decoders
SetA64("10011011101xxxxx1xxxxxxxxxxxxxxx", InstName.Umsubl, InstEmit.Umsubl, typeof(OpCodeMul));
SetA64("10011011110xxxxx0xxxxxxxxxxxxxxx", InstName.Umulh, InstEmit.Umulh, typeof(OpCodeMul));
-//FP & SIMD
+// FP & SIMD
SetA64("0101111011100000101110xxxxxxxxxx", InstName.Abs_S, InstEmit.Abs_S, typeof(OpCodeSimd));
SetA64("0>001110<<100000101110xxxxxxxxxx", InstName.Abs_V, InstEmit.Abs_V, typeof(OpCodeSimd));
SetA64("01011110111xxxxx100001xxxxxxxxxx", InstName.Add_S, InstEmit.Add_S, typeof(OpCodeSimdReg));
@@ -593,7 +593,7 @@ namespace ARMeilleure.Decoders
#endregion
#region "OpCode Table (AArch32)"
-//Base
+// Base
SetA32("<<<<0010100xxxxxxxxxxxxxxxxxxxxx", InstName.Add, null, typeof(OpCode32AluImm));
SetA32("<<<<0000100xxxxxxxxxxxxxxxx0xxxx", InstName.Add, null, typeof(OpCode32AluRsImm));
SetA32("<<<<1010xxxxxxxxxxxxxxxxxxxxxxxx", InstName.B, null, typeof(OpCode32BImm));
@@ -684,12 +684,12 @@ namespace ARMeilleure.Decoders
for (int index = 0; index < encoding.Length; index++, bit--)
{
-//Note: < and > are used on special encodings.
-//The < means that we should never have ALL bits with the '<' set.
-//So, when the encoding has <<, it means that 00, 01, and 10 are valid,
-//but not 11. <<< is 000, 001, ..., 110 but NOT 111, and so on...
-//For >, the invalid value is zero. So, for >> 01, 10 and 11 are valid,
-//but 00 isn't.
+// Note: < and > are used on special encodings.
+// The < means that we should never have ALL bits with the '<' set.
+// So, when the encoding has <<, it means that 00, 01, and 10 are valid,
+// but not 11. <<< is 000, 001, ..., 110 but NOT 111, and so on...
+// For >, the invalid value is zero. So, for >> 01, 10 and 11 are valid,
+// but 00 isn't.
char chr = encoding[index];
if (chr == '1')
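
The '<'/'>' rule can be restated as a predicate on a concrete opcode: every run of '<' must not match all ones, and every run of '>' must not match all zeros. An illustrative checker under that reading (not the actual table generator, which expands the encodings while building the table):

    static bool SatisfiesConstraints(string encoding, uint opCode)
    {
        for (int index = 0; index < encoding.Length;)
        {
            char chr = encoding[index];

            if (chr != '<' && chr != '>')
            {
                index++;
                continue;
            }

            int runStart = index;
            while (index < encoding.Length && encoding[index] == chr)
            {
                index++;
            }

            int runLength = index - runStart;
            int shift = encoding.Length - index; // Bit position of the run's LSB.
            uint bits = (opCode >> shift) & ((1u << runLength) - 1);

            if (chr == '<' && bits == (1u << runLength) - 1) return false; // All ones.
            if (chr == '>' && bits == 0) return false;                     // All zeros.
        }

        return true;
    }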

@@ -13,7 +13,7 @@ namespace ARMeilleure.Instructions
{
public static void EmitAdcsCCheck(ArmEmitterContext context, Operand n, Operand d)
{
-//C = (Rd == Rn && CIn) || Rd < Rn
+// C = (Rd == Rn && CIn) || Rd < Rn
Operand cIn = GetFlag(PState.CFlag);
Operand cOut = context.BitwiseAnd(context.ICompareEqual(d, n), cIn);
@@ -25,13 +25,13 @@ namespace ARMeilleure.Instructions
public static void EmitAddsCCheck(ArmEmitterContext context, Operand n, Operand d)
{
-//C = Rd < Rn
+// C = Rd < Rn
SetFlag(context, PState.CFlag, context.ICompareLessUI(d, n));
}
public static void EmitAddsVCheck(ArmEmitterContext context, Operand n, Operand m, Operand d)
{
-//V = (Rd ^ Rn) & ~(Rn ^ Rm) < 0
+// V = (Rd ^ Rn) & ~(Rn ^ Rm) < 0
Operand vOut = context.BitwiseExclusiveOr(d, n);
vOut = context.BitwiseAnd(vOut, context.BitwiseNot(context.BitwiseExclusiveOr(n, m)));
@@ -43,7 +43,7 @@ namespace ARMeilleure.Instructions
public static void EmitSbcsCCheck(ArmEmitterContext context, Operand n, Operand m)
{
-//C = (Rn == Rm && CIn) || Rn > Rm
+// C = (Rn == Rm && CIn) || Rn > Rm
Operand cIn = GetFlag(PState.CFlag);
Operand cOut = context.BitwiseAnd(context.ICompareEqual(n, m), cIn);
@@ -55,13 +55,13 @@ namespace ARMeilleure.Instructions
public static void EmitSubsCCheck(ArmEmitterContext context, Operand n, Operand m)
{
-//C = Rn >= Rm
+// C = Rn >= Rm
SetFlag(context, PState.CFlag, context.ICompareGreaterOrEqualUI(n, m));
}
public static void EmitSubsVCheck(ArmEmitterContext context, Operand n, Operand m, Operand d)
{
-//V = (Rd ^ Rn) & (Rn ^ Rm) < 0
+// V = (Rd ^ Rn) & (Rn ^ Rm) < 0
Operand vOut = context.BitwiseExclusiveOr(d, n);
vOut = context.BitwiseAnd(vOut, context.BitwiseExclusiveOr(n, m));
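
The flag formulas in these hunks are easiest to sanity-check on concrete 32-bit values; this sketch evaluates them directly, whereas the emitter builds the equivalent IR:

    static class FlagSketch
    {
        // ADDS: carry means the unsigned result wrapped (Rd < Rn).
        public static bool AddsCarry(uint n, uint m) => (n + m) < n;

        // ADDS: V = (Rd ^ Rn) & ~(Rn ^ Rm) < 0.
        public static bool AddsOverflow(uint n, uint m)
        {
            uint d = n + m;
            return (int)((d ^ n) & ~(n ^ m)) < 0;
        }

        // SUBS: carry set when no borrow occurs (Rn >= Rm, unsigned).
        public static bool SubsCarry(uint n, uint m) => n >= m;

        // SUBS: V = (Rd ^ Rn) & (Rn ^ Rm) < 0.
        public static bool SubsOverflow(uint n, uint m)
        {
            uint d = n - m;
            return (int)((d ^ n) & (n ^ m)) < 0;
        }
    }
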
@@ -98,7 +98,7 @@ namespace ARMeilleure.Instructions
{
switch (context.CurrOp)
{
-//ARM32.
+// ARM32.
case OpCode32AluImm op:
{
if (op.SetFlags && op.IsRotated)
@@ -113,7 +113,7 @@ namespace ARMeilleure.Instructions
case OpCodeT16AluImm8 op: return Const(op.Immediate);
-//ARM64.
+// ARM64.
case IOpCodeAluImm op:
{
if (op.GetOperandType() == OperandType.I32)
@@ -159,7 +159,7 @@ namespace ARMeilleure.Instructions
return new InvalidOperationException($"Invalid OpCode type \"{opCode?.GetType().Name ?? "null"}\".");
}
-//ARM32 helpers.
+// ARM32 helpers.
private static Operand GetMShiftedByImmediate(ArmEmitterContext context, OpCode32AluRsImm op, bool setCarry)
{
Operand m = GetIntA32(context, op.Rm);
@@ -307,7 +307,7 @@ namespace ARMeilleure.Instructions
private static Operand GetRrxC(ArmEmitterContext context, Operand m, bool setCarry)
{
-//Rotate right by 1 with carry.
+// Rotate right by 1 with carry.
Operand cIn = context.Copy(GetFlag(PState.CFlag));
if (setCarry)

@@ -20,7 +20,7 @@ namespace ARMeilleure.Instructions
if (op.Pos < op.Shift)
{
-//BFI.
+// BFI.
int shift = op.GetBitsCount() - op.Shift;
int width = op.Pos + 1;
@@ -33,7 +33,7 @@ namespace ARMeilleure.Instructions
}
else
{
-//BFXIL.
+// BFXIL.
int shift = op.Shift;
int width = op.Pos - shift + 1;

@@ -16,7 +16,7 @@ namespace ARMeilleure.Instructions
{
OpCodeAluBinary op = (OpCodeAluBinary)context.CurrOp;
-//If Rm == 0, Rd = 0 (division by zero).
+// If Rm == 0, Rd = 0 (division by zero).
Operand n = GetIntOrZR(context, op.Rn);
Operand m = GetIntOrZR(context, op.Rm);
@@ -29,7 +29,7 @@ namespace ARMeilleure.Instructions
if (!unsigned)
{
-//If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
+// If Rn == INT_MIN && Rm == -1, Rd = INT_MIN (overflow).
bool is32Bits = op.RegisterSize == RegisterSize.Int32;
Operand intMin = is32Bits ? Const(int.MinValue) : Const(long.MinValue);
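
Both guards mirror the architectural definition of SDIV/UDIV, which never traps; in plain C# the signed case looks like this:

    static int ArmSdiv32Sketch(int n, int m)
    {
        if (m == 0)
        {
            return 0; // Division by zero yields zero.
        }

        if (n == int.MinValue && m == -1)
        {
            return int.MinValue; // Overflow case; plain n / m would throw here.
        }

        return n / m;
    }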

@@ -160,10 +160,10 @@ namespace ARMeilleure.Instructions
private static void EmitContinueOrReturnCheck(ArmEmitterContext context, Operand retVal)
{
-//Note: The return value of the called method will be placed
-//at the Stack, the return value is always a Int64 with the
-//return address of the function. We check if the address is
-//correct, if it isn't we keep returning until we reach the dispatcher.
+// Note: The return value of the called method will be placed
+// at the Stack, the return value is always a Int64 with the
+// return address of the function. We check if the address is
+// correct, if it isn't we keep returning until we reach the dispatcher.
ulong nextAddr = GetNextOpAddress(context.CurrOp);
if (context.CurrBlock.Next != null)

@@ -49,10 +49,10 @@ namespace ARMeilleure.Instructions
public static int GetRegisterAlias(Aarch32Mode mode, int register)
{
-//Only registers >= 8 are banked,
-//with registers in the range [8, 12] being
-//banked for the FIQ mode, and registers
-//13 and 14 being banked for all modes.
+// Only registers >= 8 are banked,
+// with registers in the range [8, 12] being
+// banked for the FIQ mode, and registers
+// 13 and 14 being banked for all modes.
if ((uint)register < 8)
{
return register;
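
A sketch of that banking rule, collapsed to just user vs. FIQ (the real helper switches over every Aarch32 mode, and the banked storage indices here are invented):

    static int GetRegisterAliasSketch(bool isFiq, int register)
    {
        if ((uint)register < 8 || register == 15)
        {
            return register; // R0-R7 and PC are never banked.
        }

        if (register <= 12)
        {
            // R8-R12 are banked only for FIQ mode.
            return isFiq ? 16 + (register - 8) : register;
        }

        // R13 (SP) and R14 (LR) are banked for every mode.
        return isFiq ? 21 + (register - 13) : register;
    }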

@@ -131,7 +131,7 @@ namespace ARMeilleure.Instructions
{
address = context.Copy(GetIntOrSP(context, op.Rn));
-//Pre-indexing.
+// Pre-indexing.
if (!op.PostIdx)
{
address = context.Add(address, Const(op.Immediate));
@@ -162,7 +162,7 @@ namespace ARMeilleure.Instructions
private static void EmitWBackIfNeeded(ArmEmitterContext context, Operand address)
{
-//Check whenever the current OpCode has post-indexed write back, if so write it.
+// Check whenever the current OpCode has post-indexed write back, if so write it.
if (context.CurrOp is OpCodeMemImm op && op.WBack)
{
if (op.PostIdx)

@@ -60,10 +60,10 @@ namespace ARMeilleure.Instructions
if (pair)
{
-//Exclusive loads should be atomic. For pairwise loads, we need to
-//read all the data at once. For a 32-bits pairwise load, we do a
-//simple 64-bits load, for a 128-bits load, we need to call a special
-//method to read 128-bits atomically.
+// Exclusive loads should be atomic. For pairwise loads, we need to
+// read all the data at once. For a 32-bits pairwise load, we do a
+// simple 64-bits load, for a 128-bits load, we need to call a special
+// method to read 128-bits atomically.
if (op.Size == 2)
{
Operand value = EmitLoad(context, address, exclusive, 3);
@@ -94,7 +94,7 @@ namespace ARMeilleure.Instructions
}
else
{
-//8, 16, 32 or 64-bits (non-pairwise) load.
+// 8, 16, 32 or 64-bits (non-pairwise) load.
Operand value = EmitLoad(context, address, exclusive, op.Size);
SetIntOrZR(context, op.Rt, value);
@@ -137,7 +137,7 @@ namespace ARMeilleure.Instructions
public static void Pfrm(ArmEmitterContext context)
{
-//Memory Prefetch, execute as no-op.
+// Memory Prefetch, execute as no-op.
}
public static void Stlr(ArmEmitterContext context) => EmitStr(context, AccessType.Ordered);
@@ -201,8 +201,8 @@ namespace ARMeilleure.Instructions
if (s != null)
{
-//This is only needed for exclusive stores. The function returns 0
-//when the store is successful, and 1 otherwise.
+// This is only needed for exclusive stores. The function returns 0
+// when the store is successful, and 1 otherwise.
SetIntOrZR(context, op.Rs, s);
}
}
@@ -253,9 +253,9 @@ namespace ARMeilleure.Instructions
private static void EmitBarrier(ArmEmitterContext context)
{
-//Note: This barrier is most likely not necessary, and probably
-//doesn't make any difference since we need to do a ton of stuff
-//(software MMU emulation) to read or write anything anyway.
+// Note: This barrier is most likely not necessary, and probably
+// doesn't make any difference since we need to do a ton of stuff
+// (software MMU emulation) to read or write anything anyway.
}
}
}

@@ -83,7 +83,7 @@ namespace ARMeilleure.Instructions
if (op.Replicate)
{
-//Only loads uses the replicate mode.
+// Only loads uses the replicate mode.
Debug.Assert(isLoad, "Replicate mode is not valid for stores.");
int elems = op.GetBytesCount() >> op.Size;

@@ -14,12 +14,12 @@ namespace ARMeilleure.Instructions
public static void Hint(ArmEmitterContext context)
{
-//Execute as no-op.
+// Execute as no-op.
}
public static void Isb(ArmEmitterContext context)
{
-//Execute as no-op.
+// Execute as no-op.
}
public static void Mrs(ArmEmitterContext context)
@@ -65,21 +65,21 @@ namespace ARMeilleure.Instructions
public static void Nop(ArmEmitterContext context)
{
-//Do nothing.
+// Do nothing.
}
public static void Sys(ArmEmitterContext context)
{
-//This instruction is used to do some operations on the CPU like cache invalidation,
-//address translation and the like.
-//We treat it as no-op here since we don't have any cache being emulated anyway.
+// This instruction is used to do some operations on the CPU like cache invalidation,
+// address translation and the like.
+// We treat it as no-op here since we don't have any cache being emulated anyway.
OpCodeSystem op = (OpCodeSystem)context.CurrOp;
switch (GetPackedId(op))
{
case 0b11_011_0111_0100_001:
{
-//DC ZVA
+// DC ZVA
Operand t = GetIntOrZR(context, op.Rt);
for (long offset = 0; offset < (4 << DczSizeLog2); offset += 8)
@@ -92,7 +92,7 @@ namespace ARMeilleure.Instructions
break;
}
-//No-op
+// No-op
case 0b11_011_0111_1110_001: //DC CIVAC
break;
}

@@ -2,7 +2,7 @@ namespace ARMeilleure.Instructions
{
enum InstName
{
-//Base (AArch64)
+// Base (AArch64)
Adc,
Adcs,
Add,
@@ -102,7 +102,7 @@ namespace ARMeilleure.Instructions
Umulh,
Und,
-//FP & SIMD (AArch64)
+// FP & SIMD (AArch64)
Abs_S,
Abs_V,
Add_S,
@@ -440,7 +440,7 @@ namespace ARMeilleure.Instructions
Zip1_V,
Zip2_V,
-//Base (AArch32)
+// Base (AArch32)
Blx,
Bx,
Cmp,

@@ -57,9 +57,9 @@ namespace ARMeilleure.IntermediateRepresentation
public void Append(Node node)
{
-//If the branch block is not null, then the list of operations
-//should end with a branch instruction. We insert the new operation
-//before this branch.
+// If the branch block is not null, then the list of operations
+// should end with a branch instruction. We insert the new operation
+// before this branch.
if (_branch != null || (Operations.Last != null && IsLeafBlock()))
{
Operations.AddBefore(Operations.Last, node);

@@ -96,9 +96,9 @@ namespace ARMeilleure.Memory
IntPtr[] addresses,
out ulong count)
{
-//This is only supported on windows, but returning
-//false (failed) is also valid for platforms without
-//write tracking support on the OS.
+// This is only supported on windows, but returning
+// false (failed) is also valid for platforms without
+// write tracking support on the OS.
if (RuntimeInformation.IsOSPlatform(OSPlatform.Windows))
{
return MemoryManagementWindows.GetModifiedPages(address, size, addresses, out count);

@@ -46,12 +46,12 @@ namespace ARMeilleure.Memory
             AddressSpaceBits = addressSpaceBits;
             AddressSpaceSize = 1L << addressSpaceBits;
-            //When flat page table is requested, we use a single
-            //array for the mappings of the entire address space.
-            //This has better performance, but also high memory usage.
-            //The multi level page table uses 9 bits per level, so
-            //the memory usage is lower, but the performance is also
-            //lower, since each address translation requires multiple reads.
+            // When flat page table is requested, we use a single
+            // array for the mappings of the entire address space.
+            // This has better performance, but also high memory usage.
+            // The multi level page table uses 9 bits per level, so
+            // the memory usage is lower, but the performance is also
+            // lower, since each address translation requires multiple reads.
             if (useFlatPageTable)
             {
                 PtLevelBits = addressSpaceBits - PageBits;
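
The trade-off that comment describes can be shown concretely. This sketch assumes a 39-bit (12 + 3 * 9) address space and managed arrays; the real MemoryManager keeps these tables in native memory:

    static class PageTableSketch
    {
        private const int PageBits = 12;    // 4 KB pages.
        private const int PtLevelBits = 9;  // 512 entries per level.
        private const int PtLevelMask = (1 << PtLevelBits) - 1;

        // Flat table: one array, one read per translation, but it must be
        // sized for the entire address space up front.
        public static ulong TranslateFlat(ulong[] flatTable, ulong va)
        {
            return flatTable[va >> PageBits] | (va & ((1UL << PageBits) - 1));
        }

        // Multi-level table: much smaller, but one dependent read per level.
        public static ulong TranslateMultiLevel(ulong[][][] table, ulong va)
        {
            int l0 = (int)(va >> (PageBits + 2 * PtLevelBits)) & PtLevelMask;
            int l1 = (int)(va >> (PageBits + PtLevelBits)) & PtLevelMask;
            int l2 = (int)(va >> PageBits) & PtLevelMask;

            return table[l0][l1][l2] | (va & ((1UL << PageBits) - 1));
        }
    }
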
@@ -233,13 +233,13 @@
                 if (nextPtr == IntPtr.Zero)
                 {
-                    //Entry does not yet exist, allocate a new one.
+                    // Entry does not yet exist, allocate a new one.
                     IntPtr newPtr = Allocate((ulong)(PtLevelSize * IntPtr.Size));
-                    //Try to swap the current pointer (should be zero), with the allocated one.
+                    // Try to swap the current pointer (should be zero), with the allocated one.
                     nextPtr = Interlocked.CompareExchange(ref *ptePtr, newPtr, IntPtr.Zero);
-                    //If the old pointer is not null, then another thread already has set it.
+                    // If the old pointer is not null, then another thread already has set it.
                     if (nextPtr != IntPtr.Zero)
                     {
                         Free(newPtr);
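
The allocate, compare-exchange, free-on-loss sequence above is the classic lock-free lazy initialization pattern. A managed sketch of the same idea, with object references in place of raw pointers:

    using System.Threading;

    class LazyLevelSketch
    {
        private long[] _nextLevel;

        public long[] GetOrAddNextLevel(int levelSize)
        {
            long[] next = Volatile.Read(ref _nextLevel);

            if (next == null)
            {
                // Entry does not exist yet; allocate a candidate table.
                long[] newTable = new long[levelSize];

                // Publish it only if no other thread beat us to it.
                // CompareExchange returns the value that was there before.
                next = Interlocked.CompareExchange(ref _nextLevel, newTable, null);

                if (next == null)
                {
                    next = newTable; // We won the race.
                }
                // Otherwise another thread installed its table first; our
                // newTable is dropped (native memory needs an explicit Free,
                // as in the hunk above, while the GC handles it here).
            }

            return next;
        }
    }
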
@@ -501,7 +501,7 @@
         private void AbortWithAlignmentFault(long position)
         {
-            //TODO: Abort mode and exception support on the CPU.
+            // TODO: Abort mode and exception support on the CPU.
             throw new InvalidOperationException($"Tried to compare exchange a misaligned address 0x{position:X16}.");
         }
@@ -614,7 +614,7 @@
         public void ReadBytes(long position, byte[] data, int startIndex, int size)
         {
-            //Note: This will be moved later.
+            // Note: This will be moved later.
             long endAddr = position + size;
             if ((ulong)size > int.MaxValue)
@@ -748,7 +748,7 @@
         public void WriteBytes(long position, byte[] data, int startIndex, int size)
         {
-            //Note: This will be moved later.
+            // Note: This will be moved later.
             long endAddr = position + size;
             if ((ulong)endAddr < (ulong)position)
@@ -778,7 +778,7 @@
         public void CopyBytes(long src, long dst, long size)
         {
-            //Note: This will be moved later.
+            // Note: This will be moved later.
             if (IsContiguous(src, size) &&
                 IsContiguous(dst, size))
             {

@@ -84,7 +84,7 @@ namespace ARMeilleure.Translation
             if (visited.Count < blocks.Count)
             {
-                //Remove unreachable blocks and renumber.
+                // Remove unreachable blocks and renumber.
                 int index = 0;
                 for (LinkedListNode<BasicBlock> node = blocks.First; node != null;)
@@ -129,14 +129,14 @@
                 throw new ArgumentException("Predecessor and successor are not connected.");
             }
-            //Insert the new block on the list of blocks.
+            // Insert the new block on the list of blocks.
             BasicBlock succPrev = successor.Node.Previous?.Value;
             if (succPrev != null && succPrev != predecessor && succPrev.Next == successor)
             {
-                //Can't insert after the predecessor or before the successor.
-                //Here, we insert it before the successor by also spliting another
-                //edge (the one between the block before "successor" and "successor").
+                // Can't insert after the predecessor or before the successor.
+                // Here, we insert it before the successor by also spliting another
+                // edge (the one between the block before "successor" and "successor").
                 BasicBlock splitBlock2 = new BasicBlock(splitBlock.Index + 1);
                 succPrev.Next = splitBlock2;
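
A minimal illustration of edge splitting itself, with a toy Block type; the real method additionally has to keep the block list order and branch operations consistent, which is what forces the second split described above:

    using System.Collections.Generic;

    class Block
    {
        public List<Block> Successors = new List<Block>();
        public List<Block> Predecessors = new List<Block>();
    }

    static class SplitEdgeSketch
    {
        public static Block SplitEdge(Block pred, Block succ)
        {
            Block middle = new Block();

            // Rewire pred -> middle -> succ, so code can be inserted on
            // this one edge without affecting other paths into succ.
            pred.Successors[pred.Successors.IndexOf(succ)] = middle;
            succ.Predecessors[succ.Predecessors.IndexOf(pred)] = middle;

            middle.Predecessors.Add(pred);
            middle.Successors.Add(succ);

            return middle;
        }
    }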

@@ -5,8 +5,8 @@ namespace ARMeilleure.Translation
 {
     static class Dominance
     {
-        //Those methods are an implementation of the algorithms on "A Simple, Fast Dominance Algorithm".
-        //https://www.cs.rice.edu/~keith/EMBED/dom.pdf
+        // Those methods are an implementation of the algorithms on "A Simple, Fast Dominance Algorithm".
+        // https://www.cs.rice.edu/~keith/EMBED/dom.pdf
         public static void FindDominators(ControlFlowGraph cfg)
         {
             BasicBlock Intersect(BasicBlock block1, BasicBlock block2)
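
For reference, the two-finger intersection walk from the cited Cooper, Harvey and Kennedy paper looks roughly like this on a toy node type (the entry block has the highest post-order index, so the finger with the smaller index is deeper and walks up):

    class Node
    {
        public Node ImmediateDominator;
        public int PostOrderIndex;
    }

    static class DominanceSketch
    {
        public static Node Intersect(Node block1, Node block2)
        {
            while (block1 != block2)
            {
                // Walk the deeper finger up the dominator tree until the
                // two fingers meet at the common dominator.
                while (block1.PostOrderIndex < block2.PostOrderIndex)
                {
                    block1 = block1.ImmediateDominator;
                }

                while (block2.PostOrderIndex < block1.PostOrderIndex)
                {
                    block2 = block2.ImmediateDominator;
                }
            }

            return block1;
        }
    }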

@@ -80,7 +80,7 @@ namespace ARMeilleure.Translation
         public Operand Call(Delegate func, params Operand[] callArgs)
         {
-            //Add the delegate to the cache to ensure it will not be garbage collected.
+            // Add the delegate to the cache to ensure it will not be garbage collected.
             func = DelegateCache.GetOrAdd(func);
             IntPtr ptr = Marshal.GetFunctionPointerForDelegate<Delegate>(func);
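
The concern in that comment is that Marshal.GetFunctionPointerForDelegate does not root the delegate: JITted code keeps only the raw pointer, so something must hold a strong reference. A sketch of the caching pattern (the dictionary here is an assumption, not the real DelegateCache):

    using System;
    using System.Collections.Concurrent;
    using System.Runtime.InteropServices;

    static class DelegateCacheSketch
    {
        private static readonly ConcurrentDictionary<Delegate, Delegate> _cache =
            new ConcurrentDictionary<Delegate, Delegate>();

        public static IntPtr GetFunctionPointer(Delegate func)
        {
            // Rooting the delegate prevents it from being collected while
            // generated code still holds the raw function pointer.
            func = _cache.GetOrAdd(func, f => f);

            return Marshal.GetFunctionPointerForDelegate(func);
        }
    }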

@@ -93,7 +93,7 @@ namespace ARMeilleure.Translation
             if (!JitCache.TryFind(offset, out JitCacheEntry funcEntry))
             {
-                //Not found.
+                // Not found.
                 return null;
             }

@@ -35,9 +35,9 @@ namespace ARMeilleure.Translation
         private Operand GetUnique(Operand operand)
         {
-            //Operand is supposed to be a value or reference type based on kind.
-            //We differentiate local variables by reference, but everything else
-            //is supposed to be considered the same, if "Value" is the same.
+            // Operand is supposed to be a value or reference type based on kind.
+            // We differentiate local variables by reference, but everything else
+            // is supposed to be considered the same, if "Value" is the same.
             if (operand.Kind != OperandKind.LocalVariable)
             {
                 if (_uniqueOperands.TryGetValue(operand.Value, out Operand prevOperand))
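
A sketch of the uniquing rule the comment spells out, with a toy Operand type: local variables keep their identity, everything else is deduplicated by its packed value:

    using System.Collections.Generic;

    enum OperandKind { Constant, Register, LocalVariable }

    class Operand
    {
        public OperandKind Kind;
        public ulong Value;
    }

    class OperandUniquingSketch
    {
        private readonly Dictionary<ulong, Operand> _uniqueOperands = new Dictionary<ulong, Operand>();

        public Operand GetUnique(Operand operand)
        {
            if (operand.Kind != OperandKind.LocalVariable)
            {
                // Reuse a previously seen operand with the same packed value.
                if (_uniqueOperands.TryGetValue(operand.Value, out Operand prev))
                {
                    return prev;
                }

                _uniqueOperands.Add(operand.Value, operand);
            }

            return operand;
        }
    }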

@@ -70,7 +70,7 @@ namespace ARMeilleure.Translation
         public static void RunPass(ControlFlowGraph cfg)
         {
-            //Computer local register inputs and outputs used inside blocks.
+            // Computer local register inputs and outputs used inside blocks.
             RegisterMask[] localInputs = new RegisterMask[cfg.Blocks.Count];
             RegisterMask[] localOutputs = new RegisterMask[cfg.Blocks.Count];
@@ -101,7 +101,7 @@
                 }
             }
-            //Compute global register inputs and outputs used across blocks.
+            // Compute global register inputs and outputs used across blocks.
             RegisterMask[] globalCmnOutputs = new RegisterMask[cfg.Blocks.Count];
             RegisterMask[] globalInputs = new RegisterMask[cfg.Blocks.Count];
@@ -115,7 +115,7 @@
             {
                 modified = false;
-                //Compute register outputs.
+                // Compute register outputs.
                 for (int index = cfg.PostOrderBlocks.Length - 1; index >= 0; index--)
                 {
                     BasicBlock block = cfg.PostOrderBlocks[index];
@@ -162,7 +162,7 @@
                 }
             }
-                //Compute register inputs.
+                // Compute register inputs.
                 for (int index = 0; index < cfg.PostOrderBlocks.Length; index++)
                 {
                     BasicBlock block = cfg.PostOrderBlocks[index];
@@ -191,7 +191,7 @@
                 }
             }
             while (modified);
-            //Insert load and store context instructions where needed.
+            // Insert load and store context instructions where needed.
             foreach (BasicBlock block in cfg.Blocks)
             {
                 bool hasContextLoad = HasContextLoad(block);
@@ -201,8 +201,8 @@
                     block.Operations.RemoveFirst();
                 }
-                //The only block without any predecessor should be the entry block.
-                //It always needs a context load as it is the first block to run.
+                // The only block without any predecessor should be the entry block.
+                // It always needs a context load as it is the first block to run.
                 if (block.Predecessors.Count == 0 || hasContextLoad)
                 {
                     LoadLocals(block, globalInputs[block.Index].VecMask, RegisterType.Vector);
@@ -396,7 +396,7 @@
         private static long ClearCallerSavedIntRegs(long mask)
         {
-            //TODO: ARM32 support.
+            // TODO: ARM32 support.
             mask &= ~(CallerSavedIntRegistersMask | PStateNzcvFlagsMask);
             return mask;
@@ -404,7 +404,7 @@
         private static long ClearCallerSavedVecRegs(long mask)
         {
-            //TODO: ARM32 support.
+            // TODO: ARM32 support.
             mask &= ~CallerSavedVecRegistersMask;
             return mask;
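
The pass above iterates a forward pass for outputs and a backward pass for inputs until nothing changes. A simplified dataflow solver in the same shape, using plain ulong bit masks in place of RegisterMask; this is textbook liveness propagation under assumed semantics, not the pass's exact rules:

    static class LivenessSketch
    {
        public static void Solve(
            int[][] predecessors,
            int[][] successors,
            ulong[] localInputs,   // Registers a block reads before writing them.
            ulong[] localOutputs,  // Registers a block writes.
            ulong[] globalInputs,
            ulong[] globalOutputs)
        {
            bool modified;

            do
            {
                modified = false;

                // Outputs: registers written by the block or on any path into it.
                for (int block = 0; block < localOutputs.Length; block++)
                {
                    ulong outputs = localOutputs[block];

                    foreach (int pred in predecessors[block])
                    {
                        outputs |= globalOutputs[pred];
                    }

                    if (globalOutputs[block] != outputs)
                    {
                        globalOutputs[block] = outputs;
                        modified = true;
                    }
                }

                // Inputs: registers the block reads, plus registers a
                // successor needs that this block does not itself provide.
                for (int block = localInputs.Length - 1; block >= 0; block--)
                {
                    ulong inputs = localInputs[block];

                    foreach (int succ in successors[block])
                    {
                        inputs |= globalInputs[succ] & ~localOutputs[block];
                    }

                    if (globalInputs[block] != inputs)
                    {
                        globalInputs[block] = inputs;
                        modified = true;
                    }
                }
            }
            while (modified);
        }
    }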

@@ -54,7 +54,7 @@ namespace ARMeilleure.Translation
             Queue<BasicBlock> dfPhiBlocks = new Queue<BasicBlock>();
-            //First pass, get all defs and locals uses.
+            // First pass, get all defs and locals uses.
             foreach (BasicBlock block in cfg.Blocks)
             {
                 Operand[] localDefs = new Operand[RegisterConsts.TotalCount];
@@ -125,7 +125,7 @@
                 }
             }
-            //Second pass, rename variables with definitions on different blocks.
+            // Second pass, rename variables with definitions on different blocks.
             foreach (BasicBlock block in cfg.Blocks)
             {
                 Operand[] localDefs = new Operand[RegisterConsts.TotalCount];
@@ -213,10 +213,10 @@
         private static Operand InsertPhi(DefMap[] globalDefs, BasicBlock block, Operand operand)
         {
-            //This block has a Phi that has not been materialized yet, but that
-            //would define a new version of the variable we're looking for. We need
-            //to materialize the Phi, add all the block/operand pairs into the Phi, and
-            //then use the definition from that Phi.
+            // This block has a Phi that has not been materialized yet, but that
+            // would define a new version of the variable we're looking for. We need
+            // to materialize the Phi, add all the block/operand pairs into the Phi, and
+            // then use the definition from that Phi.
             Operand local = Local(operand.Type);
             PhiNode phi = new PhiNode(local, block.Predecessors.Count);
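
A sketch of deferred Phi materialization as that comment describes it: the Phi defines a fresh name and collects one (predecessor, reaching definition) pair per incoming edge. Strings stand in for operands, and reachingDefAtEndOf is a hypothetical lookup helper:

    using System;

    class PhiNodeSketch
    {
        public string Destination;                     // The fresh local the Phi defines.
        public (string Block, string Value)[] Sources; // One entry per predecessor.
    }

    static class SsaSketch
    {
        public static PhiNodeSketch InsertPhi(
            string[] predecessors,
            string variable,
            Func<string, string> reachingDefAtEndOf)
        {
            var phi = new PhiNodeSketch
            {
                Destination = variable + "'",
                Sources = new (string, string)[predecessors.Length],
            };

            for (int i = 0; i < predecessors.Length; i++)
            {
                // Each incoming edge contributes whatever definition reaches it.
                phi.Sources[i] = (predecessors[i], reachingDefAtEndOf(predecessors[i]));
            }

            return phi;
        }
    }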

@@ -145,11 +145,11 @@
             {
                 context.MarkLabel(lblPredicateSkip);
-                //If this is the last op on the block, and there's no "next" block
-                //after this one, then we have to return right now, with the address
-                //of the next instruction to be executed (in the case that the condition
-                //is false, and the branch was not taken, as all basic blocks should end
-                //with some kind of branch).
+                // If this is the last op on the block, and there's no "next" block
+                // after this one, then we have to return right now, with the address
+                // of the next instruction to be executed (in the case that the condition
+                // is false, and the branch was not taken, as all basic blocks should end
+                // with some kind of branch).
                 if (isLastOp && block.Next == null)
                 {
                     context.Return(Const(opCode.Address + (ulong)opCode.OpCodeSizeInBytes));
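
The fall-through rule in that comment, reduced to its decision logic (names are illustrative, not the real emitter API): when a predicated branch is the block's last operation and no block follows, the translated function returns the address of the next guest instruction so execution can resume there.

    static class PredicateSkipSketch
    {
        public static ulong? FallThroughTarget(bool isLastOp, bool hasNextBlock, ulong opAddress, int opSize)
        {
            if (isLastOp && !hasNextBlock)
            {
                // The branch was not taken; resume at the following instruction.
                return opAddress + (ulong)opSize;
            }

            return null; // Execution continues into the next block as usual.
        }
    }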