PR feedback and potential fixes

commit 8126ab074d
Gabriel A, 2024-02-27 00:01:04 -03:00
2 changed files with 14 additions and 11 deletions

First changed file:

@@ -325,9 +325,9 @@ namespace Ryujinx.Cpu.Jit
                LateMap();
            }

-           updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
+           ulong lastPageAddress = EndAddress - GuestPageSize;

-           if (va + size > EndAddress - GuestPageSize)
+           if (va + size > lastPageAddress)
            {
                // Protections at the last page also applies to the bridge, if we have one.
                // (This is because last page access is always done on the bridge, not on our base mapping,
@@ -337,11 +337,15 @@ namespace Ryujinx.Cpu.Jit
                {
                    IntPtr ptPtr = _baseMemory.GetPointerForProtection(Size, _hostPageSize, protection);
-                   updatePtCallback(EndAddress - GuestPageSize, ptPtr + (IntPtr)(_hostPageSize - GuestPageSize), GuestPageSize);
+                   updatePtCallback(lastPageAddress, ptPtr + (IntPtr)(_hostPageSize - GuestPageSize), GuestPageSize);
                }

                _lastPageProtection = protection;
+
+               size = lastPageAddress - va;
            }
+
+           updatePtCallback(va, _baseMemory.GetPointerForProtection(va - Address, size, protection), size);
        }

        public IntPtr GetPointer(ulong va, ulong size)
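
Note on the two hunks above: the main-range page table update now runs after the last-page handling, and size is clamped first, so the base-mapping update no longer covers the guest page that is served by the bridge. A minimal sketch of the clamping under those assumptions, with illustrative names rather than the actual Ryujinx code:

    internal static class ReprotectMath
    {
        // Clamp a reprotect range so it stops before the bridged last guest
        // page (which gets its protection through the bridge mapping instead).
        public static ulong ClampToBridgedPage(ulong va, ulong size, ulong endAddress, ulong guestPageSize)
        {
            ulong lastPageAddress = endAddress - guestPageSize;

            if (va + size > lastPageAddress)
            {
                size = lastPageAddress - va;
            }

            return size;
        }
    }

For example, with va = 0, size = 0x4000, endAddress = 0x4000 and 0x1000 guest pages, the clamped size is 0x3000; the final 0x1000 bytes are reprotected through the bridge.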
@@ -349,9 +353,9 @@ namespace Ryujinx.Cpu.Jit
            Debug.Assert(va >= Address);
            Debug.Assert(va + size <= EndAddress);

-           if (va >= EndAddress - _hostPageSize && _hasBridgeAtEnd)
+           if (va >= EndAddress - GuestPageSize && _hasBridgeAtEnd)
            {
-               return _baseMemory.GetPointer(Size + va - (EndAddress - _hostPageSize), size);
+               return _baseMemory.GetPointer(Size + (_hostPageSize - GuestPageSize), size);
            }

            return _baseMemory.GetPointer(va - Address, size);
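
The GetPointer change above corrects the bridged last-page lookup on two points: the redirect now triggers only for the last guest page (GuestPageSize, not _hostPageSize), and the returned pointer is the fixed bridge slot rather than an offset derived from va. The bridge is one host page appended at offset Size in the backing block, with the last guest page at its tail. A hedged sketch of that offset math, using illustrative names:

    internal static class BridgeMath
    {
        // Offset of the last guest page inside the backing block, assuming a
        // single bridge host page appended at offset partitionSize.
        public static ulong LastPageOffset(ulong partitionSize, ulong hostPageSize, ulong guestPageSize)
        {
            return partitionSize + (hostPageSize - guestPageSize);
        }
    }

With 0x4000 host pages and 0x1000 guest pages this lands 0x3000 bytes into the bridge page, matching the ptPtr + (IntPtr)(_hostPageSize - GuestPageSize) adjustment the reprotect path uses.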
@@ -383,7 +387,6 @@ namespace Ryujinx.Cpu.Jit
                    _baseMemory.Reprotect(Size, _hostPageSize, _lastPageProtection, false);
                    ptPtr = _baseMemory.GetPointer(Size, _hostPageSize);
-                   ;
                }

                updatePtCallback(EndAddress - GuestPageSize, ptPtr + (IntPtr)(_hostPageSize - GuestPageSize), GuestPageSize);
@ -554,7 +557,7 @@ namespace Ryujinx.Cpu.Jit
switch (type) switch (type)
{ {
case MappingType.None: case MappingType.None:
ulong alignment = MemoryBlock.GetPageSize(); ulong alignment = _hostPageSize;
bool unmappedBefore = map.Predecessor == null || bool unmappedBefore = map.Predecessor == null ||
(map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment)); (map.Predecessor.Type == MappingType.None && map.Predecessor.Address <= BitUtils.AlignDown(va, alignment));
@ -611,7 +614,7 @@ namespace Ryujinx.Cpu.Jit
{ {
ulong endAddress = va + size; ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize(); ulong alignment = _hostPageSize;
// Expand the range outwards based on page size to ensure that at least the requested region is mapped. // Expand the range outwards based on page size to ensure that at least the requested region is mapped.
ulong vaAligned = BitUtils.AlignDown(va, alignment); ulong vaAligned = BitUtils.AlignDown(va, alignment);
@ -635,7 +638,7 @@ namespace Ryujinx.Cpu.Jit
map = newMap; map = newMap;
} }
map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, MemoryBlock.GetPageSize())); map.Map(_baseMemory, Address, _privateMemoryAllocator.Allocate(map.Size, _hostPageSize));
} }
if (map.EndAddress >= endAddressAligned) if (map.EndAddress >= endAddressAligned)
@ -649,7 +652,7 @@ namespace Ryujinx.Cpu.Jit
{ {
ulong endAddress = va + size; ulong endAddress = va + size;
ulong alignment = MemoryBlock.GetPageSize(); ulong alignment = _hostPageSize;
// If the adjacent mappings are unmapped, expand the range outwards, // If the adjacent mappings are unmapped, expand the range outwards,
// otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use. // otherwise shrink it inwards. We must ensure we won't unmap pages that might still be in use.
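
The four alignment hunks in this file make the same substitution: alignment now comes from the cached _hostPageSize field instead of calling MemoryBlock.GetPageSize() at each site. The underlying requirement is unchanged: private mappings are managed at host page granularity, so guest ranges must be expanded outwards to host page boundaries before mapping or unmapping. A hedged sketch of that expansion (a from-scratch helper, not Ryujinx's BitUtils):

    internal static class HostAlign
    {
        // Expand [va, va + size) outwards to host page boundaries.
        // hostPageSize is assumed to be a power of two.
        public static (ulong Address, ulong Size) Expand(ulong va, ulong size, ulong hostPageSize)
        {
            ulong mask = hostPageSize - 1;
            ulong vaAligned = va & ~mask;                   // align down
            ulong endAligned = (va + size + mask) & ~mask;  // align up

            return (vaAligned, endAligned - vaAligned);
        }
    }

On a 16K host page, a 4K guest mapping at va = 0x1000 expands to the whole 0x0-0x4000 host page, which is why the unmap path above must first check that the neighbouring mappings are also unmapped.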

Second changed file:

@@ -182,7 +182,7 @@ namespace Ryujinx.Cpu.Jit
                if (address < _nativePageTable.Size - _hostPageSize)
                {
                    // Some prefetch instructions do not cause faults with invalid addresses.
-                   // Retry if we are hiting a case where the page table is unmapped, the next
+                   // Retry if we are hitting a case where the page table is unmapped, the next
                    // run will execute the actual instruction.
                    // The address loaded from the page table will be invalid, and it should hit the else case
                    // if the instruction faults on unmapped or protected memory.
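
For context on the comment fixed above: some prefetch instructions do not fault on invalid addresses, so when the fault offset still lands inside the native page table, the entry was merely unmapped and the faulting instruction can simply be retried. A hedged sketch of that classification, with illustrative names (the _hostPageSize subtraction mirrors the check in the diff; the actual handler does more than this):

    internal static class FaultClassifier
    {
        // True if the fault offset falls inside the native page table (minus
        // its trailing host page), meaning a retry is safe; false means the
        // guest genuinely faulted on unmapped or protected memory.
        public static bool ShouldRetry(ulong faultOffset, ulong pageTableSize, ulong hostPageSize)
        {
            return faultOffset < pageTableSize - hostPageSize;
        }
    }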