Optimized memory modified check

This was initially, in some cases, more expensive than simply sending the data again. It should now perform much better.
gdkchan authored 2018-12-11 02:29:15 +01:00, committed by Ryada Productions
parent 36e8e074c9
commit ee5699838b
3 changed files with 71 additions and 145 deletions
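The idea behind the change, as a rough sketch with simplified stand-in types (a Dictionary instead of the real page table of host pointers, so this is not the actual Ryujinx code): pages under observation are remembered and then unmapped from the software page table, so the first access after observation takes the slow translation path, which restores the mapping and raises an event. Checking whether a region was modified then costs nothing until something actually writes to it.

// Minimal sketch of the write-tracking idea (assumed names, simplified types).
using System;
using System.Collections.Generic;

class ObservedPageTracker
{
    public const int  PageBits = 12;
    public const long PageSize = 1L << PageBits;
    public const long PageMask = PageSize - 1;

    // Guest page index -> host base offset. Zero means "unmapped, take the slow path".
    private readonly Dictionary<long, long> _pageTable     = new Dictionary<long, long>();
    private readonly Dictionary<long, long> _observedPages = new Dictionary<long, long>();

    public event EventHandler<long> ObservedAccess;

    public void Map(long position, long hostBase) => _pageTable[position >> PageBits] = hostBase;

    // Remember the current mapping of every page in the region, then clear it,
    // so the next access to any of those pages goes through the slow path below.
    public void StartObservingRegion(long position, long size)
    {
        long endPosition = (position + size + PageMask) & ~PageMask;

        position &= ~PageMask;

        while (position < endPosition)
        {
            long page = position >> PageBits;

            _observedPages[page] = Translate(position); // host base, position is page aligned here
            _pageTable[page]     = 0;

            position += PageSize;
        }
    }

    public long Translate(long position)
    {
        long page = position >> PageBits;

        if (_pageTable.TryGetValue(page, out long host) && host != 0)
        {
            return host + (position & PageMask); // fast path, page is mapped
        }

        if (_observedPages.TryGetValue(page, out long saved))
        {
            _pageTable[page] = saved;               // restore the mapping
            ObservedAccess?.Invoke(this, position); // report the first touch

            return saved + (position & PageMask);
        }

        throw new InvalidOperationException($"Page fault at 0x{position:x}");
    }
}

// Usage: observe a mapped region, then the first translation of it reports the touch.
//   var mem = new ObservedPageTracker();
//   mem.Map(0x1000, hostBase: 0x100000);
//   mem.ObservedAccess += (s, pos) => Console.WriteLine($"touched 0x{pos:x}");
//   mem.StartObservingRegion(0x1000, 0x20);
//   mem.Translate(0x1008); // prints "touched 0x1008" and restores the fast path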

ChocolArm64/Memory/MemoryManager.cs

@@ -17,7 +17,7 @@ namespace ChocolArm64.Memory
     {
         private const int PtLvl0Bits = 13;
         private const int PtLvl1Bits = 14;
-        private const int PtPageBits = 12;
+        public const int PtPageBits = 12;
         private const int PtLvl0Size = 1 << PtLvl0Bits;
         private const int PtLvl1Size = 1 << PtLvl1Bits;
@@ -55,6 +55,8 @@ namespace ChocolArm64.Memory
         public event EventHandler<InvalidAccessEventArgs> InvalidAccess;
+        public event EventHandler<InvalidAccessEventArgs> ObservedAccess;
+
         public MemoryManager(IntPtr ram)
         {
             _monitors = new Dictionary<int, ArmMonitor>();
@@ -728,14 +730,18 @@ Unmapped:
         {
             long key = position >> PtPageBits;

+            InvalidAccessEventArgs e = new InvalidAccessEventArgs(position);
+
             if (_observedPages.TryGetValue(key, out IntPtr ptr))
             {
                 SetPtEntry(position, (byte*)ptr);

+                ObservedAccess?.Invoke(this, e);
+
                 return (byte*)ptr + (position & PageMask);
             }

-            InvalidAccess?.Invoke(this, new InvalidAccessEventArgs(position));
+            InvalidAccess?.Invoke(this, e);

             throw new VmmPageFaultException(position);
         }
@@ -784,53 +790,20 @@ Unmapped:
             _pageTable[l0][l1] = ptr;
         }

-        public (bool[], int) IsRegionModified(long position, long size)
-        {
-            long endPosition = (position + size + PageMask) & ~PageMask;
-
-            position &= ~PageMask;
-
-            size = endPosition - position;
-
-            bool[] modified = new bool[size >> PtPageBits];
-
-            int count = 0;
-
-            lock (_observedPages)
-            {
-                for (int page = 0; page < modified.Length; page++)
-                {
-                    byte* ptr = Translate(position);
-
-                    if (_observedPages.TryAdd(position >> PtPageBits, (IntPtr)ptr))
-                    {
-                        modified[page] = true;
-
-                        count++;
-                    }
-                    else
-                    {
-                        long l0 = (position >> PtLvl0Bit) & PtLvl0Mask;
-                        long l1 = (position >> PtLvl1Bit) & PtLvl1Mask;
-
-                        byte** lvl1 = _pageTable[l0];
-
-                        if (lvl1 != null)
-                        {
-                            if (modified[page] = lvl1[l1] != null)
-                            {
-                                count++;
-                            }
-                        }
-                    }
-
-                    SetPtEntry(position, null);
-
-                    position += PageSize;
-                }
-            }
-
-            return (modified, count);
-        }
+        public void StartObservingRegion(long position, long size)
+        {
+            long endPosition = (position + size + PageMask) & ~PageMask;
+
+            position &= ~PageMask;
+
+            while ((ulong)position < (ulong)endPosition)
+            {
+                _observedPages[position >> PtPageBits] = (IntPtr)Translate(position);
+
+                SetPtEntry(position, null);
+
+                position += PageSize;
+            }
+        }

         public void StopObservingRegion(long position, long size)
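From a caller's point of view the new flow is: subscribe to ObservedAccess once, call StartObservingRegion over the range of interest, and treat any page reported by the event as dirty. A hedged sketch of such a consumer (a hypothetical helper class; the real consumer is NvGpuVmmCache, in the third file of this commit):

using ChocolArm64.Events;
using ChocolArm64.Memory;
using System.Collections.Generic;

class RegionWriteTracker
{
    private readonly MemoryManager _memory;
    private readonly HashSet<long> _dirtyPages = new HashSet<long>();

    public RegionWriteTracker(MemoryManager memory)
    {
        _memory = memory;

        _memory.ObservedAccess += OnObservedAccess;
    }

    private void OnObservedAccess(object sender, InvalidAccessEventArgs e)
    {
        lock (_dirtyPages)
        {
            _dirtyPages.Add(e.Position >> MemoryManager.PtPageBits);
        }
    }

    // Returns true when any page of the region was written since the last call,
    // then re-arms observation so future writes are reported again.
    public bool ConsumeModified(long position, long size)
    {
        long endPosition = (position + size + MemoryManager.PageMask) & ~MemoryManager.PageMask;

        long start = position & ~MemoryManager.PageMask;

        bool modified = false;

        lock (_dirtyPages)
        {
            for (long pos = start; pos < endPosition; pos += MemoryManager.PageSize)
            {
                modified |= _dirtyPages.Remove(pos >> MemoryManager.PtPageBits);
            }
        }

        _memory.StartObservingRegion(position, size);

        return modified;
    }
}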

Ryujinx.Graphics/Memory/NvGpuVmm.cs

@@ -36,7 +36,7 @@ namespace Ryujinx.Graphics.Memory
         {
             this.Memory = Memory;

-            Cache = new NvGpuVmmCache();
+            Cache = new NvGpuVmmCache(Memory);

            PageTable = new long[PTLvl0Size][];
        }
@@ -262,7 +262,7 @@ namespace Ryujinx.Graphics.Memory
         public bool IsRegionModified(long PA, long Size, NvGpuBufferType BufferType)
         {
-            return Cache.IsRegionModified(Memory, BufferType, PA, Size);
+            return Cache.IsRegionModified(PA, Size, BufferType);
         }

         public bool TryGetHostAddress(long Position, long Size, out IntPtr Ptr)
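A hypothetical call site, only to show the intent of the reordered signature: translate the GPU virtual address, ask the cache whether that physical range changed for the given buffer type, and re-read guest memory only when it did. This assumes the vmm exposes a GetPhysicalAddress translation like other callers in this repo use; the read and upload steps are passed in as delegates because the real renderer code is out of scope here, and NvGpuBufferType.Texture is just an example value.

using System;
using Ryujinx.Graphics.Memory;

static class TextureUploader
{
    // Hypothetical helper: skip the upload when the cache says nothing changed.
    public static void UploadIfModified(
        NvGpuVmm                 vmm,
        long                     gpuVa,
        long                     size,
        Func<long, long, byte[]> read,
        Action<byte[]>           upload)
    {
        long pa = vmm.GetPhysicalAddress(gpuVa);

        if (vmm.IsRegionModified(pa, size, NvGpuBufferType.Texture))
        {
            upload(read(pa, size)); // re-read and upload only on a reported change
        }
    }
}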

Ryujinx.Graphics/Memory/NvGpuVmmCache.cs

@@ -1,130 +1,83 @@
+using ChocolArm64.Events;
 using ChocolArm64.Memory;
 using System;
+using System.Collections.Concurrent;
 namespace Ryujinx.Graphics.Memory
 {
     class NvGpuVmmCache
     {
-        private struct CachedResource
+        private const int PageBits = MemoryManager.PtPageBits;
+        private const long PageSize = MemoryManager.PageSize;
+        private const long PageMask = MemoryManager.PageMask;
+        private ConcurrentDictionary<long, int>[] CachedPages;
+        private MemoryManager _memory;
+        public NvGpuVmmCache(MemoryManager memory)
         {
-            public long Key;
-            public int Mask;
+            _memory = memory;
-            public CachedResource(long Key, int Mask)
-            {
-                this.Key = Key;
-                this.Mask = Mask;
-            }
+            _memory.ObservedAccess += MemoryAccessHandler;
-            public override int GetHashCode()
-            {
-                return (int)(Key * 23 + Mask);
-            }
-            public override bool Equals(object obj)
-            {
-                return obj is CachedResource Cached && Equals(Cached);
-            }
-            public bool Equals(CachedResource other)
-            {
-                return Key == other.Key && Mask == other.Mask;
-            }
+            CachedPages = new ConcurrentDictionary<long, int>[1 << 20];
         }
-        private ValueRangeSet<CachedResource> CachedRanges;
-        public NvGpuVmmCache()
+        private void MemoryAccessHandler(object sender, InvalidAccessEventArgs e)
        {
-            CachedRanges = new ValueRangeSet<CachedResource>();
+            long pa = _memory.GetPhysicalAddress(e.Position);
+            CachedPages[pa >> PageBits]?.Clear();
        }
-        public bool IsRegionModified(MemoryManager Memory, NvGpuBufferType BufferType, long Start, long Size)
+        public bool IsRegionModified(long position, long size, NvGpuBufferType bufferType)
        {
-            (bool[] Modified, long ModifiedCount) = Memory.IsRegionModified(Start, Size);
+            long pa = _memory.GetPhysicalAddress(position);
-            //Remove all modified ranges.
-            int Index = 0;
+            long addr = pa;
-            long Position = Start & ~NvGpuVmm.PageMask;
+            long endAddr = (addr + size + PageMask) & ~PageMask;
-            while (ModifiedCount > 0)
+            int newBuffMask = 1 << (int)bufferType;
+            _memory.StartObservingRegion(position, size);
+            long cachedPagesCount = 0;
+            while (addr < endAddr)
             {
-                if (Modified[Index++])
-                {
-                    CachedRanges.Remove(new ValueRange<CachedResource>(Position, Position + NvGpuVmm.PageSize));
+                long page = addr >> PageBits;
-                    ModifiedCount--;
+                ConcurrentDictionary<long, int> dictionary = CachedPages[page];
+                if (dictionary == null)
+                {
+                    dictionary = new ConcurrentDictionary<long, int>();
+                    CachedPages[page] = dictionary;
                }
-                Position += NvGpuVmm.PageSize;
+                if (dictionary.TryGetValue(pa, out int currBuffMask))
+                {
+                    if ((currBuffMask & newBuffMask) != 0)
+                    {
+                        cachedPagesCount++;
+                    }
+                    else
+                    {
+                        dictionary[pa] |= newBuffMask;
+                    }
+                }
+                else
+                {
+                    dictionary[pa] = newBuffMask;
+                }
+                addr += PageSize;
+            }
-            //Mask has the bit set for the current resource type.
-            //If the region is not yet present on the list, then a new ValueRange
-            //is directly added with the current resource type as the only bit set.
-            //Otherwise, it just sets the bit for this new resource type on the current mask.
-            //The physical address of the resource is used as key, those keys are used to keep
-            //track of resources that are already on the cache. A resource may be inside another
-            //resource, and in this case we should return true if the "sub-resource" was not
-            //yet cached.
-            int Mask = 1 << (int)BufferType;
-            CachedResource NewCachedValue = new CachedResource(Start, Mask);
-            ValueRange<CachedResource> NewCached = new ValueRange<CachedResource>(Start, Start + Size);
-            ValueRange<CachedResource>[] Ranges = CachedRanges.GetAllIntersections(NewCached);
-            bool IsKeyCached = Ranges.Length > 0 && Ranges[0].Value.Key == Start;
-            long LastEnd = NewCached.Start;
-            long Coverage = 0;
-            for (Index = 0; Index < Ranges.Length; Index++)
-            {
-                ValueRange<CachedResource> Current = Ranges[Index];
-                CachedResource Cached = Current.Value;
-                long RgStart = Math.Max(Current.Start, NewCached.Start);
-                long RgEnd = Math.Min(Current.End, NewCached.End);
-                if ((Cached.Mask & Mask) != 0)
-                {
-                    Coverage += RgEnd - RgStart;
-                }
-                //Highest key value has priority, this prevents larger resources
-                //for completely invalidating smaller ones on the cache. For example,
-                //consider that a resource in the range [100, 200) was added, and then
-                //another one in the range [50, 200). We prevent the new resource from
-                //completely replacing the old one by spliting it like this:
-                //New resource key is added at [50, 100), old key is still present at [100, 200).
-                if (Cached.Key < Start)
-                {
-                    Cached.Key = Start;
-                }
-                Cached.Mask |= Mask;
-                CachedRanges.Add(new ValueRange<CachedResource>(RgStart, RgEnd, Cached));
-                if (RgStart > LastEnd)
-                {
-                    CachedRanges.Add(new ValueRange<CachedResource>(LastEnd, RgStart, NewCachedValue));
-                }
-                LastEnd = RgEnd;
-            }
-            if (LastEnd < NewCached.End)
-            {
-                CachedRanges.Add(new ValueRange<CachedResource>(LastEnd, NewCached.End, NewCachedValue));
-            }
-            return !IsKeyCached || Coverage != Size;
+            return cachedPagesCount != (endAddr - pa + PageMask) >> PageBits;
        }
    }
}
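The bookkeeping in the new IsRegionModified boils down to: each physical page carries a small map from a resource's base address to a bitmask of NvGpuBufferType values that already have it cached; a CPU write to an observed page clears that page's map (see MemoryAccessHandler above), so the next check reports the region as modified and re-arms observation. A stripped-down sketch of that idea, keyed by page only and using a plain enum and dictionary (the real code also keys by the resource's base address, so overlapping resources don't evict each other):

using System.Collections.Generic;

enum BufferType { Index = 0, Vertex = 1, Texture = 2 } // simplified stand-in for NvGpuBufferType

class PageTypeCache
{
    // Page index -> bitmask of buffer types that cached this page since the last write.
    private readonly Dictionary<long, int> _pages = new Dictionary<long, int>();

    // True when the caller should re-read the page for this buffer type.
    public bool IsModified(long page, BufferType type)
    {
        int bit = 1 << (int)type;

        if (_pages.TryGetValue(page, out int mask))
        {
            if ((mask & bit) != 0)
            {
                return false; // this type already has the page cached, nothing changed
            }

            _pages[page] = mask | bit;

            return true; // page is cached, but not yet for this buffer type
        }

        _pages[page] = bit;

        return true; // first time the page is seen since the last write
    }

    // Called when the memory manager reports a write to an observed page.
    public void Invalidate(long page) => _pages.Remove(page);
}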