Common/MemArenaWin: Rewrite LazyMemoryRegion to manually map memory blocks into the large memory region as needed.

Internal details: The large region is split into individual same-sized blocks of memory. On creation, we allocate a single block of memory that will always remain zero and map it into the entire region. The first time any block is written to, we replace the mapped zero block with a newly allocated block of memory. On clear, we map the zero block back in and deallocate the data blocks. That way we only ever allocate one zero block plus the handful of real data blocks that the JitCache actually writes to.
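For illustration, here is a minimal sketch of that block-swapping scheme built on the Win32 placeholder APIs (VirtualAlloc2, MapViewOfFile3; Windows 10 1703+, link against onecore.lib). All names here are hypothetical and error handling is omitted; this is not the actual LazyMemoryRegion code, just one plausible way to implement the swap described above.

#include <windows.h>

#include <cstdint>
#include <vector>

class LazyRegionSketch
{
public:
  static constexpr size_t BLOCK_SIZE = 8 * 1024 * 1024;

  explicit LazyRegionSketch(size_t block_count) : m_block_mapped(block_count, false)
  {
    const size_t total_size = block_count * BLOCK_SIZE;

    // Reserve the entire region as a placeholder; nothing is committed yet.
    m_base = static_cast<uint8_t*>(
        VirtualAlloc2(GetCurrentProcess(), nullptr, total_size,
                      MEM_RESERVE | MEM_RESERVE_PLACEHOLDER, PAGE_NOACCESS, nullptr, 0));

    // A single pagefile-backed block of zeroes, shared by every untouched slot.
    m_zero_block = CreateFileMapping2(INVALID_HANDLE_VALUE, nullptr, FILE_MAP_READ,
                                      PAGE_READONLY, SEC_COMMIT, BLOCK_SIZE, nullptr,
                                      nullptr, 0);

    // Split the placeholder into block-sized pieces and map the zero block
    // read-only into each of them.
    for (size_t i = 0; i < block_count; ++i)
    {
      uint8_t* const addr = m_base + i * BLOCK_SIZE;
      if (i != block_count - 1)  // the last remaining piece is already block-sized
        VirtualFree(addr, BLOCK_SIZE, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
      MapViewOfFile3(m_zero_block, GetCurrentProcess(), addr, 0, BLOCK_SIZE,
                     MEM_REPLACE_PLACEHOLDER, PAGE_READONLY, nullptr, 0);
    }
  }

  // Called before the first write to a block: replace the shared zero mapping
  // with a freshly committed, writable data block.
  void MakeBlockWritable(size_t block_index)
  {
    if (m_block_mapped[block_index])
      return;
    uint8_t* const addr = m_base + block_index * BLOCK_SIZE;
    UnmapViewOfFile2(GetCurrentProcess(), addr, MEM_PRESERVE_PLACEHOLDER);
    VirtualAlloc2(GetCurrentProcess(), addr, BLOCK_SIZE,
                  MEM_COMMIT | MEM_RESERVE | MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE,
                  nullptr, 0);
    m_block_mapped[block_index] = true;
  }

  // On clear, release every data block and map the zero block back in.
  void Clear()
  {
    for (size_t i = 0; i < m_block_mapped.size(); ++i)
    {
      if (!m_block_mapped[i])
        continue;
      uint8_t* const addr = m_base + i * BLOCK_SIZE;
      VirtualFree(addr, BLOCK_SIZE, MEM_RELEASE | MEM_PRESERVE_PLACEHOLDER);
      MapViewOfFile3(m_zero_block, GetCurrentProcess(), addr, 0, BLOCK_SIZE,
                     MEM_REPLACE_PLACEHOLDER, PAGE_READONLY, nullptr, 0);
      m_block_mapped[i] = false;
    }
  }

private:
  uint8_t* m_base = nullptr;
  HANDLE m_zero_block = nullptr;
  std::vector<bool> m_block_mapped;  // which slots hold real data blocks
};

Because every untouched slot maps the same zero block, reads of unwritten entries stay valid, and the kernel only commits physical memory for blocks that were actually written.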
Admiral H. Curtiss 2023-11-28 21:17:12 +01:00
commit 3364d571cc
3 changed files with 184 additions and 7 deletions

@@ -121,9 +121,14 @@ void JitBaseBlockCache::FinalizeBlock(JitBlock& block, bool block_link,
 {
   size_t index = FastLookupIndexForAddress(block.effectiveAddress, block.feature_flags);
   if (m_entry_points_ptr)
+  {
+    m_entry_points_arena.EnsureMemoryPageWritable(index * sizeof(u8*));
     m_entry_points_ptr[index] = block.normalEntry;
+  }
   else
+  {
     m_fast_block_map_fallback[index] = &block;
+  }
   block.fast_block_map_index = index;
   block.physical_addresses = physical_addresses;
@@ -485,9 +490,14 @@ JitBlock* JitBaseBlockCache::MoveBlockIntoFastCache(u32 addr, CPUEmuFeatureFlags
   // And create a new one
   size_t index = FastLookupIndexForAddress(addr, feature_flags);
   if (m_entry_points_ptr)
+  {
+    m_entry_points_arena.EnsureMemoryPageWritable(index * sizeof(u8*));
     m_entry_points_ptr[index] = block->normalEntry;
+  }
   else
+  {
     m_fast_block_map_fallback[index] = block;
+  }
   block->fast_block_map_index = index;
   return block;
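Both hunks pass the byte offset of the entry being written. On the arena side, a sketch of the corresponding first-write check might look like this (hypothetical member names, not the actual LazyMemoryRegion code):

// Map the byte offset of an imminent write to its block index, and
// materialize that block on first use; subsequent writes take the early-out.
void LazyMemoryRegion::EnsureMemoryPageWritable(size_t offset)
{
  const size_t block_index = offset / BLOCK_SIZE;
  if (!m_block_is_mapped[block_index])  // hypothetical bookkeeping array
    MakeBlockWritable(block_index);     // swap the zero block for a data block
}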