Core: Don't create page table mappings before R/C bits are set

This gets rid of the hack of setting the R and C bits pessimistically,
more or less reversing the performance regression in Rogue Squadron 3.
JosJuice 2025-08-02 20:52:48 +02:00
commit 6aaa4a37ce
9 changed files with 214 additions and 90 deletions

View file

@@ -102,10 +102,22 @@ public:
/// from.
/// @param size Size of the region to map.
/// @param base Address within the memory region from ReserveMemoryRegion() where to map it.
/// @param writeable Whether the region should be both readable and writeable, or just readable.
///
/// @return The address we actually ended up mapping, which should be the given 'base'.
///
void* MapInMemoryRegion(s64 offset, size_t size, void* base);
void* MapInMemoryRegion(s64 offset, size_t size, void* base, bool writeable);
///
/// Changes whether a section mapped by MapInMemoryRegion is writeable.
///
/// @param view The address returned by MapInMemoryRegion.
/// @param size The size passed to MapInMemoryRegion.
/// @param writeable Whether the region should be both readable and writeable, or just readable.
///
/// @return Whether the operation succeeded.
///
bool ChangeMappingProtection(void* view, size_t size, bool writeable);
///
/// Unmap a memory region previously mapped with MapInMemoryRegion().
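A minimal sketch of how these two calls are meant to compose, assuming an already-initialized MemArena and the offset/size/base contract documented above (illustrative only, not code from this commit):

// Hypothetical caller: map a guest page read-only first, upgrade it later.
void* view = arena.MapInMemoryRegion(offset, size, base, /*writeable=*/false);
if (view != nullptr)
{
  // Guest reads can now go through fastmem; guest writes still fault and take
  // the slow path until the page's C (changed) bit is set.

  // First write detected: make the same view writeable in place, without
  // unmapping and remapping it.
  if (!arena.ChangeMappingProtection(view, size, /*writeable=*/true))
  {
    // Handle the (unlikely) protection-change failure here.
  }
}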

View file

@@ -123,9 +123,13 @@ void MemArena::ReleaseMemoryRegion()
}
}
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base, bool writeable)
{
void* retval = mmap(base, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, m_shm_fd, offset);
int prot = PROT_READ;
if (writeable)
prot |= PROT_WRITE;
void* retval = mmap(base, size, prot, MAP_SHARED | MAP_FIXED, m_shm_fd, offset);
if (retval == MAP_FAILED)
{
NOTICE_LOG_FMT(MEMMAP, "mmap failed");
@@ -137,6 +141,18 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
}
}
bool MemArena::ChangeMappingProtection(void* view, size_t size, bool writeable)
{
int prot = PROT_READ;
if (writeable)
prot |= PROT_WRITE;
int retval = mprotect(view, size, prot);
if (retval != 0)
NOTICE_LOG_FMT(MEMMAP, "mprotect failed");
return retval == 0;
}
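This path relies on the kernel allowing PROT_WRITE to be added to an existing MAP_SHARED view as long as the backing file descriptor was opened read-write. A self-contained Linux sketch of that mechanism, using a hypothetical memfd in place of the arena's shared-memory segment:

#include <sys/mman.h>
#include <unistd.h>
#include <cstring>

int main()
{
  // Error handling omitted for brevity; memfd_create is Linux-specific.
  const int fd = memfd_create("example", 0);
  ftruncate(fd, 4096);

  // Start with a read-only shared view; a store through 'p' would fault here.
  void* p = mmap(nullptr, 4096, PROT_READ, MAP_SHARED, fd, 0);

  // Because the backing fd is read-write, mprotect may add PROT_WRITE to the
  // same view later, which is what ChangeMappingProtection does above.
  mprotect(p, 4096, PROT_READ | PROT_WRITE);
  std::memcpy(p, "now writeable", 14);

  munmap(p, 4096);
  close(fd);
  return 0;
}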
void MemArena::UnmapFromMemoryRegion(void* view, size_t size)
{
void* retval = mmap(view, size, PROT_NONE, MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

View file

@@ -123,7 +123,7 @@ void MemArena::ReleaseMemoryRegion()
m_region_size = 0;
}
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base, bool writeable)
{
if (m_shm_address == 0)
{
@@ -132,11 +132,13 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
}
vm_address_t address = reinterpret_cast<vm_address_t>(base);
constexpr vm_prot_t prot = VM_PROT_READ | VM_PROT_WRITE;
vm_prot_t prot = VM_PROT_READ;
if (writeable)
prot |= VM_PROT_WRITE;
kern_return_t retval =
vm_map(mach_task_self(), &address, size, 0, VM_FLAGS_FIXED | VM_FLAGS_OVERWRITE, m_shm_entry,
offset, false, prot, prot, VM_INHERIT_DEFAULT);
offset, false, prot, VM_PROT_READ | VM_PROT_WRITE, VM_INHERIT_DEFAULT);
if (retval != KERN_SUCCESS)
{
ERROR_LOG_FMT(MEMMAP, "MapInMemoryRegion failed: vm_map returned {0:#x}", retval);
@@ -146,6 +148,20 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
return reinterpret_cast<void*>(address);
}
bool MemArena::ChangeMappingProtection(void* view, size_t size, bool writeable)
{
vm_address_t address = reinterpret_cast<vm_address_t>(view);
vm_prot_t prot = VM_PROT_READ;
if (writeable)
prot |= VM_PROT_WRITE;
kern_return_t retval = vm_protect(mach_task_self(), address, size, false, prot);
if (retval != KERN_SUCCESS)
ERROR_LOG_FMT(MEMMAP, "ChangeMappingProtection failed: vm_protect returned {0:#x}", retval);
return retval == KERN_SUCCESS;
}
void MemArena::UnmapFromMemoryRegion(void* view, size_t size)
{
vm_address_t address = reinterpret_cast<vm_address_t>(view);

View file

@@ -89,9 +89,13 @@ void MemArena::ReleaseMemoryRegion()
}
}
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base, bool writeable)
{
void* retval = mmap(base, size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, m_shm_fd, offset);
int prot = PROT_READ;
if (writeable)
prot |= PROT_WRITE;
void* retval = mmap(base, size, prot, MAP_SHARED | MAP_FIXED, m_shm_fd, offset);
if (retval == MAP_FAILED)
{
NOTICE_LOG_FMT(MEMMAP, "mmap failed");
@@ -103,6 +107,18 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
}
}
bool MemArena::ChangeMappingProtection(void* view, size_t size, bool writeable)
{
int prot = PROT_READ;
if (writeable)
prot |= PROT_WRITE;
int retval = mprotect(view, size, prot);
if (retval != 0)
NOTICE_LOG_FMT(MEMMAP, "mprotect failed");
return retval == 0;
}
void MemArena::UnmapFromMemoryRegion(void* view, size_t size)
{
void* retval = mmap(view, size, PROT_NONE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0);

View file

@@ -318,8 +318,10 @@ WindowsMemoryRegion* MemArena::EnsureSplitRegionForMapping(void* start_address,
}
}
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base, bool writeable)
{
void* result;
if (m_memory_functions.m_api_ms_win_core_memory_l1_1_6_handle.IsOpen())
{
WindowsMemoryRegion* const region = EnsureSplitRegionForMapping(base, size);
@@ -329,10 +331,10 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
return nullptr;
}
void* rv = static_cast<PMapViewOfFile3>(m_memory_functions.m_address_MapViewOfFile3)(
result = static_cast<PMapViewOfFile3>(m_memory_functions.m_address_MapViewOfFile3)(
m_memory_handle, nullptr, base, offset, size, MEM_REPLACE_PLACEHOLDER, PAGE_READWRITE,
nullptr, 0);
if (rv)
if (result)
{
region->m_is_mapped = true;
}
@@ -342,11 +344,37 @@ void* MemArena::MapInMemoryRegion(s64 offset, size_t size, void* base)
// revert the split, if any
JoinRegionsAfterUnmap(base, size);
return nullptr;
}
return rv;
}
else
{
result =
MapViewOfFileEx(m_memory_handle, FILE_MAP_ALL_ACCESS, 0, (DWORD)((u64)offset), size, base);
if (!result)
return nullptr;
}
return MapViewOfFileEx(m_memory_handle, FILE_MAP_ALL_ACCESS, 0, (DWORD)((u64)offset), size, base);
if (!writeable)
{
// If we want to use PAGE_READONLY for now while still being able to switch to PAGE_READWRITE
// later, we have to call MapViewOfFile with PAGE_READWRITE and then switch to PAGE_READONLY.
ChangeMappingProtection(base, size, writeable);
}
return result;
}
bool MemArena::ChangeMappingProtection(void* view, size_t size, bool writeable)
{
DWORD old_protect;
const int retval =
VirtualProtect(view, size, writeable ? PAGE_READWRITE : PAGE_READONLY, &old_protect);
if (retval == 0)
PanicAlertFmt("VirtualProtect failed: {}", GetLastErrorString());
return retval != 0;
}
bool MemArena::JoinRegionsAfterUnmap(void* start_address, size_t size)
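The comment above reflects a Win32 restriction: a view cannot later be made more permissive than the access it was originally mapped with, so to toggle between read-only and read-write the view has to be created with write access and then restricted with VirtualProtect. A standalone sketch of that pattern with a hypothetical pagefile-backed section (not the MemArena code itself):

#include <windows.h>

int main()
{
  // Pagefile-backed section standing in for the emulated RAM (hypothetical).
  HANDLE mapping =
      CreateFileMappingW(INVALID_HANDLE_VALUE, nullptr, PAGE_READWRITE, 0, 0x1000, nullptr);

  // Map with full access so the protection can still be raised later...
  void* view = MapViewOfFile(mapping, FILE_MAP_ALL_ACCESS, 0, 0, 0x1000);

  // ...then immediately drop to read-only until the page is actually written.
  DWORD old_protect;
  VirtualProtect(view, 0x1000, PAGE_READONLY, &old_protect);

  // First write detected: upgrade the same view back to read-write in place.
  VirtualProtect(view, 0x1000, PAGE_READWRITE, &old_protect);

  UnmapViewOfFile(view);
  CloseHandle(mapping);
  return 0;
}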

View file

@@ -221,7 +221,7 @@ bool MemoryManager::InitFastmemArena()
continue;
u8* base = m_physical_base + region.physical_address;
u8* view = (u8*)m_arena.MapInMemoryRegion(region.shm_position, region.size, base);
u8* view = (u8*)m_arena.MapInMemoryRegion(region.shm_position, region.size, base, true);
if (base != view)
{
@@ -239,7 +239,7 @@ bool MemoryManager::InitFastmemArena()
void MemoryManager::UpdateDBATMappings(const PowerPC::BatTable& dbat_table)
{
for (auto& entry : m_dbat_mapped_entries)
for (const auto& [logical_address, entry] : m_dbat_mapped_entries)
{
m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
}
@@ -291,7 +291,7 @@ void MemoryManager::UpdateDBATMappings(const PowerPC::BatTable& dbat_table)
u8* base = m_logical_base + logical_address + intersection_start - translated_address;
u32 mapped_size = intersection_end - intersection_start;
void* mapped_pointer = m_arena.MapInMemoryRegion(position, mapped_size, base);
void* mapped_pointer = m_arena.MapInMemoryRegion(position, mapped_size, base, true);
if (!mapped_pointer)
{
PanicAlertFmt("Memory::UpdateDBATMappings(): Failed to map memory region at 0x{:08X} "
@@ -299,7 +299,8 @@ void MemoryManager::UpdateDBATMappings(const PowerPC::BatTable& dbat_table)
intersection_start, mapped_size, logical_address);
continue;
}
m_dbat_mapped_entries.push_back({mapped_pointer, mapped_size, logical_address});
m_dbat_mapped_entries.emplace(logical_address,
LogicalMemoryView{mapped_pointer, mapped_size});
}
m_logical_page_mappings[i] =
@@ -310,45 +311,61 @@ void MemoryManager::UpdateDBATMappings(const PowerPC::BatTable& dbat_table)
}
}
void MemoryManager::AddPageTableMappings(const std::map<u32, u32>& mappings)
void MemoryManager::AddPageTableMapping(u32 logical_address, u32 translated_address, bool writeable)
{
if (m_page_size > PowerPC::HW_PAGE_SIZE)
return;
for (const auto [logical_address, translated_address] : mappings)
if (logical_address % m_page_alignment != 0)
return;
constexpr u32 logical_size = PowerPC::HW_PAGE_SIZE;
for (const auto& physical_region : m_physical_regions)
{
if (logical_address % m_page_alignment != 0)
if (!physical_region.active)
continue;
constexpr u32 logical_size = PowerPC::HW_PAGE_SIZE;
for (const auto& physical_region : m_physical_regions)
u32 mapping_address = physical_region.physical_address;
u32 mapping_end = mapping_address + physical_region.size;
u32 intersection_start = std::max(mapping_address, translated_address);
u32 intersection_end = std::min(mapping_end, translated_address + logical_size);
if (intersection_start < intersection_end)
{
if (!physical_region.active)
continue;
u32 mapping_address = physical_region.physical_address;
u32 mapping_end = mapping_address + physical_region.size;
u32 intersection_start = std::max(mapping_address, translated_address);
u32 intersection_end = std::min(mapping_end, translated_address + logical_size);
if (intersection_start < intersection_end)
// Found an overlapping region; map it.
if (m_is_fastmem_arena_initialized)
{
// Found an overlapping region; map it.
if (m_is_fastmem_arena_initialized)
{
u32 position = physical_region.shm_position + intersection_start - mapping_address;
u8* base = m_logical_base + logical_address + intersection_start - translated_address;
u32 mapped_size = intersection_end - intersection_start;
u32 position = physical_region.shm_position + intersection_start - mapping_address;
u8* base = m_logical_base + logical_address + intersection_start - translated_address;
u32 mapped_size = intersection_end - intersection_start;
void* mapped_pointer = m_arena.MapInMemoryRegion(position, mapped_size, base);
const auto it = m_page_table_mapped_entries.find(logical_address);
if (it != m_page_table_mapped_entries.end())
{
// Update the protection of an existing mapping.
if (it->second.mapped_pointer == base && it->second.mapped_size == mapped_size)
{
if (!m_arena.ChangeMappingProtection(base, mapped_size, writeable))
{
PanicAlertFmt(
"Memory::AddPageTableMapping(): Failed to change protection for memory "
"region at 0x{:08X} (size 0x{:08X}, logical fastmem region at 0x{:08X}).",
intersection_start, mapped_size, logical_address);
}
}
}
else
{
// Create a new mapping.
void* mapped_pointer = m_arena.MapInMemoryRegion(position, mapped_size, base, writeable);
if (!mapped_pointer)
{
PanicAlertFmt(
"Memory::UpdatePageTableMappings(): Failed to map memory region at 0x{:08X} "
"(size 0x{:08X}) into logical fastmem region at 0x{:08X}.",
intersection_start, mapped_size, logical_address);
PanicAlertFmt("Memory::AddPageTableMapping(): Failed to map memory region at 0x{:08X} "
"(size 0x{:08X}) into logical fastmem region at 0x{:08X}.",
intersection_start, mapped_size, logical_address);
continue;
}
m_page_table_mapped_entries.push_back({mapped_pointer, mapped_size, logical_address});
m_page_table_mapped_entries.emplace(logical_address,
LogicalMemoryView{mapped_pointer, mapped_size});
}
}
}
@@ -363,8 +380,9 @@ void MemoryManager::RemovePageTableMappings(const std::set<u32>& mappings)
if (mappings.empty())
return;
std::erase_if(m_page_table_mapped_entries, [this, &mappings](const LogicalMemoryView& entry) {
const bool remove = mappings.contains(entry.logical_address);
std::erase_if(m_page_table_mapped_entries, [this, &mappings](const auto& pair) {
const auto& [logical_address, entry] = pair;
const bool remove = mappings.contains(logical_address);
if (remove)
m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
return remove;
@@ -373,7 +391,7 @@ void MemoryManager::RemovePageTableMappings(const std::set<u32>& mappings)
void MemoryManager::RemoveAllPageTableMappings()
{
for (auto& entry : m_page_table_mapped_entries)
for (const auto& [logical_address, entry] : m_page_table_mapped_entries)
{
m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
}
@@ -461,13 +479,13 @@ void MemoryManager::ShutdownFastmemArena()
m_arena.UnmapFromMemoryRegion(base, region.size);
}
for (auto& entry : m_dbat_mapped_entries)
for (const auto& [logical_address, entry] : m_dbat_mapped_entries)
{
m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
}
m_dbat_mapped_entries.clear();
for (auto& entry : m_page_table_mapped_entries)
for (const auto& [logical_address, entry] : m_page_table_mapped_entries)
{
m_arena.UnmapFromMemoryRegion(entry.mapped_pointer, entry.mapped_size);
}
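With these changes, AddPageTableMapping is expected to be called up to twice for a given guest page: once read-only when the R bit is set, and again writeable when the C bit is set, at which point the existing view is upgraded rather than remapped. A rough sketch of that sequence from the caller's side (the addresses are made up and 'memory' stands for the MemoryManager instance):

// Hypothetical lifetime of one guest page's host mapping (addresses made up).
constexpr u32 logical = 0x90001000;   // guest effective address of the page
constexpr u32 physical = 0x00002000;  // physical address the PTE points at

// First read: R bit gets set, a read-only host mapping is created.
memory.AddPageTableMapping(logical, physical, /*writeable=*/false);

// First write: C bit gets set. The logical address is already a key in
// m_page_table_mapped_entries, so only the view's protection is changed.
memory.AddPageTableMapping(logical, physical, /*writeable=*/true);

// PTE removed or replaced: the host view is torn down again.
memory.RemovePageTableMappings({logical});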

View file

@@ -9,7 +9,6 @@
#include <set>
#include <span>
#include <string>
#include <vector>
#include "Common/CommonTypes.h"
#include "Common/MathUtil.h"
@@ -56,7 +55,6 @@ struct LogicalMemoryView
{
void* mapped_pointer;
u32 mapped_size;
u32 logical_address;
};
class MemoryManager
@@ -103,7 +101,7 @@ public:
void DoState(PointerWrap& p);
void UpdateDBATMappings(const PowerPC::BatTable& dbat_table);
void AddPageTableMappings(const std::map<u32, u32>& mappings);
void AddPageTableMapping(u32 logical_address, u32 translated_address, bool writeable);
void RemovePageTableMappings(const std::set<u32>& mappings);
void RemoveAllPageTableMappings();
@@ -256,8 +254,9 @@ private:
// TODO: Do we want to handle the mirrors of the GC RAM?
std::array<PhysicalMemoryRegion, 4> m_physical_regions{};
std::vector<LogicalMemoryView> m_dbat_mapped_entries;
std::vector<LogicalMemoryView> m_page_table_mapped_entries;
// The key is the logical address
std::map<u32, LogicalMemoryView> m_dbat_mapped_entries;
std::map<u32, LogicalMemoryView> m_page_table_mapped_entries;
std::array<void*, PowerPC::BAT_PAGE_COUNT> m_physical_page_mappings{};
std::array<void*, PowerPC::BAT_PAGE_COUNT> m_logical_page_mappings{};
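Keying the bookkeeping by logical address, instead of keeping a flat std::vector, is what makes the protection-upgrade path cheap: the second AddPageTableMapping call for a page can find the existing view directly. A simplified sketch of the lookup-or-insert step, assuming the includes and types declared in this header:

// Illustrative only; 'entries' stands in for m_page_table_mapped_entries.
void TrackMapping(std::map<u32, LogicalMemoryView>& entries, u32 logical_address,
                  void* mapped_pointer, u32 mapped_size)
{
  const auto it = entries.find(logical_address);
  if (it != entries.end())
  {
    // A view already exists for this guest page; the real code only changes
    // its protection via MemArena::ChangeMappingProtection.
  }
  else
  {
    // No view yet: create one and remember it under its logical address.
    entries.emplace(logical_address, LogicalMemoryView{mapped_pointer, mapped_size});
  }
}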

View file

@@ -1486,9 +1486,11 @@ void MMU::PageTableUpdated()
#else
if (m_ppc_state.m_enable_dcache)
{
// Because fastmem isn't in use when accurate dcache emulation is enabled, setting up mappings
// would be a waste of time. Skipping setting up mappings also comes with the bonus of skipping
// the inaccurate behavior of setting the R and C bits of PTE2 as soon as a page is mapped.
// Because fastmem isn't in use when accurate dcache emulation is enabled,
// keeping track of page table updates would be a waste of time.
m_memory.RemoveAllPageTableMappings();
m_page_table.clear();
m_page_mappings.clear();
return;
}
@@ -1526,7 +1528,7 @@ void MMU::ReloadPageTable()
PageTableUpdated(m_temp_page_table);
}
void MMU::PageTableUpdated(std::span<u8> page_table)
void MMU::PageTableUpdated(std::span<const u8> page_table)
{
// PowerPC's priority order for PTEs that have the same logical address is as follows:
//
@@ -1535,7 +1537,8 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
// incorporates the logical address and H. The PTE located first in the PTEG takes priority.
m_removed_mappings.clear();
m_added_mappings.clear();
m_added_readonly_mappings.clear();
m_added_readwrite_mappings.clear();
if (m_page_table.size() != page_table.size())
{
@@ -1544,7 +1547,7 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
}
u8* old_page_table = m_page_table.data();
u8* new_page_table = page_table.data();
const u8* new_page_table = page_table.data();
constexpr auto compare_64_bytes = [](const u8* a, const u8* b) -> bool {
#ifdef _M_X86_64
@@ -1643,8 +1646,8 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
}
};
const auto try_add_mapping = [this, &get_page_index, page_table](UPTE_Lo pte1, UPTE_Hi pte2,
u32 page_table_offset) {
const auto try_add_mapping = [this, &get_page_index](UPTE_Lo pte1, UPTE_Hi pte2,
u32 page_table_offset) {
EffectiveAddress logical_address = get_page_index(pte1, page_table_offset / 64);
for (u32 i = 0; i < std::size(m_ppc_state.sr); ++i)
@@ -1686,14 +1689,15 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
const auto it = m_page_mappings.find(logical_address.Hex);
if (it != m_page_mappings.end()) [[unlikely]]
{
if (priority > it->second.priority)
if (it->second.priority < priority)
{
// An existing mapping has priority.
continue;
}
else
{
// The new mapping has priority over an existing mapping. Replace the existing mapping.
// The new mapping has priority over an existing mapping. Replace the existing
// mapping.
if (it->second.host_mapping)
m_removed_mappings.emplace(it->first);
it->second.Hex = page_mapping.Hex;
@@ -1705,24 +1709,13 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
m_page_mappings.emplace(logical_address.Hex, page_mapping);
}
if (host_mapping)
// If the R bit isn't set yet, the actual host mapping will be created once
// TranslatePageAddress sets the R bit.
if (host_mapping && pte2.R)
{
const u32 physical_address = pte2.RPN << 12;
m_added_mappings.emplace(logical_address.Hex, physical_address);
// HACK: We set R and C, which indicate whether a page has been read from and written to
// respectively, when a page is mapped rather than when it's actually accessed. The latter
// is probably possible using some fault handling logic, but for now it seems like more
// work than it's worth.
if (!pte2.R || !pte2.C)
{
pte2.R = 1;
pte2.C = 1;
const u32 pte2_swapped = Common::swap32(pte2.Hex);
std::memcpy(page_table.data() + page_table_offset + 4, &pte2_swapped,
sizeof(pte2_swapped));
}
(pte2.C ? m_added_readwrite_mappings : m_added_readonly_mappings)
.emplace(logical_address.Hex, physical_address);
}
}
};
@@ -1789,13 +1782,14 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
}
}
// Pass 2: Add new secondary (H=1) mappings. This is a separate pass because before we can process
// whether a mapping should be added, we first need to check all PTEs that have equal or higher
// priority to see if their mappings should be removed. For adding primary mappings, this ordering
// comes naturally from doing a linear scan of the page table from start to finish. But for adding
// secondary mappings, the primary PTEG that has priority over a given secondary PTEG is in the
// other half of the page table, so we need more than one pass through the page table. But most of
// the time, there are no secondary mappings, letting us skip the second pass.
// Pass 2: Add new secondary (H=1) mappings. This is a separate pass because before we can
// process whether a mapping should be added, we first need to check all PTEs that have
// equal or higher priority to see if their mappings should be removed. For adding primary
// mappings, this ordering comes naturally from doing a linear scan of the page table from
// start to finish. But for adding secondary mappings, the primary PTEG that has priority
// over a given secondary PTEG is in the other half of the page table, so we need more than
// one pass through the page table. But most of the time, there are no secondary mappings,
// letting us skip the second pass.
if (run_pass_2) [[unlikely]]
{
for (u32 i = 0; i < page_table.size(); i += 64)
@@ -1823,8 +1817,11 @@ void MMU::PageTableUpdated(std::span<u8> page_table)
if (!m_removed_mappings.empty())
m_memory.RemovePageTableMappings(m_removed_mappings);
if (!m_added_mappings.empty())
m_memory.AddPageTableMappings(m_added_mappings);
for (const auto& [logical_address, physical_address] : m_added_readonly_mappings)
m_memory.AddPageTableMapping(logical_address, physical_address, false);
for (const auto& [logical_address, physical_address] : m_added_readwrite_mappings)
m_memory.AddPageTableMapping(logical_address, physical_address, true);
}
#endif
@@ -1895,6 +1892,7 @@ MMU::TranslateAddressResult MMU::TranslatePageAddress(const EffectiveAddress add
if (pte1.Hex == pteg)
{
UPTE_Hi pte2(ReadFromHardware<pte_read_flag, u32, true>(pteg_addr + 4));
const UPTE_Hi old_pte2 = pte2;
// set the access bits
switch (flag)
@@ -1914,9 +1912,29 @@ MMU::TranslateAddressResult MMU::TranslatePageAddress(const EffectiveAddress add
break;
}
if (!IsNoExceptionFlag(flag))
if (!IsNoExceptionFlag(flag) && pte2.Hex != old_pte2.Hex)
{
m_memory.Write_U32(pte2.Hex, pteg_addr + 4);
const u32 page_logical_address = address.Hex & ~HW_PAGE_MASK;
const auto it = m_page_mappings.find(page_logical_address);
if (it != m_page_mappings.end())
{
const u32 priority = (pteg_addr % 64 / 8) | (pte1.H << 3);
if (it->second.Hex == PageMapping(pte2.RPN, true, priority).Hex)
{
const u32 swapped_pte1 = Common::swap32(reinterpret_cast<u8*>(&pte1));
std::memcpy(m_page_table.data() + pteg_addr - m_ppc_state.pagetable_base,
&swapped_pte1, sizeof(swapped_pte1));
const u32 swapped_pte2 = Common::swap32(reinterpret_cast<u8*>(&pte2));
std::memcpy(m_page_table.data() + pteg_addr + 4 - m_ppc_state.pagetable_base,
&swapped_pte2, sizeof(swapped_pte2));
const u32 page_translated_address = pte2.RPN << 12;
m_memory.AddPageTableMapping(page_logical_address, page_translated_address, pte2.C);
}
}
}
// We already updated the TLB entry if this was caused by a C bit.
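The net effect in TranslatePageAddress is that the R and C bits now drive host mappings lazily instead of being forced on when a PTE is first seen. A condensed, hypothetical rendering of the new flow (ReadPte2/WritePte2 are made-up helpers; the real code also re-checks m_page_mappings and handles hashing and exceptions):

// Condensed sketch, not the literal code above.
UPTE_Hi pte2 = ReadPte2(pteg_addr);  // hypothetical helper
const UPTE_Hi old_pte2 = pte2;

pte2.R = 1;       // every successful translation marks the page as referenced
if (is_write)     // 'is_write': whether this translation is for a store
  pte2.C = 1;

if (pte2.Hex != old_pte2.Hex)
{
  WritePte2(pteg_addr, pte2);  // hypothetical helper: guest memory + m_page_table copy
  // Read-only mapping after the first read, read-write after the first write.
  m_memory.AddPageTableMapping(page_logical_address, pte2.RPN << 12, pte2.C != 0);
}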

View file

@@ -336,7 +336,7 @@ private:
#ifndef _ARCH_32
void ReloadPageTable();
void PageTableUpdated(std::span<u8> page_table);
void PageTableUpdated(std::span<const u8> page_table);
#endif
void UpdateBATs(BatTable& bat_table, u32 base_spr);
@@ -373,7 +373,8 @@ private:
// These are kept around just for their memory allocations. They are always cleared before use.
std::vector<u8> m_temp_page_table;
std::set<u32> m_removed_mappings;
std::map<u32, u32> m_added_mappings;
std::map<u32, u32> m_added_readonly_mappings;
std::map<u32, u32> m_added_readwrite_mappings;
BatTable m_ibat_table;
BatTable m_dbat_table;