diff --git a/Utilities/address_range.h b/Utilities/address_range.h
index 7e556d69e3..e87579733f 100644
--- a/Utilities/address_range.h
+++ b/Utilities/address_range.h
@@ -5,33 +5,39 @@
 #include
 #include
 
+#if defined(ARCH_X64)
+#define HOST_PAGE_SIZE() 4096u
+#else
+#define HOST_PAGE_SIZE get_page_size
+#endif
 namespace utils
 {
 	class address_range_vector;
+	long get_page_size();
 
 	/**
-	 * Constexprs
+	 * Helpers
	 */
-	constexpr inline u32 page_start(u32 addr)
+	static inline u32 page_start(u32 addr)
 	{
-		return addr & ~4095u;
+		return addr & ~(HOST_PAGE_SIZE() - 1);
 	}
 
-	constexpr inline u32 next_page(u32 addr)
+	static inline u32 next_page(u32 addr)
 	{
-		return page_start(addr + 4096u);
+		return page_start(addr) + HOST_PAGE_SIZE();
 	}
 
-	constexpr inline u32 page_end(u32 addr)
+	static inline u32 page_end(u32 addr)
 	{
 		return next_page(addr) - 1;
 	}
 
-	constexpr inline u32 is_page_aligned(u32 addr)
+	static inline u32 is_page_aligned(u32 val)
 	{
-		return page_start(addr) == addr;
+		return (val & (HOST_PAGE_SIZE() - 1)) == 0;
 	}
 
@@ -186,7 +192,7 @@ namespace utils
 
 		bool is_page_range() const
 		{
-			return (valid() && start % 4096u == 0 && length() % 4096u == 0);
+			return (valid() && is_page_aligned(start) && is_page_aligned(length()));
 		}
 
 		address_range to_page_range() const
diff --git a/rpcs3/Emu/RSX/Common/texture_cache.cpp b/rpcs3/Emu/RSX/Common/texture_cache.cpp
index c59c053ffc..d1cdd25a34 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache.cpp
+++ b/rpcs3/Emu/RSX/Common/texture_cache.cpp
@@ -5,6 +5,8 @@
 
 namespace rsx
 {
+	constexpr u32 min_lockable_data_size = 4096; // Increasing this value has worse results even on systems with pages > 4k
+
 	void buffered_section::init_lockable_range(const address_range& range)
 	{
 		locked_range = range.to_page_range();
@@ -27,7 +29,7 @@ namespace rsx
 
 		init_lockable_range(cpu_range);
 
-		if (memory_range.length() < 4096)
+		if (memory_range.length() < min_lockable_data_size)
 		{
 			protection_strat = section_protection_strategy::hash;
 			mem_hash = 0;
diff --git a/rpcs3/Emu/RSX/RSXZCULL.cpp b/rpcs3/Emu/RSX/RSXZCULL.cpp
index 003a68e54a..006e4c5c9d 100644
--- a/rpcs3/Emu/RSX/RSXZCULL.cpp
+++ b/rpcs3/Emu/RSX/RSXZCULL.cpp
@@ -23,7 +23,7 @@ namespace rsx
 		{
 			if (p.second.prot != utils::protection::rw)
 			{
-				utils::memory_protect(vm::base(p.first), 4096, utils::protection::rw);
+				utils::memory_protect(vm::base(p.first), utils::get_page_size(), utils::protection::rw);
 			}
 		}
 
@@ -790,13 +790,13 @@ namespace rsx
 
 		if (!m_pages_accessed[location]) [[ likely ]]
 		{
-			const auto page_address = static_cast<u32>(address) & ~0xfff;
+			const auto page_address = utils::page_start(static_cast<u32>(address));
 			auto& page = m_locked_pages[location][page_address];
 			page.add_ref();
 
 			if (page.prot == utils::protection::rw)
 			{
-				utils::memory_protect(vm::base(page_address), 4096, utils::protection::no);
+				utils::memory_protect(vm::base(page_address), utils::get_page_size(), utils::protection::no);
 				page.prot = utils::protection::no;
 			}
 		}
@@ -811,7 +811,7 @@ namespace rsx
 		const auto location = rsx::classify_location(address);
 		if (!m_pages_accessed[location])
 		{
-			const auto page_address = static_cast<u32>(address) & ~0xfff;
+			const auto page_address = utils::page_start(static_cast<u32>(address));
 			std::scoped_lock lock(m_pages_mutex);
 
 			if (auto found = m_locked_pages[location].find(page_address);
@@ -844,7 +844,7 @@ namespace rsx
 
 				if (page.prot != utils::protection::rw)
 				{
-					utils::memory_protect(vm::base(this_address), 4096, utils::protection::rw);
+					utils::memory_protect(vm::base(this_address), utils::get_page_size(), utils::protection::rw);
 					page.prot = utils::protection::rw;
 				}
 
@@ -860,7 +860,7 @@ namespace rsx
 
 	bool ZCULL_control::on_access_violation(u32 address)
 	{
-		const auto page_address = address & ~0xfff;
+		const auto page_address = utils::page_start(address);
 		const auto location = rsx::classify_location(address);
 
 		if (m_pages_accessed[location])
@@ -890,7 +890,7 @@ namespace rsx
 			else
 			{
 				// R/W to stale block, unload it and move on
-				utils::memory_protect(vm::base(page_address), 4096, utils::protection::rw);
+				utils::memory_protect(vm::base(page_address), utils::get_page_size(), utils::protection::rw);
 				m_locked_pages[location].erase(page_address);
 				return true;
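The hunks above only declare utils::get_page_size(); its definition is not part of this excerpt. For reference, a helper like this is commonly implemented by querying the host once and caching the result — a minimal sketch, assuming sysconf(_SC_PAGESIZE) on POSIX and GetSystemInfo on Windows, not necessarily the definition used by this patch:

#ifdef _WIN32
#include <Windows.h>
#else
#include <unistd.h>
#endif

namespace utils
{
	// Sketch only; the real definition is not shown in the diff above.
	long get_page_size()
	{
		// Query the host once and cache the result, so the page math in
		// hot paths does not pay for a syscall on every call.
		static const long size = []() -> long
		{
#ifdef _WIN32
			SYSTEM_INFO info{};
			GetSystemInfo(&info);
			return static_cast<long>(info.dwPageSize);
#else
			return sysconf(_SC_PAGESIZE);
#endif
		}();
		return size;
	}
}

Caching matters here because on non-x64 hosts HOST_PAGE_SIZE() expands to get_page_size(), which page_start(), next_page() and is_page_aligned() now call on every invocation.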