diff --git a/rpcs3/Emu/RSX/GL/GLTextureCache.h b/rpcs3/Emu/RSX/GL/GLTextureCache.h
index 7b1a024307..53aa452e1a 100644
--- a/rpcs3/Emu/RSX/GL/GLTextureCache.h
+++ b/rpcs3/Emu/RSX/GL/GLTextureCache.h
@@ -122,17 +122,19 @@ namespace gl
 
 			glGenBuffers(1, &pbo_id);
 
+			const u32 buffer_size = align(cpu_address_range, 4096);
 			glBindBuffer(GL_PIXEL_PACK_BUFFER, pbo_id);
-			glBufferStorage(GL_PIXEL_PACK_BUFFER, locked_address_range, nullptr, GL_MAP_READ_BIT);
+			glBufferStorage(GL_PIXEL_PACK_BUFFER, buffer_size, nullptr, GL_MAP_READ_BIT);
 
-			pbo_size = locked_address_range;
+			pbo_size = buffer_size;
 		}
 
 	public:
 
 		void reset(const u32 base, const u32 size, const bool flushable)
 		{
-			rsx::buffered_section::reset(base, size);
+			rsx::protection_policy policy = g_cfg.video.strict_rendering_mode ? rsx::protection_policy::protect_policy_full_range : rsx::protection_policy::protect_policy_one_page;
+			rsx::buffered_section::reset(base, size, policy);
 
 			if (flushable)
 				init_buffer();
diff --git a/rpcs3/Emu/RSX/VK/VKTextureCache.h b/rpcs3/Emu/RSX/VK/VKTextureCache.h
index 94a176aea7..bef9c189b5 100644
--- a/rpcs3/Emu/RSX/VK/VKTextureCache.h
+++ b/rpcs3/Emu/RSX/VK/VKTextureCache.h
@@ -36,7 +36,8 @@ namespace vk
 			if (length > cpu_address_range)
 				release_dma_resources();
 
-			rsx::buffered_section::reset(base, length);
+			rsx::protection_policy policy = g_cfg.video.strict_rendering_mode ? rsx::protection_policy::protect_policy_full_range : rsx::protection_policy::protect_policy_one_page;
+			rsx::buffered_section::reset(base, length, policy);
 		}
 
 		void create(const u16 w, const u16 h, const u16 depth, const u16 mipmaps, vk::image_view *view, vk::image *image, const u32 native_pitch = 0, bool managed=true)
diff --git a/rpcs3/Emu/RSX/rsx_cache.h b/rpcs3/Emu/RSX/rsx_cache.h
index 592437bc78..c44c43cda8 100644
--- a/rpcs3/Emu/RSX/rsx_cache.h
+++ b/rpcs3/Emu/RSX/rsx_cache.h
@@ -38,15 +38,22 @@ namespace rsx
 		u32 rsx_address;
 	};
 
+	enum protection_policy
+	{
+		protect_policy_one_page,	//Only guard one page, preferably one where this section 'wholly' fits
+		protect_policy_full_range	//Guard the full memory range. Shared pages may be invalidated by access outside the object we're guarding
+	};
+
 	class buffered_section
 	{
+	private:
+		u32 locked_address_base = 0;
+		u32 locked_address_range = 0;
+
 	protected:
 		u32 cpu_address_base = 0;
 		u32 cpu_address_range = 0;
 
-		u32 locked_address_base = 0;
-		u32 locked_address_range = 0;
-
 		utils::protection protection = utils::protection::rw;
 
 		bool locked = false;
@@ -62,7 +69,7 @@ namespace rsx
 		buffered_section() {}
 		~buffered_section() {}
 
-		void reset(u32 base, u32 length)
+		void reset(u32 base, u32 length, protection_policy protect_policy = protect_policy_full_range)
 		{
 			verify(HERE), locked == false;
 
@@ -70,7 +77,24 @@ namespace rsx
 			cpu_address_range = length;
 
 			locked_address_base = (base & ~4095);
-			locked_address_range = align(base + length, 4096) - locked_address_base;
+
+			if (protect_policy == protect_policy_one_page)
+			{
+				locked_address_range = 4096;
+				if (locked_address_base < base)
+				{
+					//Try the next page if we can
+					//TODO: If an object spans a boundary without filling either side, guard the larger page occupancy
+					const u32 next_page = locked_address_base + 4096;
+					if ((base + length) >= (next_page + 4096))
+					{
+						//The object spans the entire page. Guard this instead
+						locked_address_base = next_page;
+					}
+				}
+			}
+			else
+				locked_address_range = align(base + length, 4096) - locked_address_base;
 
 			protection = utils::protection::rw;
 			locked = false;
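
Note: the snippet below is a standalone sketch, not part of the patch, showing how the new one-page policy picks which page to guard compared to the full-range policy. guard_range, compute_guard() and the local align() helper are illustrative stand-ins invented for this example; only the page-selection arithmetic mirrors buffered_section::reset() in the hunk above.

#include <cstdint>
#include <cstdio>

using u32 = std::uint32_t;

// Local stand-in for rpcs3's align() utility: round value up to a multiple of alignment.
static u32 align(u32 value, u32 alignment)
{
	return (value + (alignment - 1)) & ~(alignment - 1);
}

enum protection_policy
{
	protect_policy_one_page,  // Guard a single page, preferably one the section wholly occupies
	protect_policy_full_range // Guard every page the section touches
};

struct guard_range { u32 base; u32 range; };

// Mirrors the locked_address_base/locked_address_range computation from the patch.
static guard_range compute_guard(u32 base, u32 length, protection_policy policy)
{
	guard_range g;
	g.base = base & ~4095u;

	if (policy == protect_policy_one_page)
	{
		g.range = 4096;
		if (g.base < base)
		{
			// The section starts mid-page; if it also covers the whole next page,
			// guard that page instead so the guard lies entirely inside the section.
			const u32 next_page = g.base + 4096;
			if ((base + length) >= (next_page + 4096))
				g.base = next_page;
		}
	}
	else
	{
		g.range = align(base + length, 4096) - g.base;
	}

	return g;
}

int main()
{
	// A 12 KiB section starting 256 bytes into a page (hypothetical addresses):
	const auto weak = compute_guard(0xC0100100, 0x3000, protect_policy_one_page);
	const auto full = compute_guard(0xC0100100, 0x3000, protect_policy_full_range);
	std::printf("one page  : base=0x%X range=0x%X\n", weak.base, weak.range); // 0xC0101000, 0x1000
	std::printf("full range: base=0x%X range=0x%X\n", full.base, full.range); // 0xC0100000, 0x4000
	return 0;
}

As the enum comments in the patch note, full-range guarding lets unrelated accesses to shared pages invalidate the section; the weak one-page guard avoids that, presumably at the cost of not catching writes outside the single guarded page, which is why the patch only selects it when strict_rendering_mode is off.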