vk: Refactor vram exhausted handler to minimize risk of UAF hazards

1. A hard sync before starting the routines on a fatal severity will release some memory up front, improving the chances of a successful eviction elsewhere.
2. A hard sync on exit cleans up afterwards, ensuring no UAF (with caveats).
This commit is contained in:
kd-11 2023-05-13 02:31:18 +03:00 committed by kd-11
commit 9a2b06f35f

View file

@ -1083,28 +1083,35 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
ensure(!vk::is_uninterruptible() && rsx::get_current_renderer()->is_current_thread()); ensure(!vk::is_uninterruptible() && rsx::get_current_renderer()->is_current_thread());
bool texture_cache_relieved = false; bool texture_cache_relieved = false;
if (severity >= rsx::problem_severity::fatal && m_texture_cache.is_overallocated()) if (severity >= rsx::problem_severity::fatal)
{ {
// Evict some unused textures. Do not evict any active references // Hard sync before trying to evict anything. This guarantees no UAF crashes in the driver.
std::set<u32> exclusion_list; // As a bonus, we also get a free gc pass
auto scan_array = [&](const auto& texture_array) flush_command_queue(true, true);
if (m_texture_cache.is_overallocated())
{ {
for (auto i = 0ull; i < texture_array.size(); ++i) // Evict some unused textures. Do not evict any active references
std::set<u32> exclusion_list;
auto scan_array = [&](const auto& texture_array)
{ {
const auto& tex = texture_array[i]; for (auto i = 0ull; i < texture_array.size(); ++i)
const auto addr = rsx::get_address(tex.offset(), tex.location()); {
exclusion_list.insert(addr); const auto& tex = texture_array[i];
} const auto addr = rsx::get_address(tex.offset(), tex.location());
}; exclusion_list.insert(addr);
}
};
scan_array(rsx::method_registers.fragment_textures); scan_array(rsx::method_registers.fragment_textures);
scan_array(rsx::method_registers.vertex_textures); scan_array(rsx::method_registers.vertex_textures);
// Hold the secondary lock guard to prevent threads from trying to touch access violation handler stuff // Hold the secondary lock guard to prevent threads from trying to touch access violation handler stuff
std::lock_guard lock(m_secondary_cb_guard); std::lock_guard lock(m_secondary_cb_guard);
rsx_log.warning("Texture cache is overallocated. Will evict unnecessary textures."); rsx_log.warning("Texture cache is overallocated. Will evict unnecessary textures.");
texture_cache_relieved = m_texture_cache.evict_unused(exclusion_list); texture_cache_relieved = m_texture_cache.evict_unused(exclusion_list);
}
} }
texture_cache_relieved |= m_texture_cache.handle_memory_pressure(severity); texture_cache_relieved |= m_texture_cache.handle_memory_pressure(severity);
@ -1160,7 +1167,7 @@ bool VKGSRender::on_vram_exhausted(rsx::problem_severity severity)
} }
const bool any_cache_relieved = (texture_cache_relieved || surface_cache_relieved); const bool any_cache_relieved = (texture_cache_relieved || surface_cache_relieved);
if (any_cache_relieved && severity >= rsx::problem_severity::fatal) if (severity >= rsx::problem_severity::fatal)
{ {
// Imminent crash, full GPU sync is the least of our problems // Imminent crash, full GPU sync is the least of our problems
flush_command_queue(true, true); flush_command_queue(true, true);