rsx/vk: Retire the concept of heap being in "critical" state

- We waste a lot of time trying to manage that state. As long as the client has enough RAM, we just swap out the buffer anyway.
- This was more of an issue in the early days, when corrupt allocation requests trying to grab gigabytes in a single draw were common.
Authored and committed by kd-11 on 2025-04-18 04:27:16 +03:00
parent d20f48f876
commit 268de3cd24
8 changed files with 13 additions and 129 deletions
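For reference, a minimal sketch of the replacement policy implied by the data_heap::grow() hunk further down: instead of reacting to a "critical" watermark, the heap is simply re-created at a larger size when an allocation fails. The names align_up and next_heap_size are illustrative, not the engine's API, and capping against size_limit is an assumption based on the constant visible in that hunk.

#include <algorithm>
#include <cstddef>

// Stand-in for utils::align (power-of-two alignment assumed).
constexpr std::size_t align_up(std::size_t value, std::size_t alignment)
{
	return (value + alignment - 1) & ~(alignment - 1);
}

// New capacity after a failed allocation: current size plus the failed
// request, rounded up to 64 MiB and capped at the 1 GiB hard limit.
std::size_t next_heap_size(std::size_t current_size, std::size_t failed_request)
{
	constexpr std::size_t size_limit = 1024 * 0x100000; // 1 GiB
	const std::size_t wanted = align_up(current_size + failed_request, 64 * 0x100000);
	return std::min(wanted, size_limit);
}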


@@ -142,11 +142,6 @@ public:
 		m_get_pos = value;
 	}

-	virtual bool is_critical() const
-	{
-		return m_min_guard_size >= m_size;
-	}
-
 	void reset_allocation_stats()
 	{
 		m_get_pos = get_current_put_pos_minus_one();


@@ -54,19 +54,6 @@ namespace vk::data_heap_manager
 		}
 	}

-	bool any_critical()
-	{
-		for (auto& heap : g_managed_heaps)
-		{
-			if (heap->is_critical())
-			{
-				return true;
-			}
-		}
-
-		return false;
-	}
-
 	void reset()
 	{
 		for (auto& heap : g_managed_heaps)


@@ -27,9 +27,6 @@ namespace vk
 	// Reset all managed heap allocations
 	void reset_heap_allocations();

-	// Check if any managed heap is in critical state
-	bool any_critical();
-
 	// Cleanup
 	void reset();
 }


@@ -270,7 +270,6 @@ void VKGSRender::load_texture_env()
 		{
 			if (tex.enabled())
 			{
-				check_heap_status(m_texture_upload_buffer_ring_info);
 				*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
 			}
 			else
@@ -429,7 +428,6 @@ void VKGSRender::load_texture_env()
 		{
 			if (rsx::method_registers.vertex_textures[i].enabled())
 			{
-				check_heap_status(m_texture_upload_buffer_ring_info);
 				*sampler_state = m_texture_cache.upload_texture(*m_current_command_buffer, tex, m_rtts);
 			}
 			else
@@ -1140,10 +1138,6 @@ void VKGSRender::end()
 	m_texture_cache.release_uncached_temporary_subresources();
 	m_frame_stats.textures_upload_time += m_profiler.duration();

-	// Final heap check...
-	vk::data_heap* vertex_storage_heaps[] = { &m_attrib_ring_info, &m_index_buffer_ring_info, &m_draw_indirect_count_ring_info };
-	check_heap_status(vertex_storage_heaps);
-
 	u32 sub_index = 0;              // RSX subdraw ID
 	m_current_draw.subdraw_id = 0;  // Host subdraw ID. Invalid RSX subdraws do not increment this value


@@ -1149,63 +1149,6 @@ void VKGSRender::notify_tile_unbound(u32 tile)
 	}
 }

-bool VKGSRender::check_heap_status(const vk::data_heap& heap)
-{
-	if (!heap.heap || !heap.is_critical()) [[ likely ]]
-	{
-		return false;
-	}
-
-	handle_heap_critical();
-	return true;
-}
-
-bool VKGSRender::check_heap_status(const std::span<vk::data_heap*>& heaps)
-{
-	for (const auto& heap : heaps)
-	{
-		if (!heap->heap || !heap->is_critical()) [[ likely ]]
-		{
-			continue;
-		}
-
-		handle_heap_critical();
-		return true;
-	}
-
-	return false;
-}
-
-void VKGSRender::handle_heap_critical()
-{
-	m_profiler.start();
-
-	vk::frame_context_t *target_frame = nullptr;
-	if (!m_queued_frames.empty())
-	{
-		if (m_current_frame != &m_aux_frame_context)
-		{
-			target_frame = m_queued_frames.front();
-		}
-	}
-
-	if (target_frame == nullptr)
-	{
-		flush_command_queue(true);
-		m_vertex_cache->purge();
-
-		vk::data_heap_manager::reset_heap_allocations();
-		m_last_heap_sync_time = rsx::get_shared_tag();
-	}
-	else
-	{
-		// Flush the frame context
-		frame_context_cleanup(target_frame);
-	}
-
-	m_frame_stats.flip_time += m_profiler.duration();
-}
-
 void VKGSRender::check_present_status()
 {
 	while (!m_queued_frames.empty())
@@ -2009,8 +1952,6 @@ void VKGSRender::load_program_env()
 	if (update_vertex_env)
 	{
-		check_heap_status(m_vertex_env_ring_info);
-
 		// Vertex state
 		const auto mem = m_vertex_env_ring_info.static_alloc<256>();
 		auto buf = static_cast<u8*>(m_vertex_env_ring_info.map(mem, 148));
@@ -2076,8 +2017,6 @@ void VKGSRender::load_program_env()
 	if (update_fragment_constants && !m_shader_interpreter.is_interpreter(m_program))
 	{
-		check_heap_status(m_fragment_constants_ring_info);
-
 		// Fragment constants
 		if (fragment_constants_size)
 		{
@@ -2098,8 +2037,6 @@ void VKGSRender::load_program_env()
 	if (update_fragment_env)
 	{
-		check_heap_status(m_fragment_env_ring_info);
-
 		auto mem = m_fragment_env_ring_info.static_alloc<256>();
 		auto buf = m_fragment_env_ring_info.map(mem, 32);
@@ -2110,8 +2047,6 @@ void VKGSRender::load_program_env()
 	if (update_fragment_texture_env)
 	{
-		check_heap_status(m_fragment_texture_params_ring_info);
-
 		auto mem = m_fragment_texture_params_ring_info.static_alloc<256, 768>();
 		auto buf = m_fragment_texture_params_ring_info.map(mem, 768);
@@ -2122,8 +2057,6 @@ void VKGSRender::load_program_env()
 	if (update_raster_env)
 	{
-		check_heap_status(m_raster_env_ring_info);
-
 		auto mem = m_raster_env_ring_info.static_alloc<256>();
 		auto buf = m_raster_env_ring_info.map(mem, 128);
@@ -2240,11 +2173,6 @@ void VKGSRender::upload_transform_constants(const rsx::io_buffer& buffer)
 	if (transform_constants_size)
 	{
-		auto& data_source = (current_vertex_program.ctrl & RSX_SHADER_CONTROL_INSTANCED_CONSTANTS)
-			? m_instancing_buffer_ring_info
-			: m_transform_constants_ring_info;
-
-		check_heap_status(data_source);
 		buffer.reserve(transform_constants_size);
 		auto buf = buffer.data();
@@ -2732,9 +2660,6 @@ bool VKGSRender::scaled_image_from_memory(const rsx::blit_src_info& src, const r
 	if (swapchain_unavailable)
 		return false;

-	// Verify enough memory exists before attempting to handle data transfer
-	check_heap_status(m_texture_upload_buffer_ring_info);
-
 	if (m_texture_cache.blit(src, dst, interpolate, m_rtts, *m_current_command_buffer))
 	{
 		m_samplers_dirty.store(true);


@@ -218,10 +218,6 @@ private:
 	VkRenderPass get_render_pass();
 	void update_draw_state();

-	void handle_heap_critical();
-	bool check_heap_status(const vk::data_heap& heap);
-	bool check_heap_status(const std::span<vk::data_heap*>& heaps);
-
 	void check_present_status();
 	VkDescriptorSet allocate_descriptor_set();


@@ -51,13 +51,6 @@ namespace vk
 	bool data_heap::grow(usz size)
 	{
-		if (shadow)
-		{
-			// Shadowed. Growing this can be messy as it requires double allocation (macOS only)
-			rsx_log.error("[%s] Auto-grow of shadowed heaps is not currently supported. This error should typically only be seen on MacOS.", m_name);
-			return false;
-		}
-
 		// Create new heap. All sizes are aligned up by 64M, upto 1GiB
 		const usz size_limit = 1024 * 0x100000;
 		usz aligned_new_size = utils::align(m_size + size, 64 * 0x100000);
@@ -88,7 +81,19 @@ namespace vk
 		::data_heap::init(aligned_new_size, m_name, m_min_guard_size);

 		// Discard old heap and create a new one. Old heap will be garbage collected when no longer needed
-		get_resource_manager()->dispose(heap);
+		auto gc = get_resource_manager();
+		if (shadow)
+		{
+			rsx_log.warning("Buffer usage %u is not heap-compatible using this driver, explicit staging buffer in use", usage);
+
+			gc->dispose(shadow);
+			shadow = std::make_unique<buffer>(*g_render_device, aligned_new_size, memory_index, memory_flags, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, 0, VMM_ALLOCATION_POOL_SYSTEM);
+			usage |= VK_BUFFER_USAGE_TRANSFER_DST_BIT;
+			memory_flags = VK_MEMORY_PROPERTY_DEVICE_LOCAL_BIT;
+			memory_index = memory_map.device_local;
+		}
+
+		gc->dispose(heap);
 		heap = std::make_unique<buffer>(*g_render_device, aligned_new_size, memory_index, memory_flags, usage, 0, VMM_ALLOCATION_POOL_SYSTEM);

 		if (notify_on_grow)
@@ -154,20 +159,6 @@ namespace vk
 		return !dirty_ranges.empty();
 	}

-	bool data_heap::is_critical() const
-	{
-		if (!::data_heap::is_critical())
-			return false;
-
-		// By default, allow the size to grow upto 8x larger
-		// This value is arbitrary, theoretically it is possible to allow infinite stretching to improve performance
-		const usz soft_limit = initial_size * 8;
-		if ((m_size + m_min_guard_size) < soft_limit)
-			return false;
-
-		return true;
-	}
-
 	data_heap* get_upload_heap()
 	{
 		if (!g_upload_heap.heap)
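The new grow() path above keeps a shadowed heap working by re-creating both allocations: a host-visible staging buffer (the shadow, TRANSFER_SRC) plus a device-local heap that gains TRANSFER_DST usage. As a hypothetical illustration only (the struct and function names below are not rsx's actual types), the staging pattern amounts to copying dirty ranges from the shadow into the device-local buffer:

#include <vulkan/vulkan.h>

struct shadowed_heap_sketch
{
	VkBuffer shadow;      // host-visible; created with VK_BUFFER_USAGE_TRANSFER_SRC_BIT
	VkBuffer device_heap; // device-local; original usage | VK_BUFFER_USAGE_TRANSFER_DST_BIT

	// Record a copy of one dirty range from the staging shadow into the
	// device-local heap; dirty ranges are batched before rendering.
	void flush_range(VkCommandBuffer cmd, VkDeviceSize offset, VkDeviceSize bytes) const
	{
		const VkBufferCopy region { offset, offset, bytes };
		vkCmdCopyBuffer(cmd, shadow, device_heap, 1, &region);
	}
};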


@@ -52,7 +52,6 @@ namespace vk
 		// Properties
 		bool is_dirty() const;
-		bool is_critical() const override;
 	};

 	extern data_heap* get_upload_heap();