diff --git a/rpcs3/Emu/RSX/Common/texture_cache.h b/rpcs3/Emu/RSX/Common/texture_cache.h
index 3c9e3b263e..bcc3aa011f 100644
--- a/rpcs3/Emu/RSX/Common/texture_cache.h
+++ b/rpcs3/Emu/RSX/Common/texture_cache.h
@@ -566,7 +566,7 @@ namespace rsx
 		 */
 	private:
 		template <typename ...Args>
-		void flush_set(commandbuffer_type& cmd, thrashed_set& data, Args&&... extras)
+		void flush_set(commandbuffer_type& cmd, thrashed_set& data, std::function<void()> on_data_transfer_completed, Args&&... extras)
 		{
 			AUDIT(!data.flushed);
 
@@ -608,6 +608,11 @@ namespace rsx
 				cleanup_after_dma_transfers(cmd);
 			}
 
+			if (on_data_transfer_completed)
+			{
+				on_data_transfer_completed();
+			}
+
 			for (auto &surface : data.sections_to_flush)
 			{
 				surface->flush();
@@ -900,7 +905,12 @@ namespace rsx
 
 		//Invalidate range base implementation
 		template <typename ...Args>
-		thrashed_set invalidate_range_impl_base(commandbuffer_type& cmd, const address_range &fault_range_in, invalidation_cause cause, Args&&... extras)
+		thrashed_set invalidate_range_impl_base(
+			commandbuffer_type& cmd,
+			const address_range &fault_range_in,
+			invalidation_cause cause,
+			std::function<void()> on_data_transfer_completed = {},
+			Args&&... extras)
 		{
 #ifdef TEXTURE_CACHE_DEBUG
 			// Check that the cache has the correct protections
@@ -1091,7 +1101,7 @@ namespace rsx
 				// or there is nothing to flush but we have something to unprotect
 				if (has_flushables && !cause.skip_flush())
 				{
-					flush_set(cmd, result, std::forward<Args>(extras)...);
+					flush_set(cmd, result, on_data_transfer_completed, std::forward<Args>(extras)...);
 				}
 
 				unprotect_set(result);
@@ -1386,7 +1396,7 @@ namespace rsx
 				return;
 
 			std::lock_guard lock(m_cache_mutex);
-			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::committed_as_fbo, std::forward<Args>(extras)...);
+			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::committed_as_fbo, {}, std::forward<Args>(extras)...);
 		}
 
 		template <typename ...Args>
@@ -1474,30 +1484,40 @@ namespace rsx
 
 	public:
 		template <typename ...Args>
-		thrashed_set invalidate_address(commandbuffer_type& cmd, u32 address, invalidation_cause cause, Args&&... extras)
+		thrashed_set invalidate_address(
+			commandbuffer_type& cmd,
+			u32 address,
+			invalidation_cause cause,
+			std::function<void()> on_data_transfer_completed = {},
+			Args&&... extras)
 		{
-			//Test before trying to acquire the lock
+			// Test before trying to acquire the lock
 			const auto range = page_for(address);
 			if (!region_intersects_cache(range, !cause.is_read()))
 				return{};
 
 			std::lock_guard lock(m_cache_mutex);
-			return invalidate_range_impl_base(cmd, range, cause, std::forward<Args>(extras)...);
+			return invalidate_range_impl_base(cmd, range, cause, on_data_transfer_completed, std::forward<Args>(extras)...);
 		}
 
 		template <typename ...Args>
-		thrashed_set invalidate_range(commandbuffer_type& cmd, const address_range &range, invalidation_cause cause, Args&&... extras)
+		thrashed_set invalidate_range(
+			commandbuffer_type& cmd,
+			const address_range &range,
+			invalidation_cause cause,
+			std::function<void()> on_data_transfer_completed = {},
+			Args&&... extras)
 		{
-			//Test before trying to acquire the lock
+			// Test before trying to acquire the lock
 			if (!region_intersects_cache(range, !cause.is_read()))
 				return {};
 
 			std::lock_guard lock(m_cache_mutex);
-			return invalidate_range_impl_base(cmd, range, cause, std::forward<Args>(extras)...);
+			return invalidate_range_impl_base(cmd, range, cause, on_data_transfer_completed, std::forward<Args>(extras)...);
 		}
 
 		template <typename ...Args>
-		bool flush_all(commandbuffer_type& cmd, thrashed_set& data, Args&&... extras)
+		bool flush_all(commandbuffer_type& cmd, thrashed_set& data, std::function<void()> on_data_transfer_completed = {}, Args&&... extras)
 		{
 			std::lock_guard lock(m_cache_mutex);
 
@@ -1507,7 +1527,7 @@ namespace rsx
 			if (m_cache_update_tag.load() == data.cache_tag)
 			{
 				//1. Write memory to cpu side
-				flush_set(cmd, data, std::forward<Args>(extras)...);
+				flush_set(cmd, data, on_data_transfer_completed, std::forward<Args>(extras)...);
 
 				//2. Release all obsolete sections
 				unprotect_set(data);
@@ -1515,7 +1535,7 @@ namespace rsx
 			else
 			{
 				// The cache contents have changed between the two readings. This means the data held is useless
-				invalidate_range_impl_base(cmd, data.fault_range, data.cause.undefer(), std::forward<Args>(extras)...);
+				invalidate_range_impl_base(cmd, data.fault_range, data.cause.undefer(), on_data_transfer_completed, std::forward<Args>(extras)...);
 			}
 
 			return true;
@@ -2455,7 +2475,7 @@ namespace rsx
 
 			// Invalidate
 			const address_range tex_range = address_range::start_length(attributes.address, tex_size);
-			invalidate_range_impl_base(cmd, tex_range, invalidation_cause::read, std::forward<Args>(extras)...);
+			invalidate_range_impl_base(cmd, tex_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
 
 			// Upload from CPU. Note that sRGB conversion is handled in the FS
 			auto uploaded = upload_image_from_cpu(cmd, tex_range, attributes.width, attributes.height, attributes.depth, tex.get_exact_mipmap_count(), attributes.pitch, attributes.gcm_format,
@@ -3125,7 +3145,7 @@ namespace rsx
 
 			lock.upgrade();
 
-			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::read, std::forward<Args>(extras)...);
+			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::read, {}, std::forward<Args>(extras)...);
 
 			cached_src = upload_image_from_cpu(cmd, rsx_range, image_width, image_height, 1, 1, src.pitch, gcm_format,
 				texture_upload_context::blit_engine_src, subresource_layout, rsx::texture_dimension_extended::texture_dimension_2d, dst.swizzled);
@@ -3202,7 +3222,7 @@ namespace rsx
 
 			// NOTE: Write flag set to remove all other overlapping regions (e.g shader_read or blit_src)
 			// NOTE: This step can potentially invalidate the newly created src image as well.
-			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::write, std::forward<Args>(extras)...);
+			invalidate_range_impl_base(cmd, rsx_range, invalidation_cause::write, {}, std::forward<Args>(extras)...);
 
 			if (use_null_region) [[likely]]
 			{
diff --git a/rpcs3/Emu/RSX/VK/VKGSRender.cpp b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
index 5332c07f13..7fc21d015e 100644
--- a/rpcs3/Emu/RSX/VK/VKGSRender.cpp
+++ b/rpcs3/Emu/RSX/VK/VKGSRender.cpp
@@ -1075,7 +1075,11 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 			m_flush_requests.producer_wait();
 		}
 
-		m_texture_cache.flush_all(*m_secondary_cb_list.next(), result);
+		m_texture_cache.flush_all(*m_secondary_cb_list.next(), result, [&]()
+		{
+			m_flush_requests.remove_one();
+			has_queue_ref = false;
+		});
 
 		if (has_queue_ref)
 		{
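
Reviewer note: the change threads an optional completion callback through the flush path so the caller can be released as soon as the flushed data is CPU-visible (after the DMA transfers finish), instead of after the entire flush, including per-section bookkeeping and unprotection, returns. The standalone C++ sketch below illustrates that callback ordering under this reading of the patch; flush_request_counter, cache_like, and their members are hypothetical stand-ins for m_flush_requests and the texture cache, not rpcs3 APIs.

#include <atomic>
#include <functional>
#include <iostream>

// Illustrative stand-in for m_flush_requests: counts outstanding flush
// requests that keep the waiting RSX thread parked.
struct flush_request_counter
{
	std::atomic<int> pending{0};

	void add_one() { ++pending; }
	void remove_one() { --pending; }
};

// Illustrative stand-in for the texture cache. The point of the patch is
// the ordering inside the flush: the callback fires once the data
// transfers are done, before the remaining per-section bookkeeping.
struct cache_like
{
	void flush_all(std::function<void()> on_data_transfer_completed = {})
	{
		// 1. Wait for / perform the data transfers (simulated here).
		//    At this point the flushed data is visible to the CPU.

		// 2. Unblock the waiter as early as possible.
		if (on_data_transfer_completed)
		{
			on_data_transfer_completed();
		}

		// 3. Remaining bookkeeping (section flush, unprotect) runs after
		//    the waiter has already been released.
	}
};

int main()
{
	flush_request_counter flush_requests;
	cache_like cache;
	bool has_queue_ref = true;

	flush_requests.add_one();

	// Mirrors the VKGSRender::on_access_violation change: the queue
	// reference is dropped from inside the callback rather than after
	// flush_all() returns, so the "if (has_queue_ref)" path is skipped.
	cache.flush_all([&]()
	{
		flush_requests.remove_one();
		has_queue_ref = false;
	});

	std::cout << "pending=" << flush_requests.pending.load()
	          << " has_queue_ref=" << has_queue_ref << '\n';
}

Under this reading, the design choice is latency-motivated: the thread blocked on the flush request resumes while the renderer is still unprotecting pages, and call sites that pass {} for the callback keep the old synchronous behaviour.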