Replace recursive_mutex with mutex

offtkp 2024-09-01 17:02:24 +03:00
parent 723068049e
commit 58535250bd
4 changed files with 20 additions and 6 deletions
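The change follows a standard lock-splitting pattern: each public entry point acquires a plain std::mutex exactly once and forwards to a private *Impl function that assumes the lock is already held, so internal callers such as MemoryManager::Free and MemoryManager::Reserve call the Impl variant directly instead of re-entering the locking public API. Below is a minimal sketch of that pattern, assuming hypothetical Manager/Unmap/Free names that are not taken from the codebase.

// Minimal sketch of the lock-splitting pattern this commit applies.
// All names here (Manager, Unmap, UnmapImpl, Free, Region) are illustrative.
#include <cstddef>
#include <cstdint>
#include <mutex>
#include <vector>

class Manager {
public:
    void Unmap(std::uintptr_t addr, std::size_t size) {
        std::scoped_lock lk{mutex}; // public API locks exactly once
        UnmapImpl(addr, size);
    }

    void Free(std::uintptr_t addr, std::size_t size) {
        std::scoped_lock lk{mutex};
        // Calling Unmap() here would deadlock on a plain std::mutex (it is not
        // re-entrant), which is why the old code needed std::recursive_mutex.
        // Calling the unlocked Impl directly removes that need.
        UnmapImpl(addr, size);
    }

private:
    // Precondition: the caller already holds `mutex`.
    void UnmapImpl(std::uintptr_t addr, std::size_t size) {
        regions.push_back({addr, size}); // stand-in for the real unmapping work
    }

    struct Region {
        std::uintptr_t addr;
        std::size_t size;
    };

    std::mutex mutex;
    std::vector<Region> regions;
};

The memory manager (UnmapMemory/UnmapMemoryImpl) and the buffer cache (ObtainBuffer/ObtainBufferImpl) in the diffs below both apply this split, which is what lets their member mutexes drop from std::recursive_mutex to std::mutex.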


@@ -101,7 +101,7 @@ void MemoryManager::Free(PAddr phys_addr, size_t size) {
         }
     }
     for (const auto& [addr, size] : remove_list) {
-        UnmapMemory(addr, size);
+        UnmapMemoryImpl(addr, size);
     }
     // Mark region as free and attempt to coalesce it with neighbours.
@@ -124,7 +124,7 @@ int MemoryManager::Reserve(void** out_addr, VAddr virtual_addr, size_t size, Mem
     const auto& vma = FindVMA(mapped_addr)->second;
     // If the VMA is mapped, unmap the region first.
     if (vma.IsMapped()) {
-        UnmapMemory(mapped_addr, size);
+        UnmapMemoryImpl(mapped_addr, size);
     }
     const size_t remaining_size = vma.base + vma.size - mapped_addr;
     ASSERT_MSG(vma.type == VMAType::Free && remaining_size >= size);
@@ -233,7 +233,10 @@ int MemoryManager::MapFile(void** out_addr, VAddr virtual_addr, size_t size, Mem
 void MemoryManager::UnmapMemory(VAddr virtual_addr, size_t size) {
     std::scoped_lock lk{mutex};
+    UnmapMemoryImpl(virtual_addr, size);
+}
 
+void MemoryManager::UnmapMemoryImpl(VAddr virtual_addr, size_t size) {
     const auto it = FindVMA(virtual_addr);
     const auto& vma_base = it->second;
     ASSERT_MSG(vma_base.Contains(virtual_addr, size),


@@ -214,11 +214,13 @@ private:
     DMemHandle Split(DMemHandle dmem_handle, size_t offset_in_area);
 
+    void UnmapMemoryImpl(VAddr virtual_addr, size_t size);
+
 private:
     AddressSpace impl;
     DMemMap dmem_map;
     VMAMap vma_map;
-    std::recursive_mutex mutex;
+    std::mutex mutex;
     size_t total_direct_size{};
     size_t total_flexible_size{};
     size_t flexible_usage{};


@@ -164,7 +164,7 @@ bool BufferCache::BindVertexBuffers(const Shader::Info& vs_info) {
     // Map buffers
    for (auto& range : ranges_merged) {
-        const auto [buffer, offset] = ObtainBuffer(range.base_address, range.GetSize(), false);
+        const auto [buffer, offset] = ObtainBufferImpl(range.base_address, range.GetSize(), false);
         range.vk_buffer = buffer->buffer;
         range.offset = offset;
     }
@@ -222,7 +222,7 @@ u32 BufferCache::BindIndexBuffer(bool& is_indexed, u32 index_offset) {
     // Bind index buffer.
     const u32 index_buffer_size = regs.num_indices * index_size;
-    const auto [vk_buffer, offset] = ObtainBuffer(index_address, index_buffer_size, false);
+    const auto [vk_buffer, offset] = ObtainBufferImpl(index_address, index_buffer_size, false);
     const auto cmdbuf = scheduler.CommandBuffer();
     cmdbuf.bindIndexBuffer(vk_buffer->Handle(), offset, index_type);
     return regs.num_indices;
@@ -231,6 +231,11 @@ u32 BufferCache::BindIndexBuffer(bool& is_indexed, u32 index_offset) {
 std::pair<Buffer*, u32> BufferCache::ObtainBuffer(VAddr device_addr, u32 size, bool is_written,
                                                   bool is_texel_buffer) {
     std::scoped_lock lk{mutex};
+    return ObtainBufferImpl(device_addr, size, is_written, is_texel_buffer);
+}
+
+std::pair<Buffer*, u32> BufferCache::ObtainBufferImpl(VAddr device_addr, u32 size, bool is_written,
+                                                      bool is_texel_buffer) {
     static constexpr u64 StreamThreshold = CACHING_PAGESIZE;
     const bool is_gpu_dirty = memory_tracker.IsRegionGpuModified(device_addr, size);
     if (!is_written && !is_texel_buffer && size <= StreamThreshold && !is_gpu_dirty) {


@@ -120,13 +120,17 @@ private:
     void DeleteBuffer(BufferId buffer_id, bool do_not_mark = false);
 
+    [[nodiscard]] std::pair<Buffer*, u32> ObtainBufferImpl(VAddr gpu_addr, u32 size,
+                                                           bool is_written,
+                                                           bool is_texel_buffer = false);
+
     const Vulkan::Instance& instance;
     Vulkan::Scheduler& scheduler;
     const AmdGpu::Liverpool* liverpool;
     PageManager& tracker;
     StreamBuffer staging_buffer;
     StreamBuffer stream_buffer;
-    std::recursive_mutex mutex;
+    std::mutex mutex;
     Common::SlotVector<Buffer> slot_buffers;
     MemoryTracker memory_tracker;
     PageTable page_table;