Rewrite memory pool to be thread safe.

commit f9e996e006
parent 4dbc546e7b
Author:    Robbie
Committer: Ivan
Date:      2017-07-11 13:34:38 -05:00


@@ -1,10 +1,17 @@
#include "stdafx.h"
#include "Utilities/StrUtil.h"
#include "Emu/System.h"
#include "Emu/IdManager.h"
#include "Emu/Cell/PPUModule.h"
#include "Emu/Cell/lv2/sys_mutex.h"
#include "Emu/Cell/lv2/sys_cond.h"
#include "sysPrxForUser.h"
extern logs::channel sysPrxForUser;
using sys_mempool_t = u32;
@@ -15,6 +22,9 @@ struct memory_pool_t
static const u32 id_step = 1;
static const u32 id_count = 1023;
u32 mutexid;
u32 condid;
vm::ptr<void> chunk;
u64 chunk_size;
u64 block_size;
@@ -22,13 +32,7 @@ struct memory_pool_t
std::vector<vm::ptr<void>> free_blocks;
};
vm::ptr<void> sys_mempool_allocate_block(sys_mempool_t mempool)
{
sysPrxForUser.todo("sys_mempool_allocate_block(mempool=%d)", mempool);
return vm::null;
}
s32 sys_mempool_create(vm::ptr<sys_mempool_t> mempool, vm::ptr<void> chunk, const u64 chunk_size, const u64 block_size, const u64 ralignment)
s32 sys_mempool_create(ppu_thread& ppu, vm::ptr<sys_mempool_t> mempool, vm::ptr<void> chunk, const u64 chunk_size, const u64 block_size, const u64 ralignment)
{
sysPrxForUser.warning("sys_mempool_create(mempool=*0x%x, chunk=*0x%x, chunk_size=%d, block_size=%d, ralignment=%d)", mempool, chunk, chunk_size, block_size, ralignment);
@@ -72,18 +76,65 @@ s32 sys_mempool_create(vm::ptr<sys_mempool_t> mempool, vm::ptr<void> chunk, cons
{
memory_pool->free_blocks[i] = vm::ptr<void>::make(chunk.addr() + i * block_size);
}
// Create synchronization variables
vm::var<u32> mutexid;
vm::var<sys_mutex_attribute_t> attr;
attr->protocol = SYS_SYNC_PRIORITY;
attr->recursive = SYS_SYNC_NOT_RECURSIVE;
attr->pshared = SYS_SYNC_NOT_PROCESS_SHARED;
attr->adaptive = SYS_SYNC_NOT_ADAPTIVE;
attr->ipc_key = 0; // Only used for process-shared objects
attr->flags = 0; // Reserved; left as zero
strcpy_trunc(attr->name, "mp_m" + std::to_string(*mempool));
error_code ret = sys_mutex_create(mutexid, attr);
if (ret != CELL_OK)
{ // TODO: Better exception handling.
fmt::throw_exception("mempool %x failed to create mutex", mempool);
}
memory_pool->mutexid = *mutexid;
vm::var<u32> condid;
vm::var<sys_cond_attribute_t> condAttr;
condAttr->pshared = SYS_SYNC_NOT_PROCESS_SHARED;
condAttr->flags = 0; // Reserved; left as zero
condAttr->ipc_key = 0; // Only used for process-shared objects
strcpy_trunc(condAttr->name, "mp_c" + std::to_string(*mempool));
ret = sys_cond_create(condid, *mutexid, condAttr);
if (ret != CELL_OK)
{ // TODO: Better exception handling.
fmt::throw_exception("mempool %x failed to create condition variable", mempool);
}
memory_pool->condid = *condid;
return CELL_OK;
}
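
Editor's note: the lv2 calls above follow the standard one-mutex-plus-one-condition-variable ownership pattern, created alongside the free-block list. As a point of reference only, a minimal host-side analogue in plain C++ (hypothetical host_pool type, not emulator code) could look like this:

#include <mutex>
#include <condition_variable>
#include <vector>
#include <cstdint>

// Hypothetical host-side analogue of memory_pool_t: the guest mutex/cond ids
// are replaced by std:: objects owned directly by the pool.
struct host_pool
{
	std::mutex mtx;                     // protects free_blocks
	std::condition_variable not_empty;  // signalled whenever a block is returned
	std::vector<std::uintptr_t> free_blocks;

	host_pool(std::uintptr_t chunk, std::uint64_t chunk_size, std::uint64_t block_size)
	{
		// Carve the chunk into equally sized blocks, like the loop in the diff above.
		for (std::uint64_t i = 0; i < chunk_size / block_size; i++)
			free_blocks.push_back(static_cast<std::uintptr_t>(chunk + i * block_size));
	}
};
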
void sys_mempool_destroy(sys_mempool_t mempool)
void sys_mempool_destroy(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_destroy(mempool=%d)", mempool);
idm::remove<memory_pool_t>(mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
if (memory_pool)
{
u32 condid = memory_pool->condid;
u32 mutexid = memory_pool->mutexid;
sys_mutex_lock(ppu, memory_pool->mutexid, 0);
idm::remove<memory_pool_t>(mempool);
sys_mutex_unlock(ppu, mutexid);
sys_cond_destroy(condid); // destroy the cond first: a mutex with an attached cond cannot be destroyed
sys_mutex_destroy(mutexid);
}
else
{
sysPrxForUser.error("Trying to destroy an already destroyed mempool=%d", mempool);
}
}
s32 sys_mempool_free_block(sys_mempool_t mempool, vm::ptr<void> block)
s32 sys_mempool_free_block(ppu_thread& ppu, sys_mempool_t mempool, vm::ptr<void> block)
{
sysPrxForUser.warning("sys_mempool_free_block(mempool=%d, block=*0x%x)", mempool, block);
@@ -93,16 +144,21 @@ s32 sys_mempool_free_block(sys_mempool_t mempool, vm::ptr<void> block)
return CELL_EINVAL;
}
sys_mutex_lock(ppu, memory_pool->mutexid, 0);
// Cannot free a block that does not belong to this memory pool
if (block.addr() < memory_pool->chunk.addr() || block.addr() >= memory_pool->chunk.addr() + memory_pool->chunk_size)
{
sys_mutex_unlock(ppu, memory_pool->mutexid);
return CELL_EINVAL;
}
memory_pool->free_blocks.push_back(block);
sys_cond_signal(ppu, memory_pool->condid);
sys_mutex_unlock(ppu, memory_pool->mutexid);
return CELL_OK;
}
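
Editor's note: the free path is the producer half of the pattern, push the block back under the lock, then wake one waiter. In the hypothetical host_pool analogue sketched earlier, it reduces to:

// Return a block to the hypothetical host_pool and wake one blocked allocator.
void host_free_block(host_pool& pool, std::uintptr_t block)
{
	{
		std::lock_guard<std::mutex> lock(pool.mtx);
		pool.free_blocks.push_back(block);
	}
	pool.not_empty.notify_one(); // counterpart of sys_cond_signal
}
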
u64 sys_mempool_get_count(sys_mempool_t mempool)
u64 sys_mempool_get_count(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_get_count(mempool=%d)", mempool);
@@ -111,11 +167,40 @@ u64 sys_mempool_get_count(sys_mempool_t mempool)
{
return CELL_EINVAL;
}
return memory_pool->free_blocks.size();
sys_mutex_lock(ppu, memory_pool->mutexid, 0);
u64 ret = memory_pool->free_blocks.size();
sys_mutex_unlock(ppu, memory_pool->mutexid);
return ret;
}
vm::ptr<void> sys_mempool_try_allocate_block(sys_mempool_t mempool)
vm::ptr<void> sys_mempool_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_allocate_block(mempool=%d)", mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
if (!memory_pool)
{ // The pool was destroyed (lookup returned null), so there is nothing to allocate from.
return vm::null;
}
sys_mutex_lock(ppu, memory_pool->mutexid, 0);
while (memory_pool->free_blocks.empty()) // loop, not if: guards against spurious wakeups
{
sys_cond_wait(ppu, memory_pool->condid, 0);
memory_pool = idm::get<memory_pool_t>(mempool);
if (!memory_pool) // the wakeup may have come from sys_mempool_destroy; don't touch a freed pool
{ // No unlock needed: destroying the pool also destroyed its mutex.
return vm::null;
}
}
auto block_ptr = memory_pool->free_blocks.back();
memory_pool->free_blocks.pop_back();
sys_mutex_unlock(ppu, memory_pool->mutexid);
return block_ptr;
}
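
Editor's note: the blocking allocator is the consumer half, wait on the condition variable in a loop until a block is available, re-checking the predicate after every wakeup. A host-side analogue (again the hypothetical host_pool, without the pool-deletion race the emulator has to handle through repeated idm lookups):

// Block until the hypothetical host_pool has a free block, then pop one.
std::uintptr_t host_allocate_block(host_pool& pool)
{
	std::unique_lock<std::mutex> lock(pool.mtx);
	pool.not_empty.wait(lock, [&] { return !pool.free_blocks.empty(); }); // predicate loop handles spurious wakeups
	const std::uintptr_t block = pool.free_blocks.back();
	pool.free_blocks.pop_back();
	return block;
}
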
vm::ptr<void> sys_mempool_try_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_try_allocate_block(mempool=%d)", mempool);
@@ -126,12 +211,14 @@ vm::ptr<void> sys_mempool_try_allocate_block(sys_mempool_t mempool)
return vm::null;
}
sys_mutex_lock(ppu, memory_pool->mutexid, 0);
auto block_ptr = memory_pool->free_blocks.back();
memory_pool->free_blocks.pop_back();
sys_mutex_unlock(ppu, memory_pool->mutexid);
return block_ptr;
}
void sysPrxForUser_sys_mempool_init()
{
REG_FUNC(sysPrxForUser, sys_mempool_allocate_block);