Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-20 11:36:13 +00:00)

Commit 48c75105e2 — "Memory" eliminated
Parent: aad97aaea1

33 changed files with 562 additions and 910 deletions
@@ -24,7 +24,7 @@ std::array<std::atomic<u32>, TLS_MAX> g_armv7_tls_owners;
 void armv7_init_tls()
 {
-    g_armv7_tls_start = Emu.GetTLSMemsz() ? Memory.PSV.RAM.AllocAlign(Emu.GetTLSMemsz() * TLS_MAX, 4096) : 0;
+    g_armv7_tls_start = Emu.GetTLSMemsz() ? vm::alloc(Emu.GetTLSMemsz() * TLS_MAX, vm::main) : 0;
 
     for (auto& v : g_armv7_tls_owners)
     {

@@ -128,7 +128,7 @@ void ARMv7Thread::InitStack()
         throw EXCEPTION("Invalid stack size");
     }
 
-    stack_addr = Memory.Alloc(stack_size, 4096);
+    stack_addr = vm::alloc(stack_size, vm::main);
 
     if (!stack_addr)
     {

@@ -141,7 +141,7 @@ void ARMv7Thread::CloseStack()
 {
     if (stack_addr)
     {
-        Memory.Free(stack_addr);
+        vm::dealloc(stack_addr, vm::main);
         stack_addr = 0;
     }
 }

@@ -291,7 +291,7 @@ cpu_thread& armv7_thread::args(std::initializer_list<std::string> values)
         argc++;
     }
 
-    argv = Memory.PSV.RAM.AllocAlign(argv_size, 4096); // allocate arg list
+    argv = vm::alloc(argv_size, vm::main); // allocate arg list
     memcpy(vm::get_ptr(argv), argv_data.data(), argv_size); // copy arg list
 
     return *this;
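Note: the ARMv7 hunks set the pattern for the whole commit — per-console pool objects hanging off the global `Memory` (`Memory.PSV.RAM`, `Memory.Alloc`, `Memory.Free`) give way to the free functions `vm::alloc`/`vm::dealloc` tagged with a `memory_location_t`. A minimal sketch of the new call pattern, assuming the post-commit `vm.h` declarations (the helper name is illustrative, not from the codebase):

```cpp
// Allocate a region in the "main" location and release it again.
// vm::alloc aligns to 4096 by default and returns 0 on failure instead of throwing.
static u32 alloc_region(u32 size)
{
    const u32 addr = vm::alloc(size, vm::main); // was: Memory.Alloc(size, 4096)

    if (!addr)
    {
        throw EXCEPTION("Out of memory (size=0x%x)", size);
    }

    return addr;
}

// matching release:
// vm::dealloc(addr, vm::main); // was: Memory.Free(addr)
```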
@@ -2502,7 +2502,7 @@ void Compiler::LWARX(u32 rd, u32 ra, u32 rb) {
     auto addr_i32 = m_ir_builder->CreateTrunc(addr_i64, m_ir_builder->getInt32Ty());
     auto val_i32_ptr = m_ir_builder->CreateAlloca(m_ir_builder->getInt32Ty());
     val_i32_ptr->setAlignment(4);
-    Call<bool>("vm.reservation_acquire_no_cb", vm::reservation_acquire_no_cb, m_ir_builder->CreateBitCast(val_i32_ptr, m_ir_builder->getInt8PtrTy()), addr_i32, m_ir_builder->getInt32(4));
+    Call<bool>("vm.reservation_acquire", vm::reservation_acquire, m_ir_builder->CreateBitCast(val_i32_ptr, m_ir_builder->getInt8PtrTy()), addr_i32, m_ir_builder->getInt32(4));
     auto val_i32 = (Value *)m_ir_builder->CreateLoad(val_i32_ptr);
     val_i32 = m_ir_builder->CreateCall(Intrinsic::getDeclaration(m_module, Intrinsic::bswap, m_ir_builder->getInt32Ty()), val_i32);
     auto val_i64 = m_ir_builder->CreateZExt(val_i32, m_ir_builder->getInt64Ty());

@@ -2773,7 +2773,7 @@ void Compiler::LDARX(u32 rd, u32 ra, u32 rb) {
     auto addr_i32 = m_ir_builder->CreateTrunc(addr_i64, m_ir_builder->getInt32Ty());
     auto val_i64_ptr = m_ir_builder->CreateAlloca(m_ir_builder->getInt64Ty());
     val_i64_ptr->setAlignment(8);
-    Call<bool>("vm.reservation_acquire_no_cb", vm::reservation_acquire_no_cb, m_ir_builder->CreateBitCast(val_i64_ptr, m_ir_builder->getInt8PtrTy()), addr_i32, m_ir_builder->getInt32(8));
+    Call<bool>("vm.reservation_acquire", vm::reservation_acquire, m_ir_builder->CreateBitCast(val_i64_ptr, m_ir_builder->getInt8PtrTy()), addr_i32, m_ir_builder->getInt32(8));
     auto val_i64 = (Value *)m_ir_builder->CreateLoad(val_i64_ptr);
     val_i64 = m_ir_builder->CreateCall(Intrinsic::getDeclaration(m_module, Intrinsic::bswap, m_ir_builder->getInt64Ty()), val_i64);
     SetGpr(rd, val_i64);
@@ -545,7 +545,7 @@ void PPUThread::InitStack()
         throw EXCEPTION("Invalid stack size");
     }
 
-    stack_addr = Memory.StackMem.AllocAlign(stack_size, 4096);
+    stack_addr = vm::alloc(stack_size, vm::stack);
 
     if (!stack_addr)
     {

@@ -558,7 +558,7 @@ void PPUThread::CloseStack()
 {
     if (stack_addr)
    {
-        Memory.StackMem.Free(stack_addr);
+        vm::dealloc(stack_addr, vm::stack);
         stack_addr = 0;
     }
 }
@@ -11,14 +11,20 @@ thread_local spu_mfc_arg_t raw_spu_mfc[8] = {};
 RawSPUThread::RawSPUThread(const std::string& name, u32 index)
     : SPUThread(CPU_THREAD_RAW_SPU, name, COPY_EXPR(fmt::format("RawSPU_%d[0x%x] Thread (%s)[0x%08x]", index, GetId(), GetName(), PC)), index, RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index)
 {
-    vm::page_map(offset, 0x40000, vm::page_readable | vm::page_writable);
+    if (!vm::falloc(offset, 0x40000))
+    {
+        throw EXCEPTION("Failed to allocate RawSPU local storage");
+    }
 }
 
 RawSPUThread::~RawSPUThread()
 {
     join();
 
-    vm::page_unmap(offset, 0x40000);
+    if (!vm::dealloc(offset))
+    {
+        throw EXCEPTION("Failed to deallocate RawSPU local storage");
+    }
 }
 
 void RawSPUThread::start()
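Note: RawSPU local storage previously mapped pages directly with `vm::page_map`/`vm::page_unmap`; it now goes through the block allocator with `vm::falloc`, which reserves the pages *and* records the allocation, so failure becomes detectable. A sketch of the fixed-address idiom (constants as in the constructor above):

```cpp
// Fixed-address allocation: vm::falloc returns the address on success, 0 on failure.
const u32 ls_addr = RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * index;

if (!vm::falloc(ls_addr, 0x40000)) // 256 KB of local storage
{
    throw EXCEPTION("Failed to allocate RawSPU local storage");
}

// vm::dealloc with the default location (vm::any) searches every block for the address:
if (!vm::dealloc(ls_addr))
{
    throw EXCEPTION("Failed to deallocate RawSPU local storage");
}
```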
@@ -67,8 +67,12 @@ SPUThread::SPUThread(CPUThreadType type, const std::string& name, std::function<
 SPUThread::SPUThread(const std::string& name, u32 index)
     : CPUThread(CPU_THREAD_SPU, name, WRAP_EXPR(fmt::format("SPU[0x%x] Thread (%s)[0x%08x]", GetId(), GetName(), PC)))
     , index(index)
-    , offset(Memory.MainMem.AllocAlign(0x40000))
+    , offset(vm::alloc(0x40000, vm::main))
 {
+    if (!offset)
+    {
+        throw EXCEPTION("Failed to allocate SPU local storage");
+    }
 }
 
 SPUThread::~SPUThread()

@@ -77,7 +81,10 @@ SPUThread::~SPUThread()
     {
         join();
 
-        Memory.MainMem.Free(offset);
+        if (!vm::dealloc(offset, vm::main))
+        {
+            throw EXCEPTION("Failed to deallocate SPU local storage");
+        }
     }
     else if (joinable())
     {

@@ -425,11 +432,7 @@ void SPUThread::process_mfc_cmd(u32 cmd)
         break;
     }
 
-    vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128, [this]()
-    {
-        ch_event_stat |= SPU_EVENT_LR;
-        cv.notify_one();
-    });
+    vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128);
 
     ch_atomic_stat.push_uncond(MFC_GETLLAR_SUCCESS);
     return;
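Note: the GETLLAR path shows the other half of the commit — `vm::reservation_acquire` loses its `std::function` callback (and with it the `_no_cb` variant that existed only because building a `std::function` object from LLVM IR was impractical). The call site reduces to:

```cpp
// Old: acquire with a lost-reservation callback
// vm::reservation_acquire(ptr, ea, 128, [this]{ ch_event_stat |= SPU_EVENT_LR; cv.notify_one(); });

// New: plain acquire; the SPU_EVENT_LR notification presumably moves to the
// reservation_query/access-violation path instead of a per-reservation callback.
vm::reservation_acquire(vm::get_ptr(offset + ch_mfc_args.lsa), VM_CAST(ch_mfc_args.ea), 128);
```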
@@ -2,388 +2,26 @@
 #include "Utilities/Log.h"
 #include "Memory.h"
 
-MemoryBase Memory;
-
 VirtualMemoryBlock RSXIOMem;
 
-void MemoryBase::Init(MemoryType type)
+VirtualMemoryBlock* VirtualMemoryBlock::SetRange(const u32 start, const u32 size)
 {
-    if (m_inited) return;
-    m_inited = true;
-
-    LOG_NOTICE(MEMORY, "Initializing memory: g_base_addr = 0x%llx, g_priv_addr = 0x%llx", (u64)vm::g_base_addr, (u64)vm::g_priv_addr);
-
-#ifdef _WIN32
-    if (!vm::g_base_addr || !vm::g_priv_addr)
-#else
-    if ((s64)vm::g_base_addr == (s64)-1 || (s64)vm::g_priv_addr == (s64)-1)
-#endif
-    {
-        LOG_ERROR(MEMORY, "Initializing memory failed");
-        return;
-    }
-
-    switch (type)
-    {
-    case Memory_PS3:
-        MemoryBlocks.push_back(MainMem.SetRange(0x00010000, 0x1FFF0000));
-        MemoryBlocks.push_back(UserMemory = Userspace.SetRange(0x20000000, 0x10000000));
-        MemoryBlocks.push_back(RSXFBMem.SetRange(0xC0000000, 0x10000000));
-        MemoryBlocks.push_back(StackMem.SetRange(0xD0000000, 0x10000000));
-        break;
-
-    case Memory_PSV:
-        MemoryBlocks.push_back(PSV.RAM.SetRange(0x81000000, 0x10000000));
-        MemoryBlocks.push_back(UserMemory = PSV.Userspace.SetRange(0x91000000, 0x2F000000));
-        break;
-
-    case Memory_PSP:
-        MemoryBlocks.push_back(PSP.Scratchpad.SetRange(0x00010000, 0x00004000));
-        MemoryBlocks.push_back(PSP.VRAM.SetRange(0x04000000, 0x00200000));
-        MemoryBlocks.push_back(PSP.RAM.SetRange(0x08000000, 0x02000000));
-        MemoryBlocks.push_back(PSP.Kernel.SetRange(0x88000000, 0x00800000));
-        MemoryBlocks.push_back(UserMemory = PSP.Userspace.SetRange(0x08800000, 0x01800000));
-        break;
-    }
-
-    LOG_NOTICE(MEMORY, "Memory initialized.");
-}
-
-void MemoryBase::Close()
-{
-    if (!m_inited) return;
-    m_inited = false;
-
-    LOG_NOTICE(MEMORY, "Closing memory...");
-
-    for (auto block : MemoryBlocks)
-    {
-        block->Delete();
-    }
-
-    RSXIOMem.Delete();
-
-    MemoryBlocks.clear();
-}
-
-bool MemoryBase::Map(const u32 addr, const u32 size)
-{
-    if (!size || (size | addr) % 4096)
-    {
-        throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x)", addr, size);
-    }
-
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (auto& block : MemoryBlocks)
-    {
-        if (block->GetStartAddr() >= addr && block->GetStartAddr() <= addr + size - 1)
-        {
-            return false;
-        }
-
-        if (addr >= block->GetStartAddr() && addr <= block->GetEndAddr())
-        {
-            return false;
-        }
-    }
-
-    for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
-    {
-        if (vm::check_addr(i * 4096, 4096))
-        {
-            return false;
-        }
-    }
-
-    MemoryBlocks.push_back((new DynamicMemoryBlock())->SetRange(addr, size));
-
-    return true;
-}
-
-bool MemoryBase::Unmap(const u32 addr)
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (u32 i = 0; i < MemoryBlocks.size(); i++)
-    {
-        if (MemoryBlocks[i]->GetStartAddr() == addr)
-        {
-            delete MemoryBlocks[i];
-            MemoryBlocks.erase(MemoryBlocks.begin() + i);
-            return true;
-        }
-    }
-
-    return false;
-}
-
-MemoryBlock* MemoryBase::Get(const u32 addr)
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (auto& block : MemoryBlocks)
-    {
-        if (block->GetStartAddr() == addr)
-        {
-            return block;
-        }
-    }
-
-    return nullptr;
-}
-
-MemBlockInfo::MemBlockInfo(u32 addr, u32 size)
-    : MemInfo(addr, size)
-{
-    vm::page_map(addr, size, vm::page_readable | vm::page_writable | vm::page_executable);
-}
-
-void MemBlockInfo::Free()
-{
-    if (addr && size)
-    {
-        vm::page_unmap(addr, size);
-        addr = size = 0;
-    }
-}
-
-//MemoryBlock
-MemoryBlock::MemoryBlock() : mem_inf(nullptr)
-{
-    Init();
-}
-
-MemoryBlock::~MemoryBlock()
-{
-    Delete();
-}
-
-void MemoryBlock::Init()
-{
-    range_start = 0;
-    range_size = 0;
-}
-
-void MemoryBlock::InitMemory()
-{
-    if (range_size)
-    {
-        Free();
-        mem_inf = new MemBlockInfo(range_start, range_size);
-    }
-}
-
-void MemoryBlock::Free()
-{
-    if (mem_inf)
-    {
-        delete mem_inf;
-        mem_inf = nullptr;
-    }
-}
-
-void MemoryBlock::Delete()
-{
-    Free();
-    Init();
-}
-
-MemoryBlock* MemoryBlock::SetRange(const u32 start, const u32 size)
-{
-    range_start = start;
-    range_size = size;
-
-    InitMemory();
-    return this;
-}
-
-DynamicMemoryBlockBase::DynamicMemoryBlockBase()
-    : MemoryBlock()
-    , m_max_size(0)
-{
-}
-
-const u32 DynamicMemoryBlockBase::GetUsedSize() const
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    u32 size = 0;
-
-    for (u32 i = 0; i < m_allocated.size(); ++i)
-    {
-        size += m_allocated[i].size;
-    }
-
-    return size;
-}
-
-bool DynamicMemoryBlockBase::IsInMyRange(const u32 addr, const u32 size)
-{
-    return addr >= MemoryBlock::GetStartAddr() && addr + size - 1 <= MemoryBlock::GetEndAddr();
-}
-
-MemoryBlock* DynamicMemoryBlockBase::SetRange(const u32 start, const u32 size)
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    m_max_size = PAGE_4K(size);
-    if (!MemoryBlock::SetRange(start, 0))
-    {
-        assert(0);
-        return nullptr;
-    }
-
-    return this;
-}
-
-void DynamicMemoryBlockBase::Delete()
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    m_allocated.clear();
-    m_max_size = 0;
-
-    MemoryBlock::Delete();
-}
-
-bool DynamicMemoryBlockBase::AllocFixed(u32 addr, u32 size)
-{
-    assert(size);
-
-    size = PAGE_4K(size + (addr & 4095)); // align size
-
-    addr &= ~4095; // align start address
-
-    if (!IsInMyRange(addr, size))
-    {
-        assert(0);
-        return false;
-    }
-
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (u32 i = 0; i < m_allocated.size(); ++i)
-    {
-        if (addr >= m_allocated[i].addr && addr <= m_allocated[i].addr + m_allocated[i].size - 1) return false;
-    }
-
-    AppendMem(addr, size);
-
-    return true;
-}
-
-void DynamicMemoryBlockBase::AppendMem(u32 addr, u32 size) /* private */
-{
-    m_allocated.emplace_back(addr, size);
-}
-
-u32 DynamicMemoryBlockBase::AllocAlign(u32 size, u32 align)
-{
-    assert(size && align);
-
-    if (!MemoryBlock::GetStartAddr())
-    {
-        LOG_ERROR(MEMORY, "DynamicMemoryBlockBase::AllocAlign(size=0x%x, align=0x%x): memory block not initialized", size, align);
-        return 0;
-    }
-
-    size = PAGE_4K(size);
-    u32 exsize;
-
-    if (align <= 4096)
-    {
-        align = 0;
-        exsize = size;
-    }
-    else
-    {
-        align &= ~4095;
-        exsize = size + align - 1;
-    }
-
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (u32 addr = MemoryBlock::GetStartAddr(); addr <= MemoryBlock::GetEndAddr() - exsize;)
-    {
-        bool is_good_addr = true;
-
-        for (u32 i = 0; i < m_allocated.size(); ++i)
-        {
-            if ((addr >= m_allocated[i].addr && addr <= m_allocated[i].addr + m_allocated[i].size - 1) ||
-                (m_allocated[i].addr >= addr && m_allocated[i].addr <= addr + exsize - 1))
-            {
-                is_good_addr = false;
-                addr = m_allocated[i].addr + m_allocated[i].size;
-                break;
-            }
-        }
-
-        if (!is_good_addr) continue;
-
-        if (align)
-        {
-            addr = (addr + (align - 1)) & ~(align - 1);
-        }
-
-        //LOG_NOTICE(MEMORY, "AllocAlign(size=0x%x) -> 0x%x", size, addr);
-
-        AppendMem(addr, size);
-
-        return addr;
-    }
-
-    return 0;
-}
-
-bool DynamicMemoryBlockBase::Alloc()
-{
-    return AllocAlign(GetSize() - GetUsedSize()) != 0;
-}
-
-bool DynamicMemoryBlockBase::Free(u32 addr)
-{
-    std::lock_guard<std::mutex> lock(Memory.mutex);
-
-    for (u32 num = 0; num < m_allocated.size(); num++)
-    {
-        if (addr == m_allocated[num].addr)
-        {
-            //LOG_NOTICE(MEMORY, "Free(0x%x)", addr);
-
-            m_allocated.erase(m_allocated.begin() + num);
-            return true;
-        }
-    }
-
-    LOG_ERROR(MEMORY, "DynamicMemoryBlock::Free(addr=0x%x): failed", addr);
-    for (u32 i = 0; i < m_allocated.size(); i++)
-    {
-        LOG_NOTICE(MEMORY, "*** Memory Block: addr = 0x%x, size = 0x%x", m_allocated[i].addr, m_allocated[i].size);
-    }
-    return false;
-}
-
-VirtualMemoryBlock::VirtualMemoryBlock() : MemoryBlock(), m_reserve_size(0)
-{
-}
-
-MemoryBlock* VirtualMemoryBlock::SetRange(const u32 start, const u32 size)
-{
-    range_start = start;
-    range_size = size;
+    m_range_start = start;
+    m_range_size = size;
 
     return this;
 }
 
 bool VirtualMemoryBlock::IsInMyRange(const u32 addr, const u32 size)
 {
-    return addr >= GetStartAddr() && addr + size - 1 <= GetEndAddr() - GetReservedAmount();
+    return addr >= m_range_start && addr + size - 1 <= m_range_start + m_range_size - 1 - GetReservedAmount();
 }
 
 u32 VirtualMemoryBlock::Map(u32 realaddr, u32 size)
 {
     assert(size);
 
-    for (u32 addr = GetStartAddr(); addr <= GetEndAddr() - GetReservedAmount() - size;)
+    for (u32 addr = m_range_start; addr <= m_range_start + m_range_size - 1 - GetReservedAmount() - size;)
     {
         bool is_good_addr = true;

@@ -505,16 +143,9 @@ u32 VirtualMemoryBlock::getMappedAddress(u32 realAddress)
     return 0;
 }
 
-void VirtualMemoryBlock::Delete()
-{
-    m_mapped_memory.clear();
-
-    MemoryBlock::Delete();
-}
-
 bool VirtualMemoryBlock::Reserve(u32 size)
 {
-    if (size + GetReservedAmount() > GetEndAddr() - GetStartAddr())
+    if (size + GetReservedAmount() > m_range_size)
         return false;
 
     m_reserve_size += size;
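Note: after the purge, Memory.cpp keeps only `VirtualMemoryBlock` (still used for the RSX IO window), which now owns its `m_range_start`/`m_range_size` instead of inheriting them from the deleted `MemoryBlock`. Its external behaviour is unchanged; a hedged usage sketch based on the cellGcmSys call sites further down (the source address is hypothetical):

```cpp
VirtualMemoryBlock RSXIOMem;

// Cover a 256 MB IO window starting at offset 0.
RSXIOMem.SetRange(0, 0x10000000);

// Map `size` bytes of real memory into the window and translate back:
const u32 io_addr = RSXIOMem.Map(real_addr /* hypothetical source address */, size);
const u32 real    = RSXIOMem.RealAddr(io_addr); // 0 if not mapped
```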
@@ -2,88 +2,6 @@
 
 #include "MemoryBlock.h"
 
-enum MemoryType
-{
-    Memory_PS3,
-    Memory_PSV,
-    Memory_PSP,
-};
-
-class MemoryBase
-{
-    std::vector<MemoryBlock*> MemoryBlocks;
-
-public:
-    std::mutex mutex;
-
-    MemoryBlock* UserMemory;
-
-    DynamicMemoryBlock MainMem;
-    DynamicMemoryBlock Userspace;
-    DynamicMemoryBlock RSXFBMem;
-    DynamicMemoryBlock StackMem;
-    VirtualMemoryBlock RSXIOMem;
-
-    struct
-    {
-        DynamicMemoryBlock RAM;
-        DynamicMemoryBlock Userspace;
-    }
-    PSV;
-
-    struct
-    {
-        DynamicMemoryBlock Scratchpad;
-        DynamicMemoryBlock VRAM;
-        DynamicMemoryBlock RAM;
-        DynamicMemoryBlock Kernel;
-        DynamicMemoryBlock Userspace;
-    }
-    PSP;
-
-    bool m_inited;
-
-    MemoryBase()
-    {
-        m_inited = false;
-    }
-
-    ~MemoryBase()
-    {
-        Close();
-    }
-
-    void Init(MemoryType type);
-
-    void Close();
-
-    u32 GetUserMemTotalSize()
-    {
-        return UserMemory->GetSize();
-    }
-
-    u32 GetUserMemAvailSize()
-    {
-        return UserMemory->GetSize() - UserMemory->GetUsedSize();
-    }
-
-    u32 Alloc(const u32 size, const u32 align)
-    {
-        return UserMemory->AllocAlign(size, align);
-    }
-
-    bool Free(const u32 addr)
-    {
-        return UserMemory->Free(addr);
-    }
-
-    bool Map(const u32 addr, const u32 size);
-
-    bool Unmap(const u32 addr);
-
-    MemoryBlock* Get(const u32 addr);
-};
-
-extern MemoryBase Memory;
 extern VirtualMemoryBlock RSXIOMem;
 
 #include "vm.h"
@@ -1,7 +1,5 @@
 #pragma once
 
-#define PAGE_4K(x) (x + 4095) & ~(4095)
-
 struct MemInfo
 {
     u32 addr;

@@ -20,39 +18,6 @@ struct MemInfo
     }
 };
 
-struct MemBlockInfo : public MemInfo
-{
-    MemBlockInfo(u32 addr, u32 size);
-
-    void Free();
-
-    MemBlockInfo(MemBlockInfo &other) = delete;
-
-    MemBlockInfo(MemBlockInfo &&other)
-        : MemInfo(other.addr, other.size)
-    {
-        other.addr = 0;
-        other.size = 0;
-    }
-
-    MemBlockInfo& operator =(MemBlockInfo &other) = delete;
-
-    MemBlockInfo& operator =(MemBlockInfo &&other)
-    {
-        Free();
-        this->addr = other.addr;
-        this->size = other.size;
-        other.addr = 0;
-        other.size = 0;
-        return *this;
-    }
-
-    ~MemBlockInfo()
-    {
-        Free();
-    }
-};
-
 struct VirtualMemInfo : public MemInfo
 {
     u32 realAddress;

@@ -70,95 +35,40 @@ struct VirtualMemInfo
     }
 };
 
-class MemoryBlock
-{
-protected:
-    u32 range_start;
-    u32 range_size;
-
-public:
-    MemoryBlock();
-    virtual ~MemoryBlock();
-
-private:
-    MemBlockInfo* mem_inf;
-    void Init();
-    void Free();
-    void InitMemory();
-
-public:
-    virtual void Delete();
-
-    virtual MemoryBlock* SetRange(const u32 start, const u32 size);
-
-    const u32 GetStartAddr() const { return range_start; }
-    const u32 GetEndAddr() const { return GetStartAddr() + GetSize() - 1; }
-    virtual const u32 GetSize() const { return range_size; }
-    virtual const u32 GetUsedSize() const { return GetSize(); }
-
-    virtual bool AllocFixed(u32 addr, u32 size) { return false; }
-    virtual u32 AllocAlign(u32 size, u32 align = 1) { return 0; }
-    virtual bool Alloc() { return false; }
-    virtual bool Free(u32 addr) { return false; }
-};
-
-class DynamicMemoryBlockBase : public MemoryBlock
-{
-    std::vector<MemBlockInfo> m_allocated; // allocation info
-    u32 m_max_size;
-
-public:
-    DynamicMemoryBlockBase();
-
-    const u32 GetSize() const { return m_max_size; }
-    const u32 GetUsedSize() const;
-
-    virtual bool IsInMyRange(const u32 addr, const u32 size = 1);
-
-    virtual MemoryBlock* SetRange(const u32 start, const u32 size);
-
-    virtual void Delete();
-
-    virtual bool AllocFixed(u32 addr, u32 size);
-    virtual u32 AllocAlign(u32 size, u32 align = 1);
-    virtual bool Alloc();
-    virtual bool Free(u32 addr);
-
-private:
-    void AppendMem(u32 addr, u32 size);
-};
-
-class VirtualMemoryBlock : public MemoryBlock
+class VirtualMemoryBlock
 {
     std::vector<VirtualMemInfo> m_mapped_memory;
-    u32 m_reserve_size;
+    u32 m_reserve_size = 0;
+    u32 m_range_start = 0;
+    u32 m_range_size = 0;
 
 public:
-    VirtualMemoryBlock();
+    VirtualMemoryBlock() = default;
 
-    virtual MemoryBlock* SetRange(const u32 start, const u32 size);
-    virtual bool IsInMyRange(const u32 addr, const u32 size = 1);
-    virtual void Delete();
+    VirtualMemoryBlock* SetRange(const u32 start, const u32 size);
+    u32 GetStartAddr() const { return m_range_start; }
+    u32 GetSize() const { return m_range_size; }
+    bool IsInMyRange(const u32 addr, const u32 size);
 
     // maps real address to virtual address space, returns the mapped address or 0 on failure (if no address is specified the
     // first mappable space is used)
-    virtual bool Map(u32 realaddr, u32 size, u32 addr);
-    virtual u32 Map(u32 realaddr, u32 size);
+    bool Map(u32 realaddr, u32 size, u32 addr);
+    u32 Map(u32 realaddr, u32 size);
 
     // Unmap real address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
-    virtual bool UnmapRealAddress(u32 realaddr, u32& size);
+    bool UnmapRealAddress(u32 realaddr, u32& size);
 
     // Unmap address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
-    virtual bool UnmapAddress(u32 addr, u32& size);
+    bool UnmapAddress(u32 addr, u32& size);
 
     // Reserve a certain amount so no one can use it, returns true on success, false on failure
-    virtual bool Reserve(u32 size);
+    bool Reserve(u32 size);
 
     // Unreserve a certain amount of bytes, returns true on success, false if size is bigger than the reserved amount
-    virtual bool Unreserve(u32 size);
+    bool Unreserve(u32 size);
 
     // Return the total amount of reserved memory
-    virtual u32 GetReservedAmount();
+    u32 GetReservedAmount();
 
     bool Read32(const u32 addr, u32* value);
 

@@ -178,5 +88,3 @@ public:
     // return the mapped address given a real address, if not mapped return 0
     u32 getMappedAddress(u32 realAddress);
 };
-
-typedef DynamicMemoryBlockBase DynamicMemoryBlock;
@@ -40,13 +40,13 @@ namespace vm
 
         if (memory_handle == -1)
         {
-            printf("shm_open('/rpcs3_vm') failed\n");
+            std::printf("shm_open('/rpcs3_vm') failed\n");
             return (void*)-1;
         }
 
         if (ftruncate(memory_handle, 0x100000000) == -1)
         {
-            printf("ftruncate(memory_handle) failed\n");
+            std::printf("ftruncate(memory_handle) failed\n");
             shm_unlink("/rpcs3_vm");
             return (void*)-1;
         }

@@ -56,6 +56,8 @@ namespace vm
 
         shm_unlink("/rpcs3_vm");
 
+        std::printf("/rpcs3_vm: g_base_addr = %p, g_priv_addr = %p\n", base_addr, g_priv_addr);
+
         return base_addr;
 #endif
     }

@@ -71,10 +73,10 @@ namespace vm
 #endif
     }
 
-    void* g_base_addr = (atexit(finalize), initialize());
+    void* const g_base_addr = (atexit(finalize), initialize());
     void* g_priv_addr;
 
-    std::array<atomic_t<u8>, 0x100000000ull / 4096> g_page_info = {}; // information about every page
+    std::array<atomic_t<u8>, 0x100000000ull / 4096> g_pages = {}; // information about every page
 
     class reservation_mutex_t
     {

@@ -131,7 +133,6 @@ namespace vm
 
     };
 
-    std::function<void()> g_reservation_cb = nullptr;
     const thread_ctrl_t* g_reservation_owner = nullptr;
 
     u32 g_reservation_addr = 0;

@@ -152,7 +153,7 @@ namespace vm
         }
     }
 
-    bool _reservation_break(u32 addr)
+    void _reservation_break(u32 addr)
     {
         if (g_reservation_addr >> 12 == addr >> 12)
         {
@@ -166,85 +167,64 @@ namespace vm
             throw EXCEPTION("System failure (addr=0x%x)", addr);
         }
 
-        if (g_reservation_cb)
-        {
-            g_reservation_cb();
-            g_reservation_cb = nullptr;
-        }
-
         g_reservation_owner = nullptr;
         g_reservation_addr = 0;
         g_reservation_size = 0;
-
-            return true;
         }
-
-        return false;
     }
 
-    bool reservation_break(u32 addr)
+    void reservation_break(u32 addr)
     {
         std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
 
-        return _reservation_break(addr);
+        _reservation_break(addr);
     }
 
-    bool reservation_acquire(void* data, u32 addr, u32 size, std::function<void()> callback)
+    void reservation_acquire(void* data, u32 addr, u32 size)
     {
-        bool broken = false;
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
 
         assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
         assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
 
+        const u8 flags = g_pages[addr >> 12].load();
+
+        if (!(flags & page_writable) || !(flags & page_allocated) || (flags & page_no_reservations))
         {
-            std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
-            u8 flags = g_page_info[addr >> 12].load();
-            if (!(flags & page_writable) || !(flags & page_allocated) || (flags & page_no_reservations))
-            {
-                throw EXCEPTION("Invalid page flags (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
-            }
-
-            // silent unlocking to prevent priority boost for threads going to break reservation
-            //g_reservation_mutex.do_notify = false;
-
-            // break previous reservation
-            if (g_reservation_owner)
-            {
-                broken = _reservation_break(g_reservation_addr);
-            }
-
-            // change memory protection to read-only
-            _reservation_set(addr);
-
-            // may not be necessary
-            _mm_mfence();
-
-            // set additional information
-            g_reservation_addr = addr;
-            g_reservation_size = size;
-            g_reservation_owner = get_current_thread_ctrl();
-            g_reservation_cb = std::move(callback);
-
-            // copy data
-            memcpy(data, vm::get_ptr(addr), size);
+            throw EXCEPTION("Invalid page flags (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
         }
 
-        return broken;
-    }
+        // silent unlocking to prevent priority boost for threads going to break reservation
+        //g_reservation_mutex.do_notify = false;
 
-    bool reservation_acquire_no_cb(void* data, u32 addr, u32 size)
-    {
-        return reservation_acquire(data, addr, size, nullptr);
+        // break previous reservation
+        if (g_reservation_owner)
+        {
+            _reservation_break(g_reservation_addr);
+        }
+
+        // change memory protection to read-only
+        _reservation_set(addr);
+
+        // may not be necessary
+        _mm_mfence();
+
+        // set additional information
+        g_reservation_addr = addr;
+        g_reservation_size = size;
+        g_reservation_owner = get_current_thread_ctrl();
+
+        // copy data
+        memcpy(data, vm::get_ptr(addr), size);
     }
 
     bool reservation_update(u32 addr, const void* data, u32 size)
     {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
         assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
         assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
 
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
         if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
         {
             // atomic update failed

@@ -257,9 +237,6 @@ namespace vm
         // update memory using privileged access
         memcpy(vm::priv_ptr(addr), data, size);
 
-        // remove callback to not call it on successful update
-        g_reservation_cb = nullptr;
-
         // free the reservation and restore memory protection
         _reservation_break(addr);
@@ -295,7 +272,7 @@ namespace vm
 
     void reservation_free()
     {
-        if (g_reservation_owner == get_current_thread_ctrl())
+        if (g_reservation_owner && g_reservation_owner == get_current_thread_ctrl())
         {
             std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
 

@@ -305,11 +282,11 @@ namespace vm
 
     void reservation_op(u32 addr, u32 size, std::function<void()> proc)
     {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
         assert(size == 1 || size == 2 || size == 4 || size == 8 || size == 128);
         assert((addr + size - 1 & ~0xfff) == (addr & ~0xfff));
 
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
         // break previous reservation
         if (g_reservation_owner != get_current_thread_ctrl() || g_reservation_addr != addr || g_reservation_size != size)
         {

@@ -326,7 +303,6 @@ namespace vm
         g_reservation_addr = addr;
         g_reservation_size = size;
         g_reservation_owner = get_current_thread_ctrl();
-        g_reservation_cb = nullptr;
 
         // may not be necessary
         _mm_mfence();

@@ -338,15 +314,13 @@ namespace vm
         _reservation_break(addr);
     }
 
-    void page_map(u32 addr, u32 size, u8 flags)
+    void _page_map(u32 addr, u32 size, u8 flags)
     {
         assert(size && (size | addr) % 4096 == 0 && flags < page_allocated);
 
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (g_page_info[i].load())
+            if (g_pages[i].load())
             {
                 throw EXCEPTION("Memory already mapped (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
             }

@@ -368,7 +342,7 @@ namespace vm
 
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (g_page_info[i].exchange(flags | page_allocated))
+            if (g_pages[i].exchange(flags | page_allocated))
             {
                 throw EXCEPTION("Concurrent access (addr=0x%x, size=0x%x, flags=0x%x, current_addr=0x%x)", addr, size, flags, i * 4096);
             }

@@ -379,17 +353,17 @@ namespace vm
 
     bool page_protect(u32 addr, u32 size, u8 flags_test, u8 flags_set, u8 flags_clear)
     {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
         u8 flags_inv = flags_set & flags_clear;
 
         assert(size && (size | addr) % 4096 == 0);
 
         flags_test |= page_allocated;
 
-        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
-
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if ((g_page_info[i].load() & flags_test) != (flags_test | page_allocated))
+            if ((g_pages[i].load() & flags_test) != (flags_test | page_allocated))
             {
                 return false;
             }

@@ -404,9 +378,9 @@ namespace vm
         {
             _reservation_break(i * 4096);
 
-            const u8 f1 = g_page_info[i]._or(flags_set & ~flags_inv) & (page_writable | page_readable);
-            g_page_info[i]._and_not(flags_clear & ~flags_inv);
-            const u8 f2 = (g_page_info[i] ^= flags_inv) & (page_writable | page_readable);
+            const u8 f1 = g_pages[i]._or(flags_set & ~flags_inv) & (page_writable | page_readable);
+            g_pages[i]._and_not(flags_clear & ~flags_inv);
+            const u8 f2 = (g_pages[i] ^= flags_inv) & (page_writable | page_readable);
 
             if (f1 != f2)
             {

@@ -432,13 +406,13 @@ namespace vm
 
     void page_unmap(u32 addr, u32 size)
     {
-        assert(size && (size | addr) % 4096 == 0);
-
         std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
 
+        assert(size && (size | addr) % 4096 == 0);
+
         for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
         {
-            if (!(g_page_info[i].load() & page_allocated))
+            if (!(g_pages[i].load() & page_allocated))
             {
                 throw EXCEPTION("Memory not mapped (addr=0x%x, size=0x%x, current_addr=0x%x)", addr, size, i * 4096);
             }

@@ -448,7 +422,7 @@ namespace vm
         {
             _reservation_break(i * 4096);
 
-            if (!(g_page_info[i].exchange(0) & page_allocated))
+            if (!(g_pages[i].exchange(0) & page_allocated))
             {
                 throw EXCEPTION("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)", addr, size, i * 4096);
             }

@@ -469,8 +443,6 @@ namespace vm
         }
     }
 
-    // Not checked if address is writable/readable. Checking address before using it is unsafe.
-    // The only safe way to check it is to protect both actions (checking and using) with mutex that is used for mapping/allocation.
     bool check_addr(u32 addr, u32 size)
     {
         assert(size);

@@ -482,7 +454,7 @@ namespace vm
 
         for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
         {
-            if ((g_page_info[i].load() & page_allocated) != page_allocated)
+            if ((g_pages[i].load() & page_allocated) != page_allocated)
             {
                 return false;
             }
@@ -491,78 +463,266 @@ namespace vm
         return true;
     }
 
-    //TODO
-    bool map(u32 addr, u32 size, u32 flags)
+    std::vector<std::shared_ptr<block_t>> g_locations;
+
+    u32 alloc(u32 size, memory_location_t location, u32 align)
     {
-        return Memory.Map(addr, size);
+        const auto block = get(location);
+
+        if (!block)
+        {
+            throw EXCEPTION("Invalid memory location (%d)", location);
+        }
+
+        return block->alloc(size, align);
     }
 
-    bool unmap(u32 addr, u32 size, u32 flags)
+    u32 falloc(u32 addr, u32 size, memory_location_t location)
     {
-        return Memory.Unmap(addr);
+        const auto block = get(location, addr);
+
+        if (!block)
+        {
+            throw EXCEPTION("Invalid memory location (%d, addr=0x%x)", location, addr);
+        }
+
+        return block->falloc(addr, size);
     }
 
-    u32 alloc(u32 addr, u32 size, memory_location location)
+    bool dealloc(u32 addr, memory_location_t location)
     {
-        return g_locations[location].fixed_allocator(addr, size);
+        const auto block = get(location, addr);
+
+        if (!block)
+        {
+            throw EXCEPTION("Invalid memory location (%d, addr=0x%x)", location, addr);
+        }
+
+        return block->dealloc(addr);
     }
 
-    u32 alloc(u32 size, memory_location location)
+    bool block_t::try_alloc(u32 addr, u32 size)
     {
-        return g_locations[location].allocator(size);
+        // check if memory area is already mapped
+        for (u32 i = addr / 4096; i <= (addr + size - 1) / 4096; i++)
+        {
+            if (g_pages[i].load())
+            {
+                return false;
+            }
+        }
+
+        // try to reserve "physical" memory
+        if (!used.atomic_op([=](u32& used) -> bool
+        {
+            if (used > this->size)
+            {
+                throw EXCEPTION("Unexpected memory amount used (0x%x)", used);
+            }
+
+            if (used + size > this->size)
+            {
+                return false;
+            }
+
+            used += size;
+
+            return true;
+        }))
+        {
+            return false;
+        }
+
+        // map memory pages
+        _page_map(addr, size, page_readable | page_writable);
+
+        // add entry
+        m_map[addr] = size;
+
+        return true;
     }
 
-    void dealloc(u32 addr, memory_location location)
+    block_t::~block_t()
     {
-        return g_locations[location].deallocator(addr);
+        // deallocate all memory
+        for (auto& entry : m_map)
+        {
+            // unmap memory pages
+            vm::page_unmap(entry.first, entry.second);
+        }
+    }
+
+    u32 block_t::alloc(u32 size, u32 align)
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+
+        // align to minimal page size
+        size = ::align(size, 4096);
+
+        // check alignment (it's page allocation, so passing small values there is just silly)
+        if (align < 4096 || align != (0x80000000u >> cntlz32(align)))
+        {
+            throw EXCEPTION("Invalid alignment (size=0x%x, align=0x%x)", size, align);
+        }
+
+        // return if size is invalid
+        if (!size || size > this->size)
+        {
+            return false;
+        }
+
+        // search for an appropriate place (unoptimized)
+        for (u32 addr = ::align(this->addr, align); addr < this->addr + this->size - 1; addr += align)
+        {
+            if (try_alloc(addr, size))
+            {
+                return addr;
+            }
+        }
+
+        return false;
+    }
+
+    u32 block_t::falloc(u32 addr, u32 size)
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+
+        // align to minimal page size
+        size = ::align(size, 4096);
+
+        // return if addr or size is invalid
+        if (!size || size > this->size || addr < this->addr || addr + size - 1 >= this->addr + this->size - 1)
+        {
+            return false;
+        }
+
+        if (!try_alloc(addr, size))
+        {
+            return false;
+        }
+
+        return addr;
+    }
+
+    bool block_t::dealloc(u32 addr)
+    {
+        std::lock_guard<std::mutex> lock(m_mutex);
+
+        const auto found = m_map.find(addr);
+
+        if (found != m_map.end())
+        {
+            const u32 size = found->second;
+
+            // unmap memory pages
+            vm::page_unmap(addr, size);
+
+            // remove entry
+            m_map.erase(found);
+
+            // return "physical" memory
+            used -= size;
+
+            return true;
+        }
+
+        return false;
+    }
+
+    std::shared_ptr<block_t> map(u32 addr, u32 size, u32 flags)
+    {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
+        if (!size || (size | addr) % 4096 || flags)
+        {
+            throw EXCEPTION("Invalid arguments (addr=0x%x, size=0x%x, flags=0x%x)", addr, size, flags);
+        }
+
+        for (auto& block : g_locations)
+        {
+            if (block->addr >= addr && block->addr <= addr + size - 1)
+            {
+                return nullptr;
+            }
+
+            if (addr >= block->addr && addr <= block->addr + block->size - 1)
+            {
+                return nullptr;
+            }
+        }
+
+        for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
+        {
+            if (g_pages[i].load())
+            {
+                throw EXCEPTION("Unexpected memory usage");
+            }
+        }
+
+        auto block = std::make_shared<block_t>(addr, size);
+
+        g_locations.emplace_back(block);
+
+        return block;
+    }
+
+    std::shared_ptr<block_t> unmap(u32 addr)
+    {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
+        for (auto it = g_locations.begin(); it != g_locations.end(); it++)
+        {
+            if (*it && (*it)->addr == addr)
+            {
+                auto block = std::move(*it);
+                g_locations.erase(it);
+                return block;
+            }
+        }
+
+        return nullptr;
+    }
+
+    std::shared_ptr<block_t> get(memory_location_t location, u32 addr)
+    {
+        std::lock_guard<reservation_mutex_t> lock(g_reservation_mutex);
+
+        if (location != any)
+        {
+            // return selected location
+            if (location < g_locations.size())
+            {
+                return g_locations[location];
+            }
+
+            return nullptr;
+        }
+
+        // search location by address
+        for (auto& block : g_locations)
+        {
+            if (addr >= block->addr && addr <= block->addr + block->size - 1)
+            {
+                return block;
+            }
+        }
+
+        return nullptr;
     }
 
     namespace ps3
     {
-        u32 main_alloc(u32 size)
-        {
-            return Memory.MainMem.AllocAlign(size, 1);
-        }
-        u32 main_fixed_alloc(u32 addr, u32 size)
-        {
-            return Memory.MainMem.AllocFixed(addr, size) ? addr : 0;
-        }
-        void main_dealloc(u32 addr)
-        {
-            Memory.MainMem.Free(addr);
-        }
-
-        u32 user_space_alloc(u32 size)
-        {
-            return Memory.Userspace.AllocAlign(size, 1);
-        }
-        u32 user_space_fixed_alloc(u32 addr, u32 size)
-        {
-            return Memory.Userspace.AllocFixed(addr, size) ? addr : 0;
-        }
-        void user_space_dealloc(u32 addr)
-        {
-            Memory.Userspace.Free(addr);
-        }
-
-        u32 g_stack_offset = 0;
-
-        u32 stack_alloc(u32 size)
-        {
-            return Memory.StackMem.AllocAlign(size, 0x10);
-        }
-        u32 stack_fixed_alloc(u32 addr, u32 size)
-        {
-            return Memory.StackMem.AllocFixed(addr, size) ? addr : 0;
-        }
-        void stack_dealloc(u32 addr)
-        {
-            Memory.StackMem.Free(addr);
-        }
-
         void init()
         {
-            Memory.Init(Memory_PS3);
+            g_locations =
+            {
+                std::make_shared<block_t>(0x00010000, 0x1FFF0000), // main
+                std::make_shared<block_t>(0x20000000, 0x10000000), // user
+                std::make_shared<block_t>(0xC0000000, 0x10000000), // video
+                std::make_shared<block_t>(0xD0000000, 0x10000000), // stack
+
+                std::make_shared<block_t>(0xE0000000, 0x20000000), // RawSPU
+            };
         }
     }
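Note: inside `block_t::try_alloc`, the "physical" usage counter is bumped with an `atomic_t::atomic_op` lambda, i.e. a check-then-commit loop that retries under contention. A self-contained sketch of the same reserve-or-fail pattern with plain `std::atomic` (names are illustrative, not from the codebase):

```cpp
#include <atomic>
#include <cstdint>

// Try to reserve `size` bytes against `used` without exceeding `capacity`.
// Mirrors the accounting step of block_t::try_alloc: validate, then commit atomically.
bool try_reserve(std::atomic<std::uint32_t>& used, std::uint32_t capacity, std::uint32_t size)
{
    std::uint32_t cur = used.load();

    do
    {
        if (cur + size > capacity)
        {
            return false; // not enough free "physical" memory in this block
        }
    }
    while (!used.compare_exchange_weak(cur, cur + size)); // cur is reloaded on failure

    return true;
}
```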
@@ -570,7 +730,13 @@ namespace vm
 {
     void init()
     {
-        Memory.Init(Memory_PSV);
+        g_locations =
+        {
+            std::make_shared<block_t>(0x81000000, 0x10000000), // RAM
+            std::make_shared<block_t>(0x91000000, 0x2F000000), // user
+            nullptr, // video
+            nullptr, // stack
+        };
     }
 }

@@ -578,20 +744,22 @@ namespace vm
 {
     void init()
     {
-        Memory.Init(Memory_PSP);
+        g_locations =
+        {
+            std::make_shared<block_t>(0x08000000, 0x02000000), // RAM
+            std::make_shared<block_t>(0x08800000, 0x01800000), // user
+            std::make_shared<block_t>(0x04000000, 0x00200000), // VRAM
+            nullptr, // stack
+
+            std::make_shared<block_t>(0x00010000, 0x00004000), // scratchpad
+            std::make_shared<block_t>(0x88000000, 0x00800000), // kernel
+        };
     }
 }
 
-location_info g_locations[memory_location_count] =
-{
-    { 0x00010000, 0x1FFF0000, ps3::main_alloc, ps3::main_fixed_alloc, ps3::main_dealloc },
-    { 0x20000000, 0x10000000, ps3::user_space_alloc, ps3::user_space_fixed_alloc, ps3::user_space_dealloc },
-    { 0xD0000000, 0x10000000, ps3::stack_alloc, ps3::stack_fixed_alloc, ps3::stack_dealloc },
-};
-
 void close()
 {
-    Memory.Close();
+    g_locations.clear();
 }
 
 u32 stack_push(CPUThread& CPU, u32 size, u32 align_v, u32& old_pos)
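Note: platform initialization is now just filling `g_locations`; the vector index doubles as the `memory_location_t` value (`main` = 0, `user_space` = 1, `video` = 2, `stack` = 3), which is why the PSV and PSP layouts pad unused slots with `nullptr`. A sketch of resolving locations after init:

```cpp
vm::ps3::init(); // installs the PS3 block layout shown above

// look up by location index...
const auto main_block = vm::get(vm::main); // block at 0x00010000, size 0x1FFF0000

// ...or by address, using vm::any to search all blocks
const auto owner = vm::get(vm::any, 0xD0001000); // resolves to the stack block
```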
@@ -1,20 +1,23 @@
 #pragma once
-#include "Memory.h"
+
+#include "stdafx.h"
 
 class CPUThread;
 
 namespace vm
 {
-    extern void* g_base_addr; // base address of ps3/psv virtual memory for common access
+    extern void* const g_base_addr; // base address of ps3/psv virtual memory for common access
     extern void* g_priv_addr; // base address of ps3/psv virtual memory for privileged access
 
-    enum memory_location : uint
+    enum memory_location_t : uint
     {
         main,
         user_space,
         video,
         stack,
 
-        memory_location_count
+        memory_location_max,
+        any = 0xffffffff,
     };
 
     enum page_info_t : u8
@@ -29,40 +32,82 @@ namespace vm
     page_allocated = (1 << 7),
 };
 
-static void set_stack_size(u32 size) {}
-static void initialize_stack() {}
+// Unconditionally break the reservation at specified address
+void reservation_break(u32 addr);
 
-// break the reservation, return true if it was successfully broken
-bool reservation_break(u32 addr);
-// read memory and reserve it for further atomic update, return true if the previous reservation was broken
-bool reservation_acquire(void* data, u32 addr, u32 size, std::function<void()> callback = nullptr);
-// same as reservation_acquire but does not have the callback argument
-// used by the PPU LLVM JIT since creating a std::function object in LLVM IR is too complicated
-bool reservation_acquire_no_cb(void* data, u32 addr, u32 size);
-// attempt to atomically update reserved memory
+// Reserve memory at the specified address for further atomic update
+void reservation_acquire(void* data, u32 addr, u32 size);
+
+// Attempt to atomically update previously reserved memory
 bool reservation_update(u32 addr, const void* data, u32 size);
-// for internal use
+
+// Process a memory access error if it's caused by the reservation
 bool reservation_query(u32 addr, u32 size, bool is_writing, std::function<bool()> callback);
-// for internal use
+
+// Break all reservations created by the current thread
 void reservation_free();
-// perform complete operation
+
+// Perform atomic operation unconditionally
 void reservation_op(u32 addr, u32 size, std::function<void()> proc);
 
-// for internal use
-void page_map(u32 addr, u32 size, u8 flags);
-// for internal use
+// Change memory protection of specified memory region
 bool page_protect(u32 addr, u32 size, u8 flags_test = 0, u8 flags_set = 0, u8 flags_clear = 0);
-// for internal use
 void page_unmap(u32 addr, u32 size);
 
-// unsafe address check
+// Check if existing memory range is allocated. Checking address before using it is very unsafe.
+// Return value may be wrong. Even if it's true and correct, actual memory protection may be read-only and no-access.
 bool check_addr(u32 addr, u32 size = 1);
 
-bool map(u32 addr, u32 size, u32 flags);
-bool unmap(u32 addr, u32 size = 0, u32 flags = 0);
-u32 alloc(u32 size, memory_location location = user_space);
-u32 alloc(u32 addr, u32 size, memory_location location = user_space);
-void dealloc(u32 addr, memory_location location = user_space);
+// Search and map memory in specified memory location (don't pass alignment smaller than 4096)
+u32 alloc(u32 size, memory_location_t location, u32 align = 4096);
+
+// Map memory at specified address (in optionally specified memory location)
+u32 falloc(u32 addr, u32 size, memory_location_t location = any);
+
+// Unmap memory at specified address (in optionally specified memory location)
+bool dealloc(u32 addr, memory_location_t location = any);
+
+class block_t
+{
+    std::map<u32, u32> m_map; // addr -> size mapping of mapped locations
+    std::mutex m_mutex;
+
+    bool try_alloc(u32 addr, u32 size);
+
+public:
+    block_t() = delete;
+
+    block_t(u32 addr, u32 size)
+        : addr(addr)
+        , size(size)
+    {
+    }
+
+    ~block_t();
+
+public:
+    const u32 addr; // start address
+    const u32 size; // total size
+
+    atomic_t<u32> used{}; // amount of memory used, may be increased manually to prevent some memory from allocating
+
+    // Search and map memory (don't pass alignment smaller than 4096)
+    u32 alloc(u32 size, u32 align = 4096);
+
+    // Try to map memory at fixed location
+    u32 falloc(u32 addr, u32 size);
+
+    // Unmap memory at specified location previously returned by alloc()
+    bool dealloc(u32 addr);
+};
+
+// create new memory block with specified parameters and return it
+std::shared_ptr<block_t> map(u32 addr, u32 size, u32 flags);
+
+// delete existing memory block with specified start address
+std::shared_ptr<block_t> unmap(u32 addr);
+
+// get memory block associated with optionally specified memory location or optionally specified address
+std::shared_ptr<block_t> get(memory_location_t location, u32 addr = 0);
 
 template<typename T = void> T* get_ptr(u32 addr)
 {
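Note: `block_t` is the new unit of allocation — a fixed `[addr, addr + size)` range with a mutex-guarded `m_map` of live page-granular allocations. A hedged sketch against the declarations above (the address is illustrative and must not overlap an existing block):

```cpp
// vm::map creates and registers a standalone block (flags must currently be 0).
auto block = vm::map(0xB0000000, 0x10000000, 0);

const u32 a1 = block->alloc(0x40000);             // search-allocate 256 KB (4 KB aligned)
const u32 a2 = block->falloc(0xB0100000, 0x1000); // fixed-address allocation of one page

block->dealloc(a1);

vm::unmap(0xB0000000); // unregisters the block; ~block_t() unmaps whatever is left
```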
@@ -316,39 +361,6 @@ namespace vm
 
 namespace vm
 {
-    struct location_info
-    {
-        u32 addr_offset;
-        u32 size;
-
-        u32(*allocator)(u32 size);
-        u32(*fixed_allocator)(u32 addr, u32 size);
-        void(*deallocator)(u32 addr);
-
-        u32 alloc_offset;
-
-        template<typename T = char>
-        _ptr_base<T> alloc(u32 count = 1) const
-        {
-            return{ allocator(count * sizeof32(T)) };
-        }
-
-        template<typename T = char>
-        _ptr_base<T> fixed_alloc(u32 addr, u32 count = 1) const
-        {
-            return{ fixed_allocator(addr, count * sizeof32(T)) };
-        }
-    };
-
-    extern location_info g_locations[memory_location_count];
-
-    template<memory_location location = main>
-    location_info& get()
-    {
-        assert(location < memory_location_count);
-        return g_locations[location];
-    }
-
     class stack
     {
         u32 m_begin;
@@ -317,7 +317,7 @@ public:
 
         size_t size = f.size();
         vm::ps3::init();
-        ptr = vm::alloc(size);
+        ptr = vm::alloc(size, vm::main);
         f.read(vm::get_ptr(ptr), size);
     }
 
@@ -31,12 +31,12 @@ u32 GetAddress(u32 offset, u32 location)
     {
     case CELL_GCM_LOCATION_LOCAL:
     {
-        res = (u32)Memory.RSXFBMem.GetStartAddr() + offset;
+        res = 0xC0000000 + offset;
         break;
     }
     case CELL_GCM_LOCATION_MAIN:
     {
-        res = (u32)Memory.RSXIOMem.RealAddr(offset); // TODO: Error Check?
+        res = RSXIOMem.RealAddr(offset); // TODO: Error Check?
         if (res == 0)
         {
             throw EXCEPTION("RSXIO memory not mapped (offset=0x%x)", offset);

@@ -2554,7 +2554,7 @@ void RSXThread::Task()
             continue;
         }
 
-        auto args = vm::ptr<u32>::make((u32)Memory.RSXIOMem.RealAddr(get + 4));
+        auto args = vm::ptr<u32>::make((u32)RSXIOMem.RealAddr(get + 4));
 
         for (u32 i = 0; i < count; i++)
         {

@@ -2595,7 +2595,7 @@ u32 RSXThread::ReadIO32(u32 addr)
 {
     u32 value;
 
-    if (!Memory.RSXIOMem.Read32(addr, &value))
+    if (!RSXIOMem.Read32(addr, &value))
     {
         throw EXCEPTION("RSXIO memory not mapped (addr=0x%x)", addr);
     }

@@ -2605,7 +2605,7 @@ u32 RSXThread::ReadIO32(u32 addr)
 
 void RSXThread::WriteIO32(u32 addr, u32 value)
 {
-    if (!Memory.RSXIOMem.Write32(addr, value))
+    if (!RSXIOMem.Write32(addr, value))
     {
         throw EXCEPTION("RSXIO memory not mapped (addr=0x%x)", addr);
     }
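Note: with `Memory.RSXFBMem` gone, RSX local memory is addressed from the fixed base `0xC0000000` — the `vm::video` block created in `vm::ps3::init` — so GetAddress and the report/timestamp helpers in the cellGcmSys hunks further down become constant-offset arithmetic:

```cpp
// CELL_GCM_LOCATION_LOCAL: each report slot is 16 bytes from the video base
const u32 report_addr = 0xC0000000 + index * 0x10;
const u64 timestamp   = vm::read64(report_addr);
```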
@@ -80,7 +80,7 @@ void CallbackManager::Init()
         }
     };
 
-    if (Memory.PSV.RAM.GetStartAddr())
+    if (vm::get(vm::main)->addr != 0x10000)
     {
         auto thread = Emu.GetIdManager().make_ptr<ARMv7Thread>("Callback Thread");
 
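Note: the replacement test reads a little cryptically — after `vm::psv::init`, the `vm::main` slot holds the PSV RAM block at `0x81000000`, while on PS3 it is `0x10000`, so comparing against the PS3 base is what now distinguishes a PSV process:

```cpp
// PS3 layout: vm::get(vm::main)->addr == 0x00010000
// PSV layout: vm::get(vm::main)->addr == 0x81000000
if (vm::get(vm::main)->addr != 0x10000)
{
    // PSV executable: spawn the ARMv7 callback thread
}
```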
@@ -41,8 +41,8 @@ s32 cellAudioInit()
     g_audio.start_time = get_system_time();
 
     // alloc memory (only once until the emulator is stopped)
-    g_audio.buffer = g_audio.buffer ? g_audio.buffer : Memory.MainMem.AllocAlign(AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT, 4096);
-    g_audio.indexes = g_audio.indexes ? g_audio.indexes : Memory.MainMem.AllocAlign(sizeof32(u64) * AUDIO_PORT_COUNT, alignof32(u64));
+    g_audio.buffer = g_audio.buffer ? g_audio.buffer : vm::alloc(AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT, vm::main);
+    g_audio.indexes = g_audio.indexes ? g_audio.indexes : vm::alloc(sizeof32(u64) * AUDIO_PORT_COUNT, vm::main);
 
     // clear memory
     memset(vm::get_ptr<void>(g_audio.buffer), 0, AUDIO_PORT_OFFSET * AUDIO_PORT_COUNT);
@@ -90,7 +90,7 @@ s32 cellFontOpenFontFile(vm::ptr<CellFontLibrary> library, vm::cptr<char> fontPa
     return CELL_FONT_ERROR_FONT_OPEN_FAILED;
 
     u32 fileSize = (u32)f.GetSize();
-    u32 bufferAddr = (u32)Memory.Alloc(fileSize, 1); // Freed in cellFontCloseFont
+    u32 bufferAddr = vm::alloc(fileSize, vm::main); // Freed in cellFontCloseFont
     f.Read(vm::get_ptr<void>(bufferAddr), fileSize);
     s32 ret = cellFontOpenFontMemory(library, bufferAddr, fileSize, subNum, uniqueId, font);
     font->origin = CELL_FONT_OPEN_FONT_FILE;

@@ -222,7 +222,7 @@ void cellFontRenderSurfaceInit(vm::ptr<CellFontRenderSurface> surface, vm::ptr<v
     surface->height = h;
 
     if (!buffer)
-        surface->buffer_addr = (u32)Memory.Alloc(bufferWidthByte * h, 1); // TODO: Huge memory leak
+        surface->buffer_addr = vm::alloc(bufferWidthByte * h, vm::main); // TODO: Huge memory leak
 }
 
 void cellFontRenderSurfaceSetScissor(vm::ptr<CellFontRenderSurface> surface, s32 x0, s32 y0, s32 w, s32 h)

@@ -385,7 +385,7 @@ s32 cellFontCloseFont(vm::ptr<CellFont> font)
     if (font->origin == CELL_FONT_OPEN_FONTSET ||
         font->origin == CELL_FONT_OPEN_FONT_FILE ||
         font->origin == CELL_FONT_OPEN_MEMORY)
-        Memory.Free(font->fontdata_addr);
+        vm::dealloc(font->fontdata_addr, vm::main);
 
     return CELL_OK;
 }
@@ -16,7 +16,7 @@ s32 cellFontInitLibraryFreeTypeWithRevision(u64 revisionFlags, vm::ptr<CellFontL
     //if (s_fontInternalInstance->m_bInitialized)
     //return CELL_FONT_ERROR_UNINITIALIZED;
 
-    lib->set(Memory.Alloc(sizeof(CellFontLibrary), 1));
+    lib->set(vm::alloc(sizeof(CellFontLibrary), vm::main));
 
     return CELL_OK;
 }
|
@ -63,8 +63,8 @@ CellGcmOffsetTable offsetTable;

void InitOffsetTable()
{
	offsetTable.ioAddress.set((u32)Memory.Alloc(3072 * sizeof(u16), 1));
	offsetTable.eaAddress.set((u32)Memory.Alloc(512 * sizeof(u16), 1));
	offsetTable.ioAddress.set(vm::alloc(3072 * sizeof(u16), vm::main));
	offsetTable.eaAddress.set(vm::alloc(512 * sizeof(u16), vm::main));

	memset(offsetTable.ioAddress.get_ptr(), 0xFF, 3072 * sizeof(u16));
	memset(offsetTable.eaAddress.get_ptr(), 0xFF, 512 * sizeof(u16));

@ -89,7 +89,7 @@ vm::ptr<CellGcmReportData> cellGcmGetReportDataAddressLocation(u32 index, u32 lo
			cellGcmSys.Error("cellGcmGetReportDataAddressLocation: Wrong local index (%d)", index);
			return vm::null;
		}
		return vm::ptr<CellGcmReportData>::make((u32)Memory.RSXFBMem.GetStartAddr() + index * 0x10);
		return vm::ptr<CellGcmReportData>::make(0xC0000000 + index * 0x10);
	}

	if (location == CELL_GCM_LOCATION_MAIN) {

@ -113,7 +113,7 @@ u64 cellGcmGetTimeStamp(u32 index)
		cellGcmSys.Error("cellGcmGetTimeStamp: Wrong local index (%d)", index);
		return 0;
	}
	return vm::read64(Memory.RSXFBMem.GetStartAddr() + index * 0x10);
	return vm::read64(0xC0000000 + index * 0x10);
}

s32 cellGcmGetCurrentField()

@ -140,7 +140,7 @@ u32 cellGcmGetNotifyDataAddress(u32 index)
*/
vm::ptr<CellGcmReportData> _cellGcmFunc12()
{
	return vm::ptr<CellGcmReportData>::make(Memory.RSXFBMem.GetStartAddr()); // TODO
	return vm::ptr<CellGcmReportData>::make(0xC0000000); // TODO
}

u32 cellGcmGetReport(u32 type, u32 index)

@ -168,7 +168,7 @@ u32 cellGcmGetReportDataAddress(u32 index)
		cellGcmSys.Error("cellGcmGetReportDataAddress: Wrong local index (%d)", index);
		return 0;
	}
	return (u32)Memory.RSXFBMem.GetStartAddr() + index * 0x10;
	return 0xC0000000 + index * 0x10;
}

u32 cellGcmGetReportDataLocation(u32 index, u32 location)

@ -188,7 +188,7 @@ u64 cellGcmGetTimeStampLocation(u32 index, u32 location)
			cellGcmSys.Error("cellGcmGetTimeStampLocation: Wrong local index (%d)", index);
			return 0;
		}
		return vm::read64(Memory.RSXFBMem.GetStartAddr() + index * 0x10);
		return vm::read64(0xC0000000 + index * 0x10);
	}

	if (location == CELL_GCM_LOCATION_MAIN) {
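All of the `Memory.RSXFBMem.GetStartAddr()` calls above collapse into the constant 0xC0000000, the base of RSX local memory, so a report or timestamp slot becomes plain arithmetic. A self-contained illustration of the indexing (not emulator code; the 0x10 stride comes from the hunks above):

#include <cstdint>
#include <cassert>

constexpr uint32_t rsx_local_base = 0xC0000000; // was Memory.RSXFBMem.GetStartAddr()

constexpr uint32_t report_slot_addr(uint32_t index)
{
	return rsx_local_base + index * 0x10; // one CellGcmReportData slot per 0x10 bytes
}

int main()
{
	assert(report_slot_addr(0) == 0xC0000000);
	assert(report_slot_addr(2) == 0xC0000020);
}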
@ -327,8 +327,8 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
	if(!local_size && !local_addr)
	{
		local_size = 0xf900000; // TODO: Get sdk_version in _cellGcmFunc15 and pass it to gcmGetLocalMemorySize
		local_addr = (u32)Memory.RSXFBMem.GetStartAddr();
		Memory.RSXFBMem.AllocAlign(local_size);
		local_addr = 0xC0000000;
		vm::falloc(0xC0000000, local_size, vm::video);
	}

	cellGcmSys.Warning("*** local memory(addr=0x%x, size=0x%x)", local_addr, local_size);

@ -337,12 +337,12 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
	if (system_mode == CELL_GCM_SYSTEM_MODE_IOMAP_512MB)
	{
		cellGcmSys.Warning("cellGcmInit(): 512MB io address space used");
		Memory.RSXIOMem.SetRange(0, 0x20000000 /*512MB*/);
		RSXIOMem.SetRange(0, 0x20000000 /*512MB*/);
	}
	else
	{
		cellGcmSys.Warning("cellGcmInit(): 256MB io address space used");
		Memory.RSXIOMem.SetRange(0, 0x10000000 /*256MB*/);
		RSXIOMem.SetRange(0, 0x10000000 /*256MB*/);
	}

	if (gcmMapEaIoAddress(ioAddress, 0, ioSize, false) != CELL_OK)

@ -370,10 +370,10 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
	current_context.current = current_context.begin;
	current_context.callback.set(Emu.GetRSXCallback() - 4);

	gcm_info.context_addr = Memory.MainMem.AllocAlign(0x1000);
	gcm_info.context_addr = vm::alloc(0x1000, vm::main);
	gcm_info.control_addr = gcm_info.context_addr + 0x40;

	gcm_info.label_addr = Memory.MainMem.AllocAlign(0x1000); // ???
	gcm_info.label_addr = vm::alloc(0x1000, vm::main); // ???

	vm::get_ref<CellGcmContextData>(gcm_info.context_addr) = current_context;
	vm::write32(context.addr(), gcm_info.context_addr);

@ -385,9 +385,9 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz

	auto& render = Emu.GetGSManager().GetRender();
	render.m_ctxt_addr = context.addr();
	render.m_gcm_buffers_addr = (u32)Memory.Alloc(sizeof(CellGcmDisplayInfo) * 8, sizeof(CellGcmDisplayInfo));
	render.m_zculls_addr = (u32)Memory.Alloc(sizeof(CellGcmZcullInfo) * 8, sizeof(CellGcmZcullInfo));
	render.m_tiles_addr = (u32)Memory.Alloc(sizeof(CellGcmTileInfo) * 15, sizeof(CellGcmTileInfo));
	render.m_gcm_buffers_addr = vm::alloc(sizeof(CellGcmDisplayInfo) * 8, vm::main);
	render.m_zculls_addr = vm::alloc(sizeof(CellGcmZcullInfo) * 8, vm::main);
	render.m_tiles_addr = vm::alloc(sizeof(CellGcmTileInfo) * 15, vm::main);
	render.m_gcm_buffers_count = 0;
	render.m_gcm_current_buffer = 0;
	render.m_main_mem_addr = 0;
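`vm::falloc` here replaces `AllocFixed`: unlike `vm::alloc`, which searches its area for free space, it claims the exact requested range or fails. A toy standalone model of the two entry points, with semantics inferred from the call sites in this diff (not the emulator's allocator):

#include <cstdint>
#include <cassert>
#include <map>

struct toy_allocator
{
	std::map<uint32_t, uint32_t> taken; // addr -> size (single area, page steps, no alignment)
	uint32_t base = 0xC0000000, size = 0x10000000;

	bool overlaps(uint32_t a, uint32_t s) const
	{
		for (auto& kv : taken)
			if (a < kv.first + kv.second && kv.first < a + s) return true;
		return false;
	}

	uint32_t alloc(uint32_t s) // search anywhere in the area
	{
		for (uint32_t a = base; a + s <= base + size; a += 0x1000)
			if (!overlaps(a, s)) { taken.emplace(a, s); return a; }
		return 0;
	}

	uint32_t falloc(uint32_t a, uint32_t s) // exact placement only
	{
		if (a < base || a + s > base + size || overlaps(a, s)) return 0;
		taken.emplace(a, s);
		return a;
	}
};

int main()
{
	toy_allocator video;
	assert(video.falloc(0xC0000000, 0xf900000) == 0xC0000000); // exact, like the hunk above
	assert(video.falloc(0xC0000000, 0x1000) == 0);             // that range is now occupied
	assert(video.alloc(0x1000) != 0);                          // a search still finds room elsewhere
}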
@ -816,9 +816,9 @@ s32 cellGcmAddressToOffset(u32 address, vm::ptr<u32> offset)
	u32 result;

	// Address in local memory
	if (Memory.RSXFBMem.IsInMyRange(address))
	if ((address >> 28) == 0xC)
	{
		result = address - Memory.RSXFBMem.GetStartAddr();
		result = address - 0xC0000000;
	}
	// Address in main memory else check
	else
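With local memory pinned at 0xC0000000-0xCFFFFFFF, the membership test reduces to comparing the top nibble, and the offset to subtracting the base. A standalone check of that arithmetic:

#include <cstdint>
#include <cassert>

constexpr bool     is_rsx_local(uint32_t addr) { return (addr >> 28) == 0xC; }
constexpr uint32_t rsx_offset(uint32_t addr)   { return addr - 0xC0000000; }

int main()
{
	assert(is_rsx_local(0xC0200000) && rsx_offset(0xC0200000) == 0x200000);
	assert(!is_rsx_local(0x30000000)); // user space: falls through to the IO-map path
}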
@ -844,7 +844,7 @@ u32 cellGcmGetMaxIoMapSize()
{
	cellGcmSys.Log("cellGcmGetMaxIoMapSize()");

	return (u32)(Memory.RSXIOMem.GetEndAddr() - Memory.RSXIOMem.GetReservedAmount());
	return RSXIOMem.GetSize() - RSXIOMem.GetReservedAmount();
}

void cellGcmGetOffsetTable(vm::ptr<CellGcmOffsetTable> table)

@ -861,7 +861,7 @@ s32 cellGcmIoOffsetToAddress(u32 ioOffset, vm::ptr<u32> address)

	u32 realAddr;

	if (!Memory.RSXIOMem.getRealAddr(ioOffset, realAddr))
	if (!RSXIOMem.getRealAddr(ioOffset, realAddr))
		return CELL_GCM_ERROR_FAILURE;

	*address = realAddr;

@ -874,7 +874,7 @@ s32 gcmMapEaIoAddress(u32 ea, u32 io, u32 size, bool is_strict)
	if ((ea & 0xFFFFF) || (io & 0xFFFFF) || (size & 0xFFFFF)) return CELL_GCM_ERROR_FAILURE;

	// Check if the mapping was successful
	if (Memory.RSXIOMem.Map(ea, size, io))
	if (RSXIOMem.Map(ea, size, io))
	{
		// Fill the offset table
		for (u32 i = 0; i<(size >> 20); i++)
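The body of the fill loop lies outside this hunk. A hypothetical reconstruction, keyed by 1 MB page like the 0xFF-initialized tables in `InitOffsetTable` above (both the loop body and the exact mapping are assumptions here, not part of the diff):

// Hypothetical - the actual loop body is not shown in this hunk.
for (u32 i = 0; i < (size >> 20); i++)
{
	offsetTable.ioAddress[(ea >> 20) + i] = (u16)((io >> 20) + i); // ea page -> io page
	offsetTable.eaAddress[(io >> 20) + i] = (u16)((ea >> 20) + i); // io page -> ea page
}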
@ -913,7 +913,7 @@ s32 cellGcmMapLocalMemory(vm::ptr<u32> address, vm::ptr<u32> size)
{
	cellGcmSys.Warning("cellGcmMapLocalMemory(address=*0x%x, size=*0x%x)", address, size);

	if (!local_addr && !local_size && Memory.RSXFBMem.AllocFixed(local_addr = Memory.RSXFBMem.GetStartAddr(), local_size = 0xf900000 /* TODO */))
	if (!local_addr && !local_size && vm::falloc(local_addr = 0xC0000000, local_size = 0xf900000 /* TODO */, vm::video))
	{
		*address = local_addr;
		*size = local_size;

@ -933,10 +933,10 @@ s32 cellGcmMapMainMemory(u32 ea, u32 size, vm::ptr<u32> offset)

	if ((ea & 0xFFFFF) || (size & 0xFFFFF)) return CELL_GCM_ERROR_FAILURE;

	u32 io = Memory.RSXIOMem.Map(ea, size);
	u32 io = RSXIOMem.Map(ea, size);

	//check if the mapping was successful
	if (Memory.RSXIOMem.RealAddr(io) == ea)
	if (RSXIOMem.RealAddr(io) == ea)
	{
		//fill the offset table
		for (u32 i = 0; i<(size >> 20); i++)

@ -975,7 +975,7 @@ s32 cellGcmReserveIoMapSize(u32 size)
		return CELL_GCM_ERROR_INVALID_VALUE;
	}

	Memory.RSXIOMem.Reserve(size);
	RSXIOMem.Reserve(size);
	return CELL_OK;
}

@ -984,7 +984,7 @@ s32 cellGcmUnmapEaIoAddress(u32 ea)
	cellGcmSys.Log("cellGcmUnmapEaIoAddress(ea=0x%x)", ea);

	u32 size;
	if (Memory.RSXIOMem.UnmapRealAddress(ea, size))
	if (RSXIOMem.UnmapRealAddress(ea, size))
	{
		const u32 io = offsetTable.ioAddress[ea >>= 20];

@ -1008,7 +1008,7 @@ s32 cellGcmUnmapIoAddress(u32 io)
	cellGcmSys.Log("cellGcmUnmapIoAddress(io=0x%x)", io);

	u32 size;
	if (Memory.RSXIOMem.UnmapAddress(io, size))
	if (RSXIOMem.UnmapAddress(io, size))
	{
		const u32 ea = offsetTable.eaAddress[io >>= 20];

@ -1037,13 +1037,13 @@ s32 cellGcmUnreserveIoMapSize(u32 size)
		return CELL_GCM_ERROR_INVALID_ALIGNMENT;
	}

	if (size > Memory.RSXIOMem.GetReservedAmount())
	if (size > RSXIOMem.GetReservedAmount())
	{
		cellGcmSys.Error("cellGcmUnreserveIoMapSize : CELL_GCM_ERROR_INVALID_VALUE");
		return CELL_GCM_ERROR_INVALID_VALUE;
	}

	Memory.RSXIOMem.Unreserve(size);
	RSXIOMem.Unreserve(size);
	return CELL_OK;
}
@ -23,7 +23,7 @@ s32 pngDecCreate(
	vm::cptr<CellPngDecExtThreadInParam> ext = vm::null)
{
	// alloc memory (should probably use param->cbCtrlMallocFunc)
	auto dec = CellPngDecMainHandle::make(Memory.Alloc(sizeof(PngDecoder), 128));
	auto dec = CellPngDecMainHandle::make(vm::alloc(sizeof(PngDecoder), vm::main));

	if (!dec)
	{

@ -48,7 +48,7 @@ s32 pngDecCreate(

s32 pngDecDestroy(CellPngDecMainHandle dec)
{
	if (!Memory.Free(dec.addr()))
	if (!vm::dealloc(dec.addr(), vm::main))
	{
		return CELL_PNGDEC_ERROR_FATAL;
	}

@ -65,7 +65,7 @@ s32 pngDecOpen(
	vm::cptr<CellPngDecOpnParam> param = vm::null)
{
	// alloc memory (should probably use dec->malloc)
	auto stream = CellPngDecSubHandle::make(Memory.Alloc(sizeof(PngStream), 128));
	auto stream = CellPngDecSubHandle::make(vm::alloc(sizeof(PngStream), vm::main));

	if (!stream)
	{

@ -117,7 +117,7 @@ s32 pngDecClose(CellPngDecSubHandle stream)
{
	Emu.GetIdManager().remove<lv2_file_t>(stream->fd);

	if (!Memory.Free(stream.addr()))
	if (!vm::dealloc(stream.addr(), vm::main))
	{
		return CELL_PNGDEC_ERROR_FATAL;
	}
@ -638,7 +638,7 @@ s32 cellSailPlayerCreateDescriptor(vm::ptr<CellSailPlayer> pSelf, s32 streamType
	cellSail.Warning("cellSailPlayerCreateDescriptor(pSelf_addr=0x%x, streamType=%d, pMediaInfo_addr=0x%x, pUri_addr=0x%x, ppDesc_addr=0x%x)", pSelf.addr(), streamType,
		pMediaInfo.addr(), pUri.addr(), ppDesc.addr());

	u32 descriptorAddress = Memory.Alloc(sizeof(CellSailDescriptor), 1);
	u32 descriptorAddress = vm::alloc(sizeof(CellSailDescriptor), vm::main);
	auto descriptor = vm::ptr<CellSailDescriptor>::make(descriptorAddress);
	*ppDesc = descriptorAddress;
	descriptor->streamType = streamType;

@ -657,11 +657,11 @@ s32 cellSailPlayerCreateDescriptor(vm::ptr<CellSailPlayer> pSelf, s32 streamType
			vfsFile f;
			if (f.Open(path)) {
				u64 size = f.GetSize();
				u32 buf_ = Memory.Alloc(size, 1);
				u32 buf_ = vm::alloc(size, vm::main);
				auto bufPtr = vm::cptr<PamfHeader>::make(buf_);
				PamfHeader *buf = const_cast<PamfHeader*>(bufPtr.get_ptr());
				assert(f.Read(buf, size) == size);
				u32 sp_ = Memory.Alloc(sizeof(CellPamfReader), 1);
				u32 sp_ = vm::alloc(sizeof(CellPamfReader), vm::main);
				auto sp = vm::ptr<CellPamfReader>::make(sp_);
				u32 r = cellPamfReaderInitialize(sp, bufPtr, size, 0);
@ -1078,7 +1078,7 @@ s32 spursInit(

	// Import SPURS kernel
	spurs->spuImg.type = SYS_SPU_IMAGE_TYPE_USER;
	spurs->spuImg.addr = (u32)Memory.Alloc(0x40000, 4096);
	spurs->spuImg.addr = vm::alloc(0x40000, vm::main);
	spurs->spuImg.entry_point = isSecond ? CELL_SPURS_KERNEL2_ENTRY_ADDR : CELL_SPURS_KERNEL1_ENTRY_ADDR;
	spurs->spuImg.nsegs = 1;
@ -607,7 +607,7 @@ void spursSysServiceIdleHandler(SPUThread & spu, SpursKernelContext * ctxt) {
	std::unique_lock<std::mutex> lock(spu.mutex, std::defer_lock);

	while (true) {
		vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128, [&spu](){ spu.cv.notify_one(); });
		vm::reservation_acquire(vm::get_ptr(spu.offset + 0x100), VM_CAST(ctxt->spurs.addr()), 128);
		auto spurs = vm::get_ptr<CellSpurs>(spu.offset + 0x100);

		// Find the number of SPUs that are idling in this SPURS instance
@ -44,7 +44,7 @@ u32 ppu_get_tls(u32 thread)
	if (!g_tls_start)
	{
		g_tls_size = Emu.GetTLSMemsz() + TLS_SYS;
		g_tls_start = Memory.MainMem.AllocAlign(g_tls_size * TLS_MAX, 4096); // memory for up to TLS_MAX threads
		g_tls_start = vm::alloc(g_tls_size * TLS_MAX, vm::main); // memory for up to TLS_MAX threads
		sysPrxForUser.Notice("Thread Local Storage initialized (g_tls_start=0x%x, user_size=0x%x)\n*** TLS segment addr: 0x%08x\n*** TLS segment size: 0x%08x",
			g_tls_start, Emu.GetTLSMemsz(), Emu.GetTLSAddr(), Emu.GetTLSFilesz());
	}
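Since the TLS backing store is now a single `vm::alloc` slab of `g_tls_size * TLS_MAX` bytes, each thread's storage is a fixed offset into it. A self-contained sketch of the slot arithmetic (the concrete values in the asserts are made up for illustration):

#include <cstdint>
#include <cassert>

constexpr uint32_t tls_slot_addr(uint32_t start, uint32_t slot_size, uint32_t slot)
{
	return start + slot_size * slot; // slot < TLS_MAX
}

int main()
{
	assert(tls_slot_addr(0x10000000, 0x1000, 0) == 0x10000000);
	assert(tls_slot_addr(0x10000000, 0x1000, 3) == 0x10003000);
}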
@ -788,21 +788,21 @@ u32 _sys_heap_malloc(u32 heap, u32 size)
{
	sysPrxForUser.Warning("_sys_heap_malloc(heap=0x%x, size=0x%x)", heap, size);

	return Memory.MainMem.AllocAlign(size, 1);
	return vm::alloc(size, vm::main);
}

u32 _sys_heap_memalign(u32 heap, u32 align, u32 size)
{
	sysPrxForUser.Warning("_sys_heap_memalign(heap=0x%x, align=0x%x, size=0x%x)", heap, align, size);

	return Memory.MainMem.AllocAlign(size, align);
	return vm::alloc(size, vm::main, std::max<u32>(align, 4096));
}

s32 _sys_heap_free(u32 heap, u32 addr)
{
	sysPrxForUser.Warning("_sys_heap_free(heap=0x%x, addr=0x%x)", heap, addr);

	Memory.MainMem.Free(addr);
	vm::dealloc(addr, vm::main);

	return CELL_OK;
}
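The `std::max<u32>(align, 4096)` clamp keeps the guest-requested alignment only when it exceeds the allocator's page granularity; smaller alignments are satisfied for free by page alignment. A standalone check:

#include <algorithm>
#include <cstdint>
#include <cassert>

uint32_t effective_align(uint32_t align)
{
	return std::max<uint32_t>(align, 4096); // never below page granularity
}

int main()
{
	assert(effective_align(16) == 4096);         // absorbed by page alignment
	assert(effective_align(0x10000) == 0x10000); // larger powers of two pass through
}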
@ -831,7 +831,7 @@ s32 sys_process_is_stack(u32 p)
	sysPrxForUser.Log("sys_process_is_stack(p=0x%x)", p);

	// prx: compare high 4 bits with "0xD"
	return (p >= Memory.StackMem.GetStartAddr() && p <= Memory.StackMem.GetEndAddr()) ? 1 : 0;
	return (p >> 28) == 0xD;
}

s64 sys_prx_exitspawn_with_level()
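Both the old range test against `Memory.StackMem` and the new shift encode the same layout: the stack area occupies the 0xD0000000-0xDFFFFFFF window, which is exactly the firmware's high-nibble comparison quoted in the comment. A standalone check:

#include <cstdint>
#include <cassert>

constexpr bool is_stack(uint32_t p) { return (p >> 28) == 0xD; }

int main()
{
	assert(is_stack(0xD0010000));
	assert(!is_stack(0xC0010000)); // RSX local memory, not stack
}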
@ -1121,21 +1121,21 @@ u32 _sys_malloc(u32 size)
{
	sysPrxForUser.Warning("_sys_malloc(size=0x%x)", size);

	return Memory.MainMem.AllocAlign(size, 1);
	return vm::alloc(size, vm::main);
}

u32 _sys_memalign(u32 align, u32 size)
{
	sysPrxForUser.Warning("_sys_memalign(align=0x%x, size=0x%x)", align, size);

	return Memory.MainMem.AllocAlign(size, align);
	return vm::alloc(size, vm::main, std::max<u32>(align, 4096));
}

s32 _sys_free(u32 addr)
{
	sysPrxForUser.Warning("_sys_free(addr=0x%x)", addr);

	Memory.MainMem.Free(addr);
	vm::dealloc(addr, vm::main);

	return CELL_OK;
}
@ -86,11 +86,7 @@ s32 getLastError()
#endif
}

#ifdef _WIN32
using pck_len_t = s32;
#else
using pck_len_t = u32;
#endif

namespace sys_net_func
{
@ -108,7 +104,7 @@ namespace sys_net_func
		sockaddr _addr;
		memcpy(&_addr, addr.get_ptr(), sizeof(sockaddr));
		_addr.sa_family = addr->sa_family;
		pck_len_t _paddrlen;
		int _paddrlen;
		s32 ret = ::accept(s, &_addr, &_paddrlen);
		*paddrlen = _paddrlen;
		*g_lastError = getLastError();

@ -257,7 +253,7 @@ namespace sys_net_func
		sockaddr _addr;
		memcpy(&_addr, addr.get_ptr(), sizeof(sockaddr));
		_addr.sa_family = addr->sa_family;
		pck_len_t _paddrlen;
		int _paddrlen;
		s32 ret = ::recvfrom(s, buf.get_ptr(), len, flags, &_addr, &_paddrlen);
		*paddrlen = _paddrlen;
		*g_lastError = getLastError();
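Replacing the `pck_len_t` alias with a plain `int` relies on the host's address-length type having the same size: on Windows `::accept` takes `int*`, on POSIX `socklen_t*`. (Note that both the old and new code also pass the temporary uninitialized, which strict POSIX does not permit.) A hedged illustration of the size assumption:

#ifdef _WIN32
using host_len_t = int;        // ::accept(SOCKET, sockaddr*, int*)
#else
#include <sys/socket.h>
using host_len_t = socklen_t;  // ::accept(int, sockaddr*, socklen_t*)
#endif

// A plain int is only a safe stand-in for the addrlen argument while the sizes match.
static_assert(sizeof(int) == sizeof(host_len_t), "int is not a safe addrlen substitute here");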
@ -417,7 +413,7 @@ namespace sys_net_func
s32 sys_net_initialize_network_ex(vm::ptr<sys_net_initialize_parameter> param)
{
	sys_net.Warning("sys_net_initialize_network_ex(param=*0x%x)", param);
	g_lastError = vm::ptr<s32>::make((u32)Memory.Alloc(4, 1));
	g_lastError = vm::ptr<s32>::make(vm::alloc(4, vm::main));
#ifdef _WIN32
	WSADATA wsaData;
	WORD wVersionRequested = MAKEWORD(1, 1);

@ -549,7 +545,7 @@ s32 sys_net_show_ifconfig()
s32 sys_net_finalize_network()
{
	sys_net.Warning("sys_net_finalize_network()");
	Memory.Free(g_lastError.addr());
	vm::dealloc(g_lastError.addr(), vm::main);
	g_lastError = vm::null;
#ifdef _WIN32
	WSACleanup();
@ -55,19 +55,21 @@ s32 sys_memory_allocate(u32 size, u64 flags, vm::ptr<u32> alloc_addr)
	// Check all containers
	for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
	{
		available += ct->size - ct->taken;
		available += ct->size - ct->used;
	}

	const auto area = vm::get(vm::user_space);

	// Check available memory
	if (Memory.GetUserMemAvailSize() < available + size)
	if (area->size < area->used.load() + available + size)
	{
		return CELL_ENOMEM;
	}

	// Allocate memory
	const u32 addr =
		flags == SYS_MEMORY_PAGE_SIZE_1M ? Memory.Alloc(size, 0x100000) :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? Memory.Alloc(size, 0x10000) :
		flags == SYS_MEMORY_PAGE_SIZE_1M ? area->alloc(size, 0x100000) :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? area->alloc(size, 0x10000) :
		throw EXCEPTION("Unexpected flags");

	if (!addr)
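The rewritten check budgets the user area in a single inequality: bytes already committed (`area->used`), plus the headroom every container still promises (`available`), plus the new request must all fit within `area->size`. A standalone model of the predicate:

#include <cstdint>
#include <cassert>

bool can_allocate(uint64_t area_size, uint64_t area_used,
                  uint64_t containers_available, uint64_t request)
{
	// The new request must not eat into space the containers have reserved.
	return area_size >= area_used + containers_available + request;
}

int main()
{
	assert(can_allocate(0x10000000, 0x1000000, 0x2000000, 0x100000));
	assert(!can_allocate(0x10000000, 0xF000000, 0x2000000, 0x100000)); // would starve the containers
}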
@ -124,21 +126,21 @@ s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32
		}
	}

	if (ct->taken > ct->size)
	if (ct->used > ct->size)
	{
		throw EXCEPTION("Unexpected amount of memory taken (0x%x, size=0x%x)", ct->taken.load(), ct->size);
		throw EXCEPTION("Unexpected amount of memory taken (0x%x, size=0x%x)", ct->used.load(), ct->size);
	}

	// Check memory availability
	if (size > ct->size - ct->taken)
	if (size > ct->size - ct->used)
	{
		return CELL_ENOMEM;
	}

	// Allocate memory
	const u32 addr =
		flags == SYS_MEMORY_PAGE_SIZE_1M ? Memory.Alloc(size, 0x100000) :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? Memory.Alloc(size, 0x10000) :
		flags == SYS_MEMORY_PAGE_SIZE_1M ? vm::alloc(size, vm::user_space, 0x100000) :
		flags == SYS_MEMORY_PAGE_SIZE_64K ? vm::alloc(size, vm::user_space, 0x10000) :
		throw EXCEPTION("Unexpected flags");

	if (!addr)

@ -148,7 +150,7 @@ s32 sys_memory_allocate_from_container(u32 size, u32 cid, u64 flags, vm::ptr<u32

	// Store the address and size in the container
	ct->allocs.emplace(addr, size);
	ct->taken += size;
	ct->used += size;

	// Write back the start address of the allocated area.
	*alloc_addr = addr;
@ -169,20 +171,20 @@ s32 sys_memory_free(u32 addr)

		if (found != ct->allocs.end())
		{
			if (!Memory.Free(addr))
			if (!vm::dealloc(addr, vm::user_space))
			{
				throw EXCEPTION("Memory not deallocated (cid=0x%x, addr=0x%x, size=0x%x)", ct->id, addr, found->second);
			}

			// Return memory size
			ct->taken -= found->second;
			ct->used -= found->second;
			ct->allocs.erase(found);

			return CELL_OK;
		}
	}

	if (!Memory.Free(addr))
	if (!vm::dealloc(addr, vm::user_space))
	{
		return CELL_EINVAL;
	}
@ -217,12 +219,14 @@ s32 sys_memory_get_user_memory_size(vm::ptr<sys_memory_info_t> mem_info)
	for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
	{
		reserved += ct->size;
		available += ct->size - ct->taken;
		available += ct->size - ct->used;
	}

	const auto area = vm::get(vm::user_space);

	// Fetch the user memory available
	mem_info->total_user_memory = Memory.GetUserMemTotalSize() - reserved;
	mem_info->available_user_memory = Memory.GetUserMemAvailSize() - available;
	mem_info->total_user_memory = area->size - reserved;
	mem_info->available_user_memory = area->size - area->used.load() - available;

	return CELL_OK;
}
@ -248,11 +252,13 @@ s32 sys_memory_container_create(vm::ptr<u32> cid, u32 size)
	for (auto& ct : Emu.GetIdManager().get_all<lv2_memory_container_t>())
	{
		reserved += ct->size;
		available += ct->size - ct->taken;
		available += ct->size - ct->used;
	}

	if (Memory.GetUserMemTotalSize() < reserved + size ||
		Memory.GetUserMemAvailSize() < available + size)
	const auto area = vm::get(vm::user_space);

	if (area->size < reserved + size ||
		area->size - area->used.load() < available + size)
	{
		return CELL_ENOMEM;
	}
@ -277,7 +283,7 @@ s32 sys_memory_container_destroy(u32 cid)
	}

	// Check if some memory is not deallocated (the container cannot be destroyed in this case)
	if (ct->taken)
	if (ct->used.load())
	{
		return CELL_EBUSY;
	}

@ -301,7 +307,7 @@ s32 sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid)
	}

	mem_info->total_user_memory = ct->size; // total container memory
	mem_info->available_user_memory = ct->size - ct->taken; // available container memory
	mem_info->available_user_memory = ct->size - ct->used.load(); // available container memory

	return CELL_OK;
}
@ -48,7 +48,7 @@ struct lv2_memory_container_t
	const u32 id;

	// amount of memory allocated
	std::atomic<u32> taken{ 0 };
	std::atomic<u32> used{ 0 };

	// allocations (addr -> size)
	std::map<u32, u32> allocs;
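The `taken` to `used` rename is mechanical, but the container's accounting is worth modelling: an atomic byte counter paired with an addr to size map, where every grant must keep `used <= size`. A minimal standalone model (not the emulator's actual type):

#include <atomic>
#include <map>
#include <cstdint>
#include <cassert>

struct container_model
{
	const uint32_t size;                 // fixed capacity of the container
	std::atomic<uint32_t> used{ 0 };     // bytes handed out so far
	std::map<uint32_t, uint32_t> allocs; // addr -> size, so free can return the bytes

	bool take(uint32_t addr, uint32_t bytes)
	{
		if (bytes > size - used) return false; // mirrors the CELL_ENOMEM path
		allocs.emplace(addr, bytes);
		used += bytes;
		return true;
	}
};

int main()
{
	container_model ct{ 0x1000 };
	assert(ct.take(0x30000000, 0x800) && ct.used == 0x800);
	assert(!ct.take(0x30001000, 0x900)); // exceeds what is left
}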
@ -42,7 +42,7 @@ s32 sys_mmapper_allocate_address(u64 size, u64 flags, u64 alignment, vm::ptr<u32
	{
		for (u32 addr = ::align(0x30000000, alignment); addr < 0xC0000000; addr += static_cast<u32>(alignment))
		{
			if (Memory.Map(addr, static_cast<u32>(size)))
			if (const auto area = vm::map(addr, static_cast<u32>(size), 0))
			{
				*alloc_addr = addr;
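`vm::map` now doubles as the availability probe: the loop walks aligned candidate addresses through the user range and keeps the first one where mapping succeeds. A self-contained sketch of that scan, with a predicate standing in for `vm::map` (alignment assumed to be a power of two):

#include <cstdint>
#include <cassert>

constexpr uint32_t align_up(uint32_t v, uint32_t a) { return (v + a - 1) & ~(a - 1); }

uint32_t find_slot(uint32_t alignment, bool (*try_map)(uint32_t))
{
	// Walk aligned candidates through [0x30000000, 0xC0000000), as in the hunk above.
	for (uint32_t addr = align_up(0x30000000, alignment); addr < 0xC0000000; addr += alignment)
	{
		if (try_map(addr)) return addr;
	}
	return 0;
}

int main()
{
	// Pretend everything below 0x40000000 is already taken.
	assert(find_slot(0x10000000, [](uint32_t a) { return a >= 0x40000000; }) == 0x40000000);
}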
@ -63,7 +63,7 @@ s32 sys_mmapper_allocate_fixed_address()

	LV2_LOCK;

	if (!Memory.Map(0xB0000000, 0x10000000))
	if (!vm::map(0xB0000000, 0x10000000, 0))
	{
		return CELL_EEXIST;
	}

@ -166,7 +166,7 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
		}
	}

	if (ct->size - ct->taken < size)
	if (ct->size - ct->used < size)
	{
		return CELL_ENOMEM;
	}

@ -176,7 +176,7 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
		flags & SYS_MEMORY_PAGE_SIZE_64K ? 0x10000 :
		throw EXCEPTION("Unexpected");

	ct->taken += size;
	ct->used += size;

	// Generate a new mem ID
	*mem_id = Emu.GetIdManager().make<lv2_memory_t>(size, align, flags, ct);
@ -197,19 +197,19 @@ s32 sys_mmapper_free_address(u32 addr)

	LV2_LOCK;

	const auto area = Memory.Get(addr);
	const auto area = vm::get(vm::any, addr);

	if (!area)
	if (!area || addr != area->addr)
	{
		return CELL_EINVAL;
	}

	if (area->GetUsedSize())
	if (area->used.load())
	{
		return CELL_EBUSY;
	}

	if (Memory.Unmap(addr))
	if (!vm::unmap(addr))
	{
		throw EXCEPTION("Unexpected (failed to unmap memory at 0x%x)", addr);
	}

@ -239,7 +239,7 @@ s32 sys_mmapper_free_memory(u32 mem_id)
	// Return physical memory to the container if necessary
	if (mem->ct)
	{
		mem->ct->taken -= mem->size;
		mem->ct->used -= mem->size;
	}

	// Release the allocated memory and remove the ID
@ -254,7 +254,7 @@ s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)

	LV2_LOCK;

	const auto area = Memory.Get(addr & 0xf0000000);
	const auto area = vm::get(vm::any, addr);

	if (!area || addr < 0x30000000 || addr >= 0xC0000000)
	{

@ -278,7 +278,7 @@ s32 sys_mmapper_map_memory(u32 addr, u32 mem_id, u64 flags)
		throw EXCEPTION("Already mapped (mem_id=0x%x, addr=0x%x)", mem_id, mem->addr.load());
	}

	if (!area->AllocFixed(addr, mem->size))
	if (!area->falloc(addr, mem->size))
	{
		return CELL_EBUSY;
	}

@ -294,9 +294,9 @@ s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u3

	LV2_LOCK;

	const auto area = Memory.Get(start_addr);
	const auto area = vm::get(vm::any, start_addr);

	if (!area || start_addr < 0x30000000 || start_addr >= 0xC0000000)
	if (!area || start_addr != area->addr || start_addr < 0x30000000 || start_addr >= 0xC0000000)
	{
		return CELL_EINVAL;
	}

@ -308,7 +308,7 @@ s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, vm::ptr<u3
		return CELL_ESRCH;
	}

	const u32 addr = area->AllocAlign(mem->size, mem->align);
	const u32 addr = area->alloc(mem->size, mem->align);

	if (!addr)
	{
@ -326,9 +326,9 @@ s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)

	LV2_LOCK;

	const auto area = Memory.Get(addr);
	const auto area = vm::get(vm::any, addr);

	if (!area || addr < 0x30000000 || addr >= 0xC0000000)
	if (!area || addr != area->addr || addr < 0x30000000 || addr >= 0xC0000000)
	{
		return CELL_EINVAL;
	}

@ -337,7 +337,7 @@ s32 sys_mmapper_unmap_memory(u32 addr, vm::ptr<u32> mem_id)
	{
		if (mem->addr == addr)
		{
			if (!area->Free(addr))
			if (!area->dealloc(addr))
			{
				throw EXCEPTION("Not mapped (mem_id=0x%x, addr=0x%x)", mem->id, addr);
			}
@ -19,7 +19,10 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::p
	const u32 new_addr = vm::check_addr(0x60000000) ? 0x70000000 : 0x60000000;

	// Map memory
	if (!Memory.Map(new_addr, vsize))
	const auto area = vm::map(new_addr, vsize, 0);

	// Alloc memory
	if (!area || !area->alloc(vsize))
	{
		return CELL_ENOMEM;
	}
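Where `Memory.Map` reserved and committed in one call, the replacement splits the job: `vm::map` reserves the address range as an area and `area->alloc(vsize)` commits the whole range inside it, so both steps must succeed. A toy model of that ordering (not the emulator's types):

#include <cstdint>
#include <cassert>

struct toy_area
{
	uint32_t size = 0; // reserved bytes
	uint32_t used = 0; // committed bytes

	bool map(uint32_t s)   { if (size) return false; size = s; return true; }
	bool alloc(uint32_t s) { if (!size || used + s > size) return false; used += s; return true; }
};

int main()
{
	toy_area a;
	assert(!a.alloc(0x1000));    // cannot commit before the reservation exists
	assert(a.map(0x10000000));   // step 1: reserve the range
	assert(a.alloc(0x10000000)); // step 2: commit the whole vsize, as the new code does
}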
@ -36,7 +39,7 @@ s32 sys_vm_unmap(u32 addr)

	LV2_LOCK;

	if (!Memory.Unmap(addr))
	if (!vm::unmap(addr))
	{
		return CELL_EINVAL;
	}
@ -402,7 +402,7 @@ void Emulator::Stop()
	GetModuleManager().Close();

	CurGameInfo.Reset();
	Memory.Close();
	vm::close();

	finalize_ppu_exec_map();

@ -55,7 +55,7 @@ void KernelExplorer::Update()
	char name[4096];

	m_tree->DeleteAllItems();
	const u32 total_memory_usage = Memory.GetUserMemTotalSize() - Memory.GetUserMemAvailSize();
	const u32 total_memory_usage = vm::get(vm::user_space)->used.load();

	const auto& root = m_tree->AddRoot(fmt::Format("Process, ID = 0x00000001, Total Memory Usage = 0x%x (%0.2f MB)", total_memory_usage, (float)total_memory_usage / (1024 * 1024)));

@ -44,7 +44,8 @@ void MemoryStringSearcher::Search(wxCommandEvent& event)
	// Search the address space for the string
	u32 strIndex = 0;
	u32 numFound = 0;
	for (u32 addr = Memory.MainMem.GetStartAddr(); addr < Memory.MainMem.GetEndAddr(); addr++) {
	const auto area = vm::get(vm::main);
	for (u32 addr = area->addr; addr < area->addr + area->size; addr++) {
		if (!vm::check_addr(addr)) {
			strIndex = 0;
			continue;
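The new scan takes its bounds from the main memory area object and consults `vm::check_addr` per byte, so a partially matched string is abandoned whenever the scan crosses an uncommitted page. A standalone model of that reset-on-hole behaviour:

#include <cassert>
#include <string>
#include <vector>

// committed[i] plays the role of vm::check_addr: a hole resets the partial match.
std::size_t count_matches(const std::string& memory, const std::string& needle,
                          const std::vector<bool>& committed)
{
	std::size_t found = 0, i = 0;
	for (std::size_t addr = 0; addr < memory.size(); addr++)
	{
		if (!committed[addr]) { i = 0; continue; } // unmapped page: restart the match
		i = (memory[addr] == needle[i]) ? i + 1 : (memory[addr] == needle[0] ? 1 : 0);
		if (i == needle.size()) { found++; i = 0; }
	}
	return found;
}

int main()
{
	std::string mem = "..PS3..PS3..";
	std::vector<bool> committed(mem.size(), true);
	committed[7] = false; // hole in the middle of the second "PS3"
	assert(count_matches(mem, "PS3", committed) == 1);
}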
@ -333,7 +333,7 @@ void RSXDebugger::GoToGet(wxCommandEvent& event)
	if (!RSXReady()) return;
	auto ctrl = vm::get_ptr<CellGcmControl>(Emu.GetGSManager().GetRender().m_ctrlAddress);
	u32 realAddr;
	if (Memory.RSXIOMem.getRealAddr(ctrl->get.load(), realAddr)) {
	if (RSXIOMem.getRealAddr(ctrl->get.load(), realAddr)) {
		m_addr = realAddr;
		t_addr->SetValue(wxString::Format("%08x", m_addr));
		UpdateInformation();

@ -347,7 +347,7 @@ void RSXDebugger::GoToPut(wxCommandEvent& event)
	if (!RSXReady()) return;
	auto ctrl = vm::get_ptr<CellGcmControl>(Emu.GetGSManager().GetRender().m_ctrlAddress);
	u32 realAddr;
	if (Memory.RSXIOMem.getRealAddr(ctrl->put.load(), realAddr)) {
	if (RSXIOMem.getRealAddr(ctrl->put.load(), realAddr)) {
		m_addr = realAddr;
		t_addr->SetValue(wxString::Format("%08x", m_addr));
		UpdateInformation();

@ -131,7 +131,7 @@ namespace loader

			initialize_psv_modules();

			auto armv7_thr_stop_data = vm::ptr<u32>::make(Memory.PSV.RAM.AllocAlign(3 * 4));
			auto armv7_thr_stop_data = vm::ptr<u32>::make(vm::alloc(3 * 4, vm::main));
			armv7_thr_stop_data[0] = 0xf870; // HACK instruction (Thumb)
			armv7_thr_stop_data[1] = SFI_HLE_RETURN;
			Emu.SetCPUThreadStop(armv7_thr_stop_data.addr());

@ -429,7 +429,7 @@ namespace loader
			case 0x00000001: //LOAD
				if (phdr.data_le.p_memsz)
				{
					if (machine == MACHINE_ARM && !Memory.PSV.RAM.AllocFixed(vaddr, memsz))
					if (machine == MACHINE_ARM && !vm::falloc(vaddr, memsz, vm::main))
					{
						LOG_ERROR(LOADER, "%s(): AllocFixed(0x%llx, 0x%x) failed", __FUNCTION__, vaddr, memsz);

@ -487,7 +487,7 @@ namespace loader
				return res;

			//initialize process
			auto rsx_callback_data = vm::ptr<u32>::make(Memory.MainMem.AllocAlign(4 * 4));
			auto rsx_callback_data = vm::ptr<u32>::make(vm::alloc(4 * 4, vm::main));
			*rsx_callback_data++ = (rsx_callback_data + 1).addr();
			Emu.SetRSXCallback(rsx_callback_data.addr());

@ -495,7 +495,7 @@ namespace loader
			rsx_callback_data[1] = SC(0);
			rsx_callback_data[2] = BLR();

			auto ppu_thr_stop_data = vm::ptr<u32>::make(Memory.MainMem.AllocAlign(2 * 4));
			auto ppu_thr_stop_data = vm::ptr<u32>::make(vm::alloc(2 * 4, vm::main));
			ppu_thr_stop_data[0] = SC(3);
			ppu_thr_stop_data[1] = BLR();
			Emu.SetCPUThreadStop(ppu_thr_stop_data.addr());

@ -577,7 +577,7 @@ namespace loader
			{
				if (phdr.p_memsz)
				{
					if (!vm::alloc(phdr.p_vaddr.addr(), phdr.p_memsz, vm::main))
					if (!vm::falloc(phdr.p_vaddr.addr(), phdr.p_memsz, vm::main))
					{
						LOG_ERROR(LOADER, "%s(): AllocFixed(0x%llx, 0x%llx) failed", __FUNCTION__, phdr.p_vaddr.addr(), phdr.p_memsz);