Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-21 03:55:32 +00:00)

Merge branch 'master' into nastys-patch-16

Commit b7bf0493ae
7 changed files with 95 additions and 66 deletions

@@ -188,10 +188,10 @@ struct spu_channel_op_state
 struct alignas(16) spu_channel
 {
 	// Low 32 bits contain value
-	atomic_t<u64> data;
+	atomic_t<u64> data{};

 	// Pending value to be inserted when it is possible in pop() or pop_wait()
-	atomic_t<u64> jostling_value;
+	atomic_t<u64> jostling_value{};

 public:
 	static constexpr u32 off_wait = 32;
@@ -667,11 +667,11 @@ public:
 	u8* reserv_base_addr = vm::g_reservations;

 	// General-Purpose Registers
-	std::array<v128, 128> gpr;
-	SPU_FPSCR fpscr;
+	std::array<v128, 128> gpr{};
+	SPU_FPSCR fpscr{};

 	// MFC command data
-	spu_mfc_cmd ch_mfc_cmd;
+	spu_mfc_cmd ch_mfc_cmd{};

 	// MFC command queue
 	spu_mfc_cmd mfc_queue[16]{};
@@ -683,9 +683,9 @@ public:
 	u64 mfc_last_timestamp = 0;

 	// MFC proxy command data
-	spu_mfc_cmd mfc_prxy_cmd;
+	spu_mfc_cmd mfc_prxy_cmd{};
 	shared_mutex mfc_prxy_mtx;
-	atomic_t<u32> mfc_prxy_mask;
+	atomic_t<u32> mfc_prxy_mask = 0;

 	// Tracks writes to MFC proxy command data
 	union
@@ -707,11 +707,11 @@ public:
 	// Range Lock pointer
 	atomic_t<u64, 64>* range_lock{};

-	u32 srr0;
-	u32 ch_tag_upd;
-	u32 ch_tag_mask;
+	u32 srr0 = 0;
+	u32 ch_tag_upd = 0;
+	u32 ch_tag_mask = 0;
 	spu_channel ch_tag_stat;
-	u32 ch_stall_mask;
+	u32 ch_stall_mask = 0;
 	spu_channel ch_stall_stat;
 	spu_channel ch_atomic_stat;

@@ -736,14 +736,14 @@ public:
 	};

 	atomic_t<ch_events_t> ch_events;
-	bool interrupts_enabled;
+	bool interrupts_enabled = false;

-	u64 ch_dec_start_timestamp; // timestamp of writing decrementer value
-	u32 ch_dec_value; // written decrementer value
+	u64 ch_dec_start_timestamp = 0; // timestamp of writing decrementer value
+	u32 ch_dec_value = 0; // written decrementer value
 	bool is_dec_frozen = false;
 	std::pair<u32, u32> read_dec() const; // Read decrementer

-	atomic_t<u32> run_ctrl; // SPU Run Control register (only provided to get latest data written)
+	atomic_t<u32> run_ctrl = 0; // SPU Run Control register (only provided to get latest data written)
 	shared_mutex run_ctrl_mtx;

 	struct alignas(8) status_npc_sync_var
@@ -752,10 +752,10 @@ public:
 		u32 npc; // SPU Next Program Counter register
 	};

-	atomic_t<status_npc_sync_var> status_npc;
-	std::array<spu_int_ctrl_t, 3> int_ctrl; // SPU Class 0, 1, 2 Interrupt Management
+	atomic_t<status_npc_sync_var> status_npc{};
+	std::array<spu_int_ctrl_t, 3> int_ctrl{}; // SPU Class 0, 1, 2 Interrupt Management

-	std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq; // Event Queue Keys for SPU Thread
+	std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq{}; // Event Queue Keys for SPU Thread
 	std::shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
 	spu_channel exit_status{}; // Threaded SPU exit status (not a channel, but the interface fits)
 	atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination
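
The hunks above consistently replace default-initialized members (`atomic_t<u64> data;`, `std::array<v128, 128> gpr;`, `u32 srr0;`, ...) with value-initialized ones (`{}` or `= 0`), so each field starts from a well-defined zero state instead of indeterminate memory. A minimal sketch of what the braced initializer changes, using hypothetical stand-in types rather than the emulator's own:

```cpp
#include <array>
#include <cstdint>
#include <cstdio>

// Hypothetical stand-ins for the emulator types; not RPCS3 code.
struct regs_uninit
{
    std::uint64_t data;                  // default-initialized: indeterminate for automatic storage
    std::array<std::uint32_t, 4> gpr;    // elements are indeterminate as well
};

struct regs_zeroed
{
    std::uint64_t data{};                // value-initialized: always starts at 0
    std::array<std::uint32_t, 4> gpr{};  // all elements start at 0
};

int main()
{
    regs_zeroed r;                       // well-defined zero state, no hand-written constructor needed
    std::printf("%llu %u\n", static_cast<unsigned long long>(r.data),
                static_cast<unsigned>(r.gpr[0])); // prints "0 0"
    return 0;
}
```

For aggregate members such as the register arrays, the `{}` initializer zero-fills the whole array, which is why no explicit constructor is added alongside these changes.
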
@@ -93,7 +93,7 @@ std::shared_ptr<vm::block_t> reserve_map(u32 alloc_size, u32 align)

 // Todo: fix order of error checks

-error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32> alloc_addr)
+error_code sys_memory_allocate(cpu_thread& cpu, u64 size, u64 flags, vm::ptr<u32> alloc_addr)
 {
 	cpu.state += cpu_flag::wait;

@@ -155,7 +155,7 @@ error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32
 	return CELL_ENOMEM;
 }

-error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
+error_code sys_memory_allocate_from_container(cpu_thread& cpu, u64 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr)
 {
 	cpu.state += cpu_flag::wait;

@@ -203,15 +203,15 @@ error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid
 		return {ct.ret, ct->size - ct->used};
 	}

-	if (const auto area = reserve_map(size, align))
+	if (const auto area = reserve_map(static_cast<u32>(size), align))
 	{
-		if (const u32 addr = area->alloc(size))
+		if (const u32 addr = area->alloc(static_cast<u32>(size)))
 		{
 			ensure(!g_fxo->get<sys_memory_address_table>().addrs[addr >> 16].exchange(ct.ptr.get()));

 			if (alloc_addr)
 			{
-				vm::lock_sudo(addr, size);
+				vm::lock_sudo(addr, static_cast<u32>(size));
 				cpu.check_state();
 				*alloc_addr = addr;
 				return CELL_OK;
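
In the hunks above, the syscall's `size` parameter is widened from `u32` to `u64`, and the value is only narrowed back with an explicit `static_cast<u32>` at the allocation sites. The handler therefore sees the full 64-bit guest register value rather than a silently truncated one. A minimal sketch of the pattern, with hypothetical bounds that are not the emulator's exact checks:

```cpp
#include <cstdint>
#include <optional>

// Hypothetical sketch: accept the raw 64-bit argument, validate, then narrow explicitly.
std::optional<std::uint32_t> validate_alloc_size(std::uint64_t size)
{
    constexpr std::uint64_t align_1m = 0x100000;

    // Reject zero, unaligned, or anything that would not survive the narrowing cast.
    if (!size || size % align_1m || size > 0xFFFFFFFFull)
    {
        return std::nullopt;
    }

    return static_cast<std::uint32_t>(size); // safe: the value is proven to fit in 32 bits
}
```

The explicit cast also documents at each call site that the narrowing is intentional rather than an accident of the function signature.
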
@@ -320,7 +320,7 @@ error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_u
 	return CELL_OK;
 }

-error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u32 size)
+error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u64 size)
 {
 	cpu.state += cpu_flag::wait;

@@ -345,7 +345,7 @@ error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u32 si
 	}

 	// Create the memory container
-	if (const u32 id = idm::make<lv2_memory_container>(size, true))
+	if (const u32 id = idm::make<lv2_memory_container>(static_cast<u32>(size), true))
 	{
 		cpu.check_state();
 		*cid = id;
@@ -128,13 +128,13 @@ struct sys_memory_user_memory_stat_t
 };

 // SysCalls
-error_code sys_memory_allocate(cpu_thread& cpu, u32 size, u64 flags, vm::ptr<u32> alloc_addr);
-error_code sys_memory_allocate_from_container(cpu_thread& cpu, u32 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr);
+error_code sys_memory_allocate(cpu_thread& cpu, u64 size, u64 flags, vm::ptr<u32> alloc_addr);
+error_code sys_memory_allocate_from_container(cpu_thread& cpu, u64 size, u32 cid, u64 flags, vm::ptr<u32> alloc_addr);
 error_code sys_memory_free(cpu_thread& cpu, u32 start_addr);
 error_code sys_memory_get_page_attribute(cpu_thread& cpu, u32 addr, vm::ptr<sys_page_attr_t> attr);
 error_code sys_memory_get_user_memory_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info);
 error_code sys_memory_get_user_memory_stat(cpu_thread& cpu, vm::ptr<sys_memory_user_memory_stat_t> mem_stat);
-error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u32 size);
+error_code sys_memory_container_create(cpu_thread& cpu, vm::ptr<u32> cid, u64 size);
 error_code sys_memory_container_destroy(cpu_thread& cpu, u32 cid);
 error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_info_t> mem_info, u32 cid);
 error_code sys_memory_container_destroy_parent_with_childs(cpu_thread& cpu, u32 cid, u32 must_0, vm::ptr<u32> mc_child);
@@ -562,16 +562,39 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g

 	sys_spu.warning("sys_spu_thread_initialize(thread=*0x%x, group=0x%x, spu_num=%d, img=*0x%x, attr=*0x%x, arg=*0x%x)", thread, group_id, spu_num, img, attr, arg);

-	const u32 option = attr->option;
+	if (!attr)
+	{
+		return CELL_EFAULT;
+	}

-	if (attr->name_len > 0x80 || option & ~(SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE | SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE))
+	const sys_spu_thread_attribute attr_data = *attr;
+
+	if (attr_data.name_len > 0x80)
 	{
 		return CELL_EINVAL;
 	}

-	sys_spu_image image;
+	if (!arg)
+	{
+		return CELL_EFAULT;
+	}

-	switch (img->type)
+	const sys_spu_thread_argument args = *arg;
+	const u32 option = attr_data.option;
+
+	if (option & ~(SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE | SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE))
+	{
+		return CELL_EINVAL;
+	}
+
+	if (!img)
+	{
+		return CELL_EFAULT;
+	}
+
+	sys_spu_image image = *img;
+
+	switch (image.type)
 	{
 	case SYS_SPU_IMAGE_TYPE_KERNEL:
 	{
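
The rewritten prologue above null-checks `attr`, `arg`, and `img` and then copies each guest structure into a local (`attr_data`, `args`, `image`) before validating it, so every later read works on a stable snapshot rather than re-dereferencing guest memory that another thread could modify in between. A minimal sketch of this copy-then-validate pattern with a hypothetical argument block (not the real `sys_spu` layout):

```cpp
#include <cstdint>

// Hypothetical guest-visible argument block; not the real sys_spu_thread_attribute layout.
struct thread_attr
{
    std::uint32_t name_len;
    std::uint32_t option;
};

enum class err { ok, fault, inval };

// Copy-then-validate: snapshot the guest structure once, then use only the snapshot.
err init_thread(const thread_attr* guest_attr)
{
    if (!guest_attr)
    {
        return err::fault;                        // EFAULT-style rejection of a null pointer
    }

    const thread_attr attr_data = *guest_attr;    // single read; later guest writes cannot race our checks

    if (attr_data.name_len > 0x80)
    {
        return err::inval;                        // EINVAL-style rejection of an oversized name
    }

    // ... continue using only attr_data from here on ...
    return err::ok;
}
```
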
@@ -591,12 +614,11 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
 	}
 	case SYS_SPU_IMAGE_TYPE_USER:
 	{
-		if (img->entry_point > 0x3fffc || img->nsegs <= 0 || img->nsegs > 0x20)
+		if (image.entry_point > 0x3fffc || image.nsegs <= 0 || image.nsegs > 0x20)
 		{
 			return CELL_EINVAL;
 		}

-		image = *img;
 		break;
 	}
 	default: return CELL_EINVAL;
@@ -672,7 +694,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
 	}

 	// Read thread name
-	const std::string thread_name(attr->name.get_ptr(), std::max<u32>(attr->name_len, 1) - 1);
+	const std::string thread_name(attr_data.name.get_ptr(), std::max<u32>(attr_data.name_len, 1) - 1);

 	const auto group = idm::get<lv2_spu_group>(group_id);

@@ -725,7 +747,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
 	ensure(vm::get(vm::spu)->falloc(spu->vm_offset(), SPU_LS_SIZE, &spu->shm, static_cast<u64>(vm::page_size_64k) | static_cast<u64>(vm::alloc_hidden)));
 	spu->map_ls(*spu->shm, spu->ls);

-	group->args[inited] = {arg->arg1, arg->arg2, arg->arg3, arg->arg4};
+	group->args[inited] = {args.arg1, args.arg2, args.arg3, args.arg4};
 	group->imgs[inited].first = image.entry_point;
 	group->imgs[inited].second = std::move(spu_segs);

@@ -800,12 +822,14 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num

 	const s32 min_prio = g_ps3_process_info.has_root_perm() ? 0 : 16;

-	if (attr->nsize > 0x80 || !num)
+	const sys_spu_thread_group_attribute attr_data = *attr;
+
+	if (attr_data.nsize > 0x80 || !num)
 	{
 		return CELL_EINVAL;
 	}

-	const s32 type = attr->type;
+	const s32 type = attr_data.type;

 	bool use_scheduler = true;
 	bool use_memct = !!(type & SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER);
@@ -902,7 +926,7 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num

 	if (use_memct && mem_size)
 	{
-		const auto sct = idm::get<lv2_memory_container>(attr->ct);
+		const auto sct = idm::get<lv2_memory_container>(attr_data.ct);

 		if (!sct)
 		{
@@ -936,7 +960,7 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
 		return CELL_EBUSY;
 	}

-	const auto group = idm::make_ptr<lv2_spu_group>(std::string(attr->name.get_ptr(), std::max<u32>(attr->nsize, 1) - 1), num, prio, type, ct, use_scheduler, mem_size);
+	const auto group = idm::make_ptr<lv2_spu_group>(std::string(attr_data.name.get_ptr(), std::max<u32>(attr_data.nsize, 1) - 1), num, prio, type, ct, use_scheduler, mem_size);

 	if (!group)
 	{
@@ -1909,7 +1933,7 @@ error_code sys_spu_thread_group_disconnect_event(ppu_thread& ppu, u32 id, u32 et
 	return CELL_OK;
 }

-error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et, u8 spup)
+error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et, u32 spup)
 {
 	ppu.state += cpu_flag::wait;

@@ -1943,7 +1967,7 @@ error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et,
 	return CELL_OK;
 }

-error_code sys_spu_thread_disconnect_event(ppu_thread& ppu, u32 id, u32 et, u8 spup)
+error_code sys_spu_thread_disconnect_event(ppu_thread& ppu, u32 id, u32 et, u32 spup)
 {
 	ppu.state += cpu_flag::wait;

@@ -2144,7 +2168,7 @@ error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread& ppu, u32 i
 	return CELL_OK;
 }

-error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread& ppu, u32 id, u8 spup)
+error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread& ppu, u32 id, u32 spup)
 {
 	ppu.state += cpu_flag::wait;

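
The `spup` parameter in the hunks above changes from `u8` to `u32`. Since PPU syscall arguments arrive in full-width registers, taking the port number as `u32` lets an out-of-range value reach the handler and be rejected, instead of being truncated to 8 bits at the ABI boundary. An illustrative sketch (hypothetical functions, with the 64-port bound taken from the `spup[64]` array shown earlier):

```cpp
#include <cstdint>

enum class err { ok, inval };

// Hypothetical handlers; the 64-port limit mirrors the spup[64] array above.
err connect_port_u8(std::uint8_t spup)   // a bad value like 0x120 already arrives truncated to 0x20
{
    return spup < 64 ? err::ok : err::inval;
}

err connect_port_u32(std::uint32_t spup) // 0x120 is still 0x120 here and can be rejected
{
    return spup < 64 ? err::ok : err::inval;
}

int main()
{
    const std::uint32_t guest_value = 0x120;  // out-of-range port number supplied by the guest
    const bool truncated = connect_port_u8(static_cast<std::uint8_t>(guest_value)) == err::ok; // true: wrongly accepted
    const bool widened   = connect_port_u32(guest_value) == err::ok;                           // false: correctly rejected
    return (truncated && !widened) ? 0 : 1;
}
```
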
@@ -372,7 +372,7 @@ error_code sys_spu_thread_group_get_priority(ppu_thread&, u32 id, vm::ptr<s32> p
 error_code sys_spu_thread_group_connect_event(ppu_thread&, u32 id, u32 eq, u32 et);
 error_code sys_spu_thread_group_disconnect_event(ppu_thread&, u32 id, u32 et);
 error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread&, u32 id, u32 eq_id, u64 req, vm::ptr<u8> spup);
-error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread&, u32 id, u8 spup);
+error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread&, u32 id, u32 spup);
 error_code sys_spu_thread_group_set_cooperative_victims(ppu_thread&, u32 id, u32 threads_mask);
 error_code sys_spu_thread_group_syscall_253(ppu_thread& ppu, u32 id, vm::ptr<sys_spu_thread_group_syscall_253_info> info);
 error_code sys_spu_thread_group_log(ppu_thread&, s32 command, vm::ptr<s32> stat);
@@ -382,8 +382,8 @@ error_code sys_spu_thread_write_spu_mb(ppu_thread&, u32 id, u32 value);
 error_code sys_spu_thread_set_spu_cfg(ppu_thread&, u32 id, u64 value);
 error_code sys_spu_thread_get_spu_cfg(ppu_thread&, u32 id, vm::ptr<u64> value);
 error_code sys_spu_thread_write_snr(ppu_thread&, u32 id, u32 number, u32 value);
-error_code sys_spu_thread_connect_event(ppu_thread&, u32 id, u32 eq, u32 et, u8 spup);
-error_code sys_spu_thread_disconnect_event(ppu_thread&, u32 id, u32 et, u8 spup);
+error_code sys_spu_thread_connect_event(ppu_thread&, u32 id, u32 eq, u32 et, u32 spup);
+error_code sys_spu_thread_disconnect_event(ppu_thread&, u32 id, u32 et, u32 spup);
 error_code sys_spu_thread_bind_queue(ppu_thread&, u32 id, u32 spuq, u32 spuq_num);
 error_code sys_spu_thread_unbind_queue(ppu_thread&, u32 id, u32 spuq_num);
 error_code sys_spu_thread_get_exit_status(ppu_thread&, u32 id, vm::ptr<s32> status);
@@ -48,13 +48,18 @@ sys_vm_t::sys_vm_t(utils::serial& ar)
 	g_fxo->get<sys_vm_global_t>().total_vsize += size;
 }

-error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
+error_code sys_vm_memory_map(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
 {
 	ppu.state += cpu_flag::wait;

 	sys_vm.warning("sys_vm_memory_map(vsize=0x%x, psize=0x%x, cid=0x%x, flags=0x%x, policy=0x%x, addr=*0x%x)", vsize, psize, cid, flag, policy, addr);

-	if (!vsize || !psize || vsize % 0x2000000 || vsize > 0x10000000 || psize > 0x10000000 || policy != SYS_VM_POLICY_AUTO_RECOMMENDED)
+	if (!vsize || !psize || vsize % 0x200'0000 || vsize > 0x1000'0000 || psize > 0x1000'0000 || psize % 0x1'0000 || policy != SYS_VM_POLICY_AUTO_RECOMMENDED)
 	{
 		return CELL_EINVAL;
 	}
+
+	if (ppu.gpr[11] == 300 && psize < 0x10'0000)
+	{
+		return CELL_EINVAL;
+	}
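
The tightened condition above requires `vsize` to be a non-zero multiple of 32 MiB no larger than 256 MiB, and `psize` to be non-zero, 64 KiB aligned, and no larger than 256 MiB, in addition to the policy check. Restated as a standalone predicate (a sketch, not emulator code):

```cpp
#include <cstdint>

// Hedged restatement of the size checks above as a standalone predicate.
bool vm_map_sizes_ok(std::uint64_t vsize, std::uint64_t psize)
{
    const bool vsize_ok = vsize != 0
        && vsize % 0x200'0000 == 0     // multiple of 32 MiB
        && vsize <= 0x1000'0000;       // at most 256 MiB
    const bool psize_ok = psize != 0
        && psize % 0x1'0000 == 0       // 64 KiB aligned
        && psize <= 0x1000'0000;       // at most 256 MiB
    return vsize_ok && psize_ok;
}
```
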
@@ -68,16 +73,16 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64
 		return CELL_ESRCH;
 	}

-	if (!g_fxo->get<sys_vm_global_t>().total_vsize.fetch_op([vsize](u32& size)
+	if (!g_fxo->get<sys_vm_global_t>().total_vsize.fetch_op([vsize, has_root = g_ps3_process_info.has_root_perm()](u32& size)
 	{
 		// A single process can hold up to 256MB of virtual memory, even on DECR
 		// VSH can hold more
-		if ((g_ps3_process_info.has_root_perm() ? 0x1E000000 : 0x10000000) - size < vsize)
+		if ((has_root ? 0x1E000000 : 0x10000000) - size < vsize)
 		{
 			return false;
 		}

-		size += vsize;
+		size += static_cast<u32>(vsize);
 		return true;
 	}).second)
 	{
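
In the hunk above, the root-permission flag is captured once as `has_root` when the lambda is created, so it is not re-evaluated on every retry of the `fetch_op` compare-and-swap loop. A minimal sketch of a fetch_op-style helper and this capture pattern, using `std::atomic` (the helper's shape is assumed here, not RPCS3's actual `atomic_t` API):

```cpp
#include <atomic>
#include <cstdint>

// Hypothetical fetch_op-style helper: apply `op` to a copy under a CAS loop.
// Capturing loop-invariant state (here `limit`) once keeps the retry body cheap.
template <typename T, typename Op>
bool fetch_op(std::atomic<T>& value, Op op)
{
    T old = value.load();
    for (;;)
    {
        T desired = old;
        if (!op(desired))
        {
            return false;                              // predicate rejected the update
        }
        if (value.compare_exchange_weak(old, desired))
        {
            return true;                               // update applied atomically
        }
        // `old` now holds the freshly observed value; retry
    }
}

bool reserve_vsize(std::atomic<std::uint32_t>& total, std::uint32_t vsize, bool has_root)
{
    const std::uint32_t limit = has_root ? 0x1E000000 : 0x10000000; // evaluated once, outside the loop
    return fetch_op(total, [vsize, limit](std::uint32_t& size)
    {
        if (limit - size < vsize)
        {
            return false;                              // would exceed the per-process budget
        }
        size += vsize;
        return true;
    });
}
```
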
@@ -86,7 +91,7 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64

 	if (!ct->take(psize))
 	{
-		g_fxo->get<sys_vm_global_t>().total_vsize -= vsize;
+		g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
 		return CELL_ENOMEM;
 	}

@@ -96,10 +101,10 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64
 	sys_vm.warning("sys_vm_memory_map(): Found VM 0x%x area (vsize=0x%x)", addr, vsize);

 	// Alloc all memory (shall not fail)
-	ensure(area->alloc(vsize));
-	vm::lock_sudo(area->addr, vsize);
+	ensure(area->alloc(static_cast<u32>(vsize)));
+	vm::lock_sudo(area->addr, static_cast<u32>(vsize));

-	idm::make<sys_vm_t>(area->addr, vsize, ct, psize);
+	idm::make<sys_vm_t>(area->addr, static_cast<u32>(vsize), ct, static_cast<u32>(psize));

 	// Write a pointer for the allocated memory
 	ppu.check_state();
@@ -108,11 +113,11 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64
 	}

 	ct->free(psize);
-	g_fxo->get<sys_vm_global_t>().total_vsize -= vsize;
+	g_fxo->get<sys_vm_global_t>().total_vsize -= static_cast<u32>(vsize);
 	return CELL_ENOMEM;
 }

-error_code sys_vm_memory_map_different(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
+error_code sys_vm_memory_map_different(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr)
 {
 	ppu.state += cpu_flag::wait;

@@ -153,7 +158,7 @@ error_code sys_vm_unmap(ppu_thread& ppu, u32 addr)
 	return CELL_OK;
 }

-error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u32 size)
+error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u64 size)
 {
 	ppu.state += cpu_flag::wait;

@@ -176,7 +181,7 @@ error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u32 size)
 			return CELL_ENOMEM;
 		}

-		vmo.psize += size;
+		vmo.psize += static_cast<u32>(size);
 		return {};
 	});

@@ -193,7 +198,7 @@ error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u32 size)
 	return CELL_OK;
 }

-error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u32 size)
+error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u64 size)
 {
 	ppu.state += cpu_flag::wait;

@@ -213,12 +218,12 @@ error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u32 size)

 	auto [_, ok] = vmo.psize.fetch_op([&](u32& value)
 	{
-		if (value < 0x100000ull + size)
+		if (value <= size || value - size < 0x100000ull)
 		{
 			return false;
 		}

-		value -= size;
+		value -= static_cast<u32>(size);
 		return true;
 	});

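
The reordered comparison above avoids unsigned wrap-around: with `size` now a full 64-bit guest value, the old form `value < 0x100000ull + size` can overflow and accept an absurd request, while `value <= size || value - size < 0x100000ull` rejects it and still enforces the 1 MiB floor. A small worked example (plain `std::uint64_t` stand-ins, not the emulator's types):

```cpp
#include <cstdint>

// Hedged illustration of why the comparison was reordered: with a full 64-bit,
// guest-controlled `size`, the sum `0x100000ull + size` can wrap around.
bool can_return_old(std::uint64_t value, std::uint64_t size)
{
    return !(value < 0x100000ull + size);                   // wraps if size is close to UINT64_MAX
}

bool can_return_new(std::uint64_t value, std::uint64_t size)
{
    return !(value <= size || value - size < 0x100000ull);  // no overflow, keeps a 1 MiB floor
}

int main()
{
    const std::uint64_t value = 0x300000;            // 3 MiB currently mapped
    const std::uint64_t size  = ~0ull - 0x1000;      // absurd guest request near UINT64_MAX

    const bool old_says = can_return_old(value, size); // true: the wrapped sum lets the check pass
    const bool new_says = can_return_new(value, size); // false: the request is rejected
    return (old_says && !new_says) ? 0 : 1;
}
```
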
@@ -58,11 +58,11 @@ struct sys_vm_t
 class ppu_thread;

 // SysCalls
-error_code sys_vm_memory_map(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
-error_code sys_vm_memory_map_different(ppu_thread& ppu, u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
+error_code sys_vm_memory_map(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
+error_code sys_vm_memory_map_different(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64 flag, u64 policy, vm::ptr<u32> addr);
 error_code sys_vm_unmap(ppu_thread& ppu, u32 addr);
-error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u32 size);
-error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u32 size);
+error_code sys_vm_append_memory(ppu_thread& ppu, u32 addr, u64 size);
+error_code sys_vm_return_memory(ppu_thread& ppu, u32 addr, u64 size);
 error_code sys_vm_lock(ppu_thread& ppu, u32 addr, u32 size);
 error_code sys_vm_unlock(ppu_thread& ppu, u32 addr, u32 size);
 error_code sys_vm_touch(ppu_thread& ppu, u32 addr, u32 size);