Commit 77513cc84d by digant, 2024-12-22 21:15:52 +01:00 (parent 92d1f443ac)
124 changed files with 1145 additions and 1052 deletions

View file

@ -1360,7 +1360,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noe
// check if address is RawSPU MMIO register
do if (addr - RAW_SPU_BASE_ADDR < (6 * RAW_SPU_OFFSET) && (addr % RAW_SPU_OFFSET) >= RAW_SPU_PROB_OFFSET)
{
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
if (!thread)
{
@ -1548,7 +1548,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noe
}
}
if (auto pf_port = idm::get<lv2_obj, lv2_event_port>(pf_port_id); pf_port && pf_port->queue)
if (auto pf_port = idm::get_unlocked<lv2_obj, lv2_event_port>(pf_port_id); pf_port && pf_port->queue)
{
// We notify the game that a page fault occurred so it can rectify it.
// Note, for data3, were the memory readable AND we got a page fault, it must be due to a write violation since reads are allowed.
@ -2602,7 +2602,7 @@ bool thread_base::join(bool dtor) const
if (i >= 16 && !(i & (i - 1)) && timeout != atomic_wait_timeout::inf)
{
sig_log.error(u8"Thread [%s] is too sleepy. Waiting for it %.3fµs already!", *m_tname.load(), (utils::get_tsc() - stamp0) / (utils::get_tsc_freq() / 1000000.));
sig_log.error(u8"Thread [%s] is too sleepy. Waiting for it %.3fus already!", *m_tname.load(), (utils::get_tsc() - stamp0) / (utils::get_tsc_freq() / 1000000.));
}
}
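
Note on the recurring pattern above: most hunks in this commit replace idm::get with idm::get_unlocked at call sites that only take a short-lived reference to the looked-up object (the same file also shows the µs → us change that strips the non-ASCII micro sign from a log format string). The sketch below only illustrates the general difference between a locked and an unlocked ID lookup; the names (id_map, m_mutex) are invented and do not reflect RPCS3's actual idm implementation.

// Editor's illustrative sketch -- not part of this commit. The names
// (id_map, m_mutex, get, get_unlocked) are invented; RPCS3's idm is
// considerably more involved. The point is only the difference between
// a lookup that takes the container lock and one that skips it.
#include <map>
#include <memory>
#include <mutex>

template <typename T>
class id_map
{
    std::map<unsigned, std::shared_ptr<T>> m_objects;
    mutable std::mutex m_mutex;

public:
    // Locked lookup: safe against concurrent insertion/removal.
    std::shared_ptr<T> get(unsigned id) const
    {
        std::lock_guard lock(m_mutex);
        const auto it = m_objects.find(id);
        return it != m_objects.end() ? it->second : nullptr;
    }

    // Unlocked lookup: the caller promises the map is not being mutated
    // concurrently (or accepts a possibly stale view), so no lock is taken.
    std::shared_ptr<T> get_unlocked(unsigned id) const
    {
        const auto it = m_objects.find(id);
        return it != m_objects.end() ? it->second : nullptr;
    }
};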

View file

@ -121,7 +121,7 @@ public:
void unlock_hle()
{
const u32 value = atomic_storage<u32>::fetch_add_hle_rel(m_value.raw(), 0u - c_one);
const u32 value = atomic_storage<u32>::fetch_add_hle_rel(m_value.raw(), ~c_one + 1);
if (value != c_one) [[unlikely]]
{
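
The shared-mutex hunk above rewrites 0u - c_one as ~c_one + 1. Both expressions produce the same unsigned two's-complement negation, so fetch_add_hle_rel still subtracts c_one; the commit does not state the motivation, but avoiding a compiler diagnostic on the 0u - x form is a plausible one. A quick self-contained check (the value chosen for c_one here is made up):

// Editor's check -- not part of this commit. For unsigned x, 0u - x and
// ~x + 1 are the same value modulo 2^32, so either argument makes the
// fetch-add subtract c_one.
#include <cstdint>

constexpr std::uint32_t c_one = 0x10000u; // hypothetical increment value

static_assert(0u - c_one == ~c_one + 1u);
static_assert(static_cast<std::uint32_t>(~c_one + 1u) + c_one == 0u);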

View file

@ -1,6 +1,7 @@
#pragma once
#include <string>
#include "Emu/CPU/CPUThread.h"
#include "Utilities/StrFmt.h"
enum class cpu_disasm_mode
@ -22,7 +23,7 @@ protected:
const u8* m_offset{};
const u32 m_start_pc;
std::add_pointer_t<const cpu_thread> m_cpu{};
std::shared_ptr<cpu_thread> m_cpu_handle;
shared_ptr<cpu_thread> m_cpu_handle;
u32 m_op = 0;
void format_by_mode()
@ -81,7 +82,7 @@ public:
return const_cast<cpu_thread*>(m_cpu);
}
void set_cpu_handle(std::shared_ptr<cpu_thread> cpu)
void set_cpu_handle(shared_ptr<cpu_thread> cpu)
{
m_cpu_handle = std::move(cpu);

View file

@ -87,7 +87,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& ou
const u32 must_have_cpu_id = static_cast<u32>(arg);
// Dump main_thread
const auto main_ppu = idm::get<named_thread<ppu_thread>>(ppu_thread::id_base);
const auto main_ppu = idm::get_unlocked<named_thread<ppu_thread>>(ppu_thread::id_base);
if (main_ppu)
{
@ -99,7 +99,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& ou
{
if (must_have_cpu_id != ppu_thread::id_base)
{
const auto selected_ppu = idm::get<named_thread<ppu_thread>>(must_have_cpu_id);
const auto selected_ppu = idm::get_unlocked<named_thread<ppu_thread>>(must_have_cpu_id);
if (selected_ppu)
{
@ -110,7 +110,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& ou
}
else if (must_have_cpu_id >> 24 == spu_thread::id_base >> 24)
{
const auto selected_spu = idm::get<named_thread<spu_thread>>(must_have_cpu_id);
const auto selected_spu = idm::get_unlocked<named_thread<spu_thread>>(must_have_cpu_id);
if (selected_spu)
{
@ -236,7 +236,7 @@ struct cpu_prof
}
// Print info
void print(const std::shared_ptr<cpu_thread>& ptr)
void print(const shared_ptr<cpu_thread>& ptr)
{
if (new_samples < min_print_samples || samples == idle)
{
@ -263,7 +263,7 @@ struct cpu_prof
new_samples = 0;
}
static void print_all(std::unordered_map<std::shared_ptr<cpu_thread>, sample_info>& threads, sample_info& all_info)
static void print_all(std::unordered_map<shared_ptr<cpu_thread>, sample_info>& threads, sample_info& all_info)
{
u64 new_samples = 0;
@ -319,7 +319,7 @@ struct cpu_prof
void operator()()
{
std::unordered_map<std::shared_ptr<cpu_thread>, sample_info> threads;
std::unordered_map<shared_ptr<cpu_thread>, sample_info> threads;
while (thread_ctrl::state() != thread_state::aborting)
{
@ -335,15 +335,15 @@ struct cpu_prof
continue;
}
std::shared_ptr<cpu_thread> ptr;
shared_ptr<cpu_thread> ptr;
if (id >> 24 == 1)
{
ptr = idm::get<named_thread<ppu_thread>>(id);
ptr = idm::get_unlocked<named_thread<ppu_thread>>(id);
}
else if (id >> 24 == 2)
{
ptr = idm::get<named_thread<spu_thread>>(id);
ptr = idm::get_unlocked<named_thread<spu_thread>>(id);
}
else
{
@ -437,7 +437,7 @@ struct cpu_prof
continue;
}
// Wait, roughly for 20µs
// Wait, roughly for 20us
thread_ctrl::wait_for(20, false);
}
@ -1302,7 +1302,7 @@ cpu_thread* cpu_thread::get_next_cpu()
return nullptr;
}
std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, std::shared_ptr<cpu_thread> handle);
std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, shared_ptr<cpu_thread> handle);
void cpu_thread::dump_all(std::string& ret) const
{
@ -1318,7 +1318,7 @@ void cpu_thread::dump_all(std::string& ret) const
if (u32 cur_pc = get_pc(); cur_pc != umax)
{
// Dump a snippet of currently executed code (may be unreliable with non-static-interpreter decoders)
auto disasm = make_disasm(this, nullptr);
auto disasm = make_disasm(this, null_ptr);
const auto rsx = try_get<rsx::thread>();
@ -1558,14 +1558,14 @@ u32 CPUDisAsm::DisAsmBranchTarget(s32 /*imm*/)
return 0;
}
extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock, std::vector<std::pair<std::shared_ptr<named_thread<spu_thread>>, u32>>* out_list)
extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock, std::vector<std::pair<shared_ptr<named_thread<spu_thread>>, u32>>* out_list)
{
if (out_list)
{
out_list->clear();
}
auto get_spus = [old_counter = u64{umax}, spu_list = std::vector<std::shared_ptr<named_thread<spu_thread>>>()](bool can_collect, bool force_collect) mutable
auto get_spus = [old_counter = u64{umax}, spu_list = std::vector<shared_ptr<named_thread<spu_thread>>>()](bool can_collect, bool force_collect) mutable
{
const u64 new_counter = cpu_thread::g_threads_created + cpu_thread::g_threads_deleted;

View file

@ -556,7 +556,7 @@ void cell_audio_thread::advance(u64 timestamp)
m_dynamic_period = 0;
// send aftermix event (normal audio event)
std::array<std::shared_ptr<lv2_event_queue>, MAX_AUDIO_EVENT_QUEUES> queues;
std::array<shared_ptr<lv2_event_queue>, MAX_AUDIO_EVENT_QUEUES> queues;
u32 queue_count = 0;
event_period++;

View file

@ -400,7 +400,7 @@ public:
u32 flags = 0; // iFlags
u64 source = 0; // Event source
u64 ack_timestamp = 0; // timestamp of last call of cellAudioSendAck
std::shared_ptr<lv2_event_queue> port{}; // Underlying event port
shared_ptr<lv2_event_queue> port{}; // Underlying event port
};
std::vector<key_info> keys{};

View file

@ -1031,7 +1031,7 @@ error_code cellDmuxClose(u32 handle)
{
cellDmux.warning("cellDmuxClose(handle=0x%x)", handle);
const auto dmux = idm::get<Demuxer>(handle);
const auto dmux = idm::get_unlocked<Demuxer>(handle);
if (!dmux)
{
@ -1060,7 +1060,7 @@ error_code cellDmuxSetStream(u32 handle, u32 streamAddress, u32 streamSize, b8 d
{
cellDmux.trace("cellDmuxSetStream(handle=0x%x, streamAddress=0x%x, streamSize=%d, discontinuity=%d, userData=0x%llx)", handle, streamAddress, streamSize, discontinuity, userData);
const auto dmux = idm::get<Demuxer>(handle);
const auto dmux = idm::get_unlocked<Demuxer>(handle);
if (!dmux)
{
@ -1088,7 +1088,7 @@ error_code cellDmuxResetStream(u32 handle)
{
cellDmux.warning("cellDmuxResetStream(handle=0x%x)", handle);
const auto dmux = idm::get<Demuxer>(handle);
const auto dmux = idm::get_unlocked<Demuxer>(handle);
if (!dmux)
{
@ -1103,7 +1103,7 @@ error_code cellDmuxResetStreamAndWaitDone(u32 handle)
{
cellDmux.warning("cellDmuxResetStreamAndWaitDone(handle=0x%x)", handle);
const auto dmux = idm::get<Demuxer>(handle);
const auto dmux = idm::get_unlocked<Demuxer>(handle);
if (!dmux)
{
@ -1164,7 +1164,7 @@ error_code cellDmuxEnableEs(u32 handle, vm::cptr<CellCodecEsFilterId> esFilterId
{
cellDmux.warning("cellDmuxEnableEs(handle=0x%x, esFilterId=*0x%x, esResourceInfo=*0x%x, esCb=*0x%x, esSpecificInfo=*0x%x, esHandle=*0x%x)", handle, esFilterId, esResourceInfo, esCb, esSpecificInfo, esHandle);
const auto dmux = idm::get<Demuxer>(handle);
const auto dmux = idm::get_unlocked<Demuxer>(handle);
if (!dmux)
{
@ -1194,7 +1194,7 @@ error_code cellDmuxDisableEs(u32 esHandle)
{
cellDmux.warning("cellDmuxDisableEs(esHandle=0x%x)", esHandle);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1213,7 +1213,7 @@ error_code cellDmuxResetEs(u32 esHandle)
{
cellDmux.trace("cellDmuxResetEs(esHandle=0x%x)", esHandle);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1232,7 +1232,7 @@ error_code cellDmuxGetAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpeci
{
cellDmux.trace("cellDmuxGetAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1255,7 +1255,7 @@ error_code cellDmuxPeekAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpec
{
cellDmux.trace("cellDmuxPeekAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1278,7 +1278,7 @@ error_code cellDmuxGetAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auS
{
cellDmux.trace("cellDmuxGetAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1301,7 +1301,7 @@ error_code cellDmuxPeekAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> au
{
cellDmux.trace("cellDmuxPeekAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1324,7 +1324,7 @@ error_code cellDmuxReleaseAu(u32 esHandle)
{
cellDmux.trace("cellDmuxReleaseAu(esHandle=0x%x)", esHandle);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{
@ -1342,7 +1342,7 @@ error_code cellDmuxFlushEs(u32 esHandle)
{
cellDmux.warning("cellDmuxFlushEs(esHandle=0x%x)", esHandle);
const auto es = idm::get<ElementaryStream>(esHandle);
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
if (!es)
{

View file

@ -598,7 +598,7 @@ error_code cellFsSetIoBufferFromDefaultContainer(u32 fd, u32 buffer_size, u32 pa
{
cellFs.todo("cellFsSetIoBufferFromDefaultContainer(fd=%d, buffer_size=%d, page_type=%d)", fd, buffer_size, page_type);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -695,7 +695,7 @@ s32 cellFsStReadInit(u32 fd, vm::cptr<CellFsRingBuffer> ringbuf)
return CELL_EINVAL;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -716,7 +716,7 @@ s32 cellFsStReadFinish(u32 fd)
{
cellFs.todo("cellFsStReadFinish(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -732,7 +732,7 @@ s32 cellFsStReadGetRingBuf(u32 fd, vm::ptr<CellFsRingBuffer> ringbuf)
{
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, ringbuf=*0x%x)", fd, ringbuf);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -748,7 +748,7 @@ s32 cellFsStReadGetStatus(u32 fd, vm::ptr<u64> status)
{
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, status=*0x%x)", fd, status);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -764,7 +764,7 @@ s32 cellFsStReadGetRegid(u32 fd, vm::ptr<u64> regid)
{
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, regid=*0x%x)", fd, regid);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -780,7 +780,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
{
cellFs.todo("cellFsStReadStart(fd=%d, offset=0x%llx, size=0x%llx)", fd, offset, size);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -796,7 +796,7 @@ s32 cellFsStReadStop(u32 fd)
{
cellFs.todo("cellFsStReadStop(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -812,7 +812,7 @@ s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
{
cellFs.todo("cellFsStRead(fd=%d, buf=*0x%x, size=0x%llx, rsize=*0x%x)", fd, buf, size, rsize);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -828,7 +828,7 @@ s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
{
cellFs.todo("cellFsStReadGetCurrentAddr(fd=%d, addr=*0x%x, size=*0x%x)", fd, addr, size);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -844,7 +844,7 @@ s32 cellFsStReadPutCurrentAddr(u32 fd, vm::ptr<u8> addr, u64 size)
{
cellFs.todo("cellFsStReadPutCurrentAddr(fd=%d, addr=*0x%x, size=0x%llx)", fd, addr, size);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -860,7 +860,7 @@ s32 cellFsStReadWait(u32 fd, u64 size)
{
cellFs.todo("cellFsStReadWait(fd=%d, size=0x%llx)", fd, size);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -876,7 +876,7 @@ s32 cellFsStReadWaitCallback(u32 fd, u64 size, vm::ptr<void(s32 xfd, u64 xsize)>
{
cellFs.todo("cellFsStReadWaitCallback(fd=%d, size=0x%llx, func=*0x%x)", fd, size, func);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -908,7 +908,7 @@ struct fs_aio_thread : ppu_thread
s32 error = CELL_EBADF;
u64 result = 0;
const auto file = idm::get<lv2_fs_object, lv2_file>(aio->fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(aio->fd);
if (!file || (type == 1 && file->flags & CELL_FS_O_WRONLY) || (type == 2 && !(file->flags & CELL_FS_O_ACCMODE)))
{

View file

@ -961,7 +961,7 @@ error_code cellGameContentPermit(ppu_thread& ppu, vm::ptr<char[CELL_GAME_PATH_MA
if (!perm.temp.empty())
{
std::vector<std::shared_ptr<lv2_file>> lv2_files;
std::vector<shared_ptr<lv2_file>> lv2_files;
const std::string real_dir = vfs::get(dir) + "/";

View file

@ -455,7 +455,7 @@ error_code _cellGcmInitBody(ppu_thread& ppu, vm::pptr<CellGcmContextData> contex
vm::var<u64> _tid;
vm::var<char[]> _name = vm::make_str("_gcm_intr_thread");
ppu_execute<&sys_ppu_thread_create>(ppu, +_tid, 0x10000, 0, 1, 0x4000, SYS_PPU_THREAD_CREATE_INTERRUPT, +_name);
render->intr_thread = idm::get<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
render->intr_thread = idm::get_unlocked<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
render->intr_thread->state -= cpu_flag::stop;
thread_ctrl::notify(*render->intr_thread);

View file

@ -288,7 +288,7 @@ error_code cellGifDecReadHeader(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStrea
}
case CELL_GIFDEC_FILE:
{
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
file->file.seek(0);
file->file.read(buffer, sizeof(buffer));
break;
@ -500,7 +500,7 @@ error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStre
case CELL_GIFDEC_FILE:
{
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
file->file.seek(0);
file->file.read(gif.get(), fileSize);
break;

View file

@ -103,7 +103,7 @@ error_code cellJpgDecClose(u32 mainHandle, u32 subHandle)
{
cellJpgDec.warning("cellJpgDecOpen(mainHandle=0x%x, subHandle=0x%x)", mainHandle, subHandle);
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
if (!subHandle_data)
{
@ -120,7 +120,7 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
{
cellJpgDec.trace("cellJpgDecReadHeader(mainHandle=0x%x, subHandle=0x%x, info=*0x%x)", mainHandle, subHandle, info);
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
if (!subHandle_data)
{
@ -142,7 +142,7 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
case CELL_JPGDEC_FILE:
{
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
file->file.seek(0);
file->file.read(buffer.get(), fileSize);
break;
@ -201,7 +201,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
dataOutInfo->status = CELL_JPGDEC_DEC_STATUS_STOP;
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
if (!subHandle_data)
{
@ -223,7 +223,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
case CELL_JPGDEC_FILE:
{
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
file->file.seek(0);
file->file.read(jpg.get(), fileSize);
break;
@ -340,7 +340,7 @@ error_code cellJpgDecSetParameter(u32 mainHandle, u32 subHandle, vm::cptr<CellJp
{
cellJpgDec.trace("cellJpgDecSetParameter(mainHandle=0x%x, subHandle=0x%x, inParam=*0x%x, outParam=*0x%x)", mainHandle, subHandle, inParam, outParam);
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
if (!subHandle_data)
{

View file

@ -93,7 +93,7 @@ void pngDecReadBuffer(png_structp png_ptr, png_bytep out, png_size_t length)
if (buffer.file)
{
// Get the file
auto file = idm::get<lv2_fs_object, lv2_file>(buffer.fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(buffer.fd);
// Read the data
file->file.read(out, length);

View file

@ -94,11 +94,11 @@ struct search_content_t
ENABLE_BITWISE_SERIALIZATION;
};
using content_id_type = std::pair<u64, std::shared_ptr<search_content_t>>;
using content_id_type = std::pair<u64, shared_ptr<search_content_t>>;
struct content_id_map
{
std::unordered_map<u64, std::shared_ptr<search_content_t>> map;
std::unordered_map<u64, shared_ptr<search_content_t>> map;
shared_mutex mutex;
@ -539,7 +539,7 @@ error_code cellSearchStartListSearch(CellSearchListSearchType type, CellSearchSo
sysutil_register_cb([=, &content_map = g_fxo->get<content_id_map>(), &search](ppu_thread& ppu) -> s32
{
auto curr_search = idm::get<search_object_t>(id);
auto curr_search = idm::get_unlocked<search_object_t>(id);
vm::var<CellSearchResultParam> resultParam;
resultParam->searchId = id;
resultParam->resultNum = 0; // Set again later
@ -613,7 +613,7 @@ error_code cellSearchStartListSearch(CellSearchListSearchType type, CellSearchSo
auto found = content_map.map.find(hash);
if (found == content_map.map.end()) // content isn't yet being tracked
{
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
{
// TODO: Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
@ -800,7 +800,7 @@ error_code cellSearchStartContentSearchInList(vm::cptr<CellSearchContentId> list
sysutil_register_cb([=, list_path = std::string(content_info->infoPath.contentPath), &search, &content_map](ppu_thread& ppu) -> s32
{
auto curr_search = idm::get<search_object_t>(id);
auto curr_search = idm::get_unlocked<search_object_t>(id);
vm::var<CellSearchResultParam> resultParam;
resultParam->searchId = id;
resultParam->resultNum = 0; // Set again later
@ -855,7 +855,7 @@ error_code cellSearchStartContentSearchInList(vm::cptr<CellSearchContentId> list
auto found = content_map.map.find(hash);
if (found == content_map.map.end()) // content isn't yet being tracked
{
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
{
// Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
@ -1060,7 +1060,7 @@ error_code cellSearchStartContentSearch(CellSearchContentSearchType type, CellSe
sysutil_register_cb([=, &content_map = g_fxo->get<content_id_map>(), &search](ppu_thread& ppu) -> s32
{
auto curr_search = idm::get<search_object_t>(id);
auto curr_search = idm::get_unlocked<search_object_t>(id);
vm::var<CellSearchResultParam> resultParam;
resultParam->searchId = id;
resultParam->resultNum = 0; // Set again later
@ -1096,7 +1096,7 @@ error_code cellSearchStartContentSearch(CellSearchContentSearchType type, CellSe
auto found = content_map.map.find(hash);
if (found == content_map.map.end()) // content isn't yet being tracked
{
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
{
// Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
@ -1372,7 +1372,7 @@ error_code cellSearchGetContentInfoByOffset(CellSearchId searchId, s32 offset, v
std::memset(outContentId->data + 4, -1, CELL_SEARCH_CONTENT_ID_SIZE - 4);
}
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -1518,7 +1518,7 @@ error_code cellSearchGetOffsetByContentId(CellSearchId searchId, vm::cptr<CellSe
return error;
}
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -1568,7 +1568,7 @@ error_code cellSearchGetContentIdByOffset(CellSearchId searchId, s32 offset, vm:
std::memset(outContentId->data + 4, -1, CELL_SEARCH_CONTENT_ID_SIZE - 4);
}
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -1663,7 +1663,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
// Reset values first
std::memset(outContext->data, 0, 4);
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -1690,17 +1690,17 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
const auto& first_content = first_content_id.second;
ensure(first_content);
const auto get_random_content = [&searchObject, &first_content]() -> std::shared_ptr<search_content_t>
const auto get_random_content = [&searchObject, &first_content]() -> shared_ptr<search_content_t>
{
if (searchObject->content_ids.size() == 1)
{
return first_content;
}
std::vector<content_id_type> result;
std::sample(searchObject->content_ids.begin(), searchObject->content_ids.end(), std::back_inserter(result), 1, std::mt19937{std::random_device{}()});
ensure(result.size() == 1);
std::shared_ptr<search_content_t> content = result[0].second;
ensure(!!content);
shared_ptr<search_content_t> content = ensure(result[0].second);
return content;
};
@ -1736,7 +1736,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
{
// Select random track
// TODO: whole playlist
std::shared_ptr<search_content_t> content = get_random_content();
shared_ptr<search_content_t> content = get_random_content();
context.playlist.push_back(content->infoPath.contentPath);
cellSearch.notice("cellSearchGetMusicSelectionContext(): Hash=%08X, Assigning random track: Type=0x%x, Path=%s", content_hash, +content->type, context.playlist.back());
}
@ -1757,7 +1757,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
{
// Select random track
// TODO: whole playlist
std::shared_ptr<search_content_t> content = get_random_content();
shared_ptr<search_content_t> content = get_random_content();
context.playlist.push_back(content->infoPath.contentPath);
cellSearch.notice("cellSearchGetMusicSelectionContext(): Assigning random track: Type=0x%x, Path=%s", +content->type, context.playlist.back());
}
@ -2044,7 +2044,7 @@ error_code cellSearchCancel(CellSearchId searchId)
{
cellSearch.todo("cellSearchCancel(searchId=0x%x)", searchId);
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -2075,7 +2075,7 @@ error_code cellSearchEnd(CellSearchId searchId)
return error;
}
const auto searchObject = idm::get<search_object_t>(searchId);
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
if (!searchObject)
{
@ -2120,7 +2120,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
// Search for the content that matches our current selection
auto& content_map = g_fxo->get<content_id_map>();
std::shared_ptr<search_content_t> found_content;
shared_ptr<search_content_t> found_content;
u64 hash = 0;
for (const std::string& track : playlist)
@ -2187,7 +2187,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
}
// TODO: check for actual content inside the directory
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
curr_find->type = CELL_SEARCH_CONTENTTYPE_MUSICLIST;
curr_find->repeat_mode = repeat_mode;
curr_find->context_option = context_option;
@ -2243,7 +2243,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
continue;
}
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
curr_find->type = CELL_SEARCH_CONTENTTYPE_MUSIC;
curr_find->repeat_mode = repeat_mode;
curr_find->context_option = context_option;

View file

@ -1265,7 +1265,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
}
// entry point cannot be initialized immediately because SPU LS will be rewritten by sys_spu_thread_group_start()
//idm::get<named_thread<spu_thread>>(spurs->spus[num])->custom_task = [entry = spurs->spuImg.entry_point](spu_thread& spu)
//idm::get_unlocked<named_thread<spu_thread>>(spurs->spus[num])->custom_task = [entry = spurs->spuImg.entry_point](spu_thread& spu)
{
// Disabled
//spu.RegisterHleFunction(entry, spursKernelEntry);

View file

@ -659,7 +659,7 @@ extern bool check_if_vdec_contexts_exist()
extern void vdecEntry(ppu_thread& ppu, u32 vid)
{
idm::get<vdec_context>(vid)->exec(ppu, vid);
idm::get_unlocked<vdec_context>(vid)->exec(ppu, vid);
ppu.state += cpu_flag::exit;
}
@ -886,7 +886,7 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
}
// Create decoder context
std::shared_ptr<vdec_context> vdec;
shared_ptr<vdec_context> vdec;
if (std::unique_lock lock{g_fxo->get<hle_locks_t>(), std::try_to_lock})
{
@ -909,7 +909,7 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
ppu_execute<&sys_ppu_thread_create>(ppu, +_tid, 0x10000, vid, +res->ppuThreadPriority, +res->ppuThreadStackSize, SYS_PPU_THREAD_CREATE_INTERRUPT, +_name);
*handle = vid;
const auto thrd = idm::get<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
const auto thrd = idm::get_unlocked<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
thrd->cmd_list
({
@ -949,7 +949,7 @@ error_code cellVdecClose(ppu_thread& ppu, u32 handle)
return {};
}
auto vdec = idm::get<vdec_context>(handle);
auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec)
{
@ -1003,7 +1003,7 @@ error_code cellVdecStartSeq(ppu_thread& ppu, u32 handle)
cellVdec.warning("cellVdecStartSeq(handle=0x%x)", handle);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec)
{
@ -1055,7 +1055,7 @@ error_code cellVdecEndSeq(ppu_thread& ppu, u32 handle)
cellVdec.warning("cellVdecEndSeq(handle=0x%x)", handle);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec)
{
@ -1088,7 +1088,7 @@ error_code cellVdecDecodeAu(ppu_thread& ppu, u32 handle, CellVdecDecodeMode mode
cellVdec.trace("cellVdecDecodeAu(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
{
@ -1136,7 +1136,7 @@ error_code cellVdecDecodeAuEx2(ppu_thread& ppu, u32 handle, CellVdecDecodeMode m
cellVdec.todo("cellVdecDecodeAuEx2(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
{
@ -1192,7 +1192,7 @@ error_code cellVdecGetPictureExt(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecP
cellVdec.trace("cellVdecGetPictureExt(handle=0x%x, format=*0x%x, outBuff=*0x%x, arg4=*0x%x)", handle, format, outBuff, arg4);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec || !format)
{
@ -1245,7 +1245,7 @@ error_code cellVdecGetPictureExt(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecP
if (notify)
{
auto vdec_ppu = idm::get<named_thread<ppu_thread>>(vdec->ppu_tid);
auto vdec_ppu = idm::get_unlocked<named_thread<ppu_thread>>(vdec->ppu_tid);
if (vdec_ppu) thread_ctrl::notify(*vdec_ppu);
}
@ -1354,7 +1354,7 @@ error_code cellVdecGetPicItem(ppu_thread& ppu, u32 handle, vm::pptr<CellVdecPicI
cellVdec.trace("cellVdecGetPicItem(handle=0x%x, picItem=**0x%x)", handle, picItem);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec || !picItem)
{
@ -1596,7 +1596,7 @@ error_code cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frameRateCode)
{
cellVdec.trace("cellVdecSetFrameRate(handle=0x%x, frameRateCode=0x%x)", handle, +frameRateCode);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
// 0x80 seems like a common prefix
if (!vdec || (frameRateCode & 0xf8) != 0x80)
@ -1659,7 +1659,7 @@ error_code cellVdecSetPts(u32 handle, vm::ptr<void> unk)
{
cellVdec.error("cellVdecSetPts(handle=0x%x, unk=*0x%x)", handle, unk);
const auto vdec = idm::get<vdec_context>(handle);
const auto vdec = idm::get_unlocked<vdec_context>(handle);
if (!vdec || !unk)
{

View file

@ -205,7 +205,7 @@ error_code cellVpostClose(u32 handle)
{
cellVpost.warning("cellVpostClose(handle=0x%x)", handle);
const auto vpost = idm::get<VpostInstance>(handle);
const auto vpost = idm::get_unlocked<VpostInstance>(handle);
if (!vpost)
{
@ -220,7 +220,7 @@ error_code cellVpostExec(u32 handle, vm::cptr<u8> inPicBuff, vm::cptr<CellVpostC
{
cellVpost.trace("cellVpostExec(handle=0x%x, inPicBuff=*0x%x, ctrlParam=*0x%x, outPicBuff=*0x%x, picInfo=*0x%x)", handle, inPicBuff, ctrlParam, outPicBuff, picInfo);
const auto vpost = idm::get<VpostInstance>(handle);
const auto vpost = idm::get_unlocked<VpostInstance>(handle);
if (!vpost)
{

View file

@ -510,7 +510,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
libmixer.warning("*** surMixer created (ch1=%d, ch2=%d, ch6=%d, ch8=%d)", config->chStrips1, config->chStrips2, config->chStrips6, config->chStrips8);
//auto thread = idm::make_ptr<ppu_thread>("Surmixer Thread");
//auto thread = idm::make_ptr<named_thread<ppu_thread>>("Surmixer Thread");
return CELL_OK;
}

View file

@ -652,7 +652,7 @@ error_code npDrmIsAvailable(vm::cptr<u8> k_licensee_addr, vm::cptr<char> drm_pat
std::string enc_drm_path;
ensure(vm::read_string(drm_path.addr(), 0x100, enc_drm_path, true), "Secret access violation");
sceNp.warning(u8"npDrmIsAvailable(): drm_path=“%s”", enc_drm_path);
sceNp.warning("npDrmIsAvailable(): drm_path=\"%s\"", enc_drm_path);
auto& npdrmkeys = g_fxo->get<loaded_npdrm_keys>();
@ -5347,7 +5347,7 @@ error_code sceNpScoreCreateTransactionCtx(s32 titleCtxId)
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto score = idm::get<score_ctx>(titleCtxId);
auto score = idm::get_unlocked<score_ctx>(titleCtxId);
if (!score)
{
@ -5399,24 +5399,12 @@ error_code sceNpScoreSetTimeout(s32 ctxId, usecond_t timeout)
return SCE_NP_COMMUNITY_ERROR_INVALID_ARGUMENT;
}
const u32 idm_id = static_cast<u32>(ctxId);
if (idm_id >= score_transaction_ctx::id_base && idm_id < (score_transaction_ctx::id_base + score_transaction_ctx::id_count))
if (auto trans = idm::get_unlocked<score_transaction_ctx>(ctxId))
{
auto trans = idm::get<score_transaction_ctx>(ctxId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
trans->timeout = timeout;
}
else if (idm_id >= score_ctx::id_base && idm_id < (score_ctx::id_base + score_ctx::id_count))
else if (auto score = idm::get_unlocked<score_ctx>(ctxId))
{
auto score = idm::get<score_ctx>(ctxId);
if (!ctxId)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
score->timeout = timeout;
}
else
@ -5443,23 +5431,17 @@ error_code sceNpScoreSetPlayerCharacterId(s32 ctxId, SceNpScorePcId pcId)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
if (static_cast<u32>(ctxId) >= score_transaction_ctx::id_base)
if (auto trans = idm::get_unlocked<score_transaction_ctx>(ctxId))
{
auto trans = idm::get<score_transaction_ctx>(ctxId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
trans->pcId = pcId;
}
else if (auto score = idm::get_unlocked<score_ctx>(ctxId))
{
score->pcId = pcId;
}
else
{
auto score = idm::get<score_ctx>(ctxId);
if (!ctxId)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
score->pcId = pcId;
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
return CELL_OK;
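
The sceNpScoreSetTimeout and sceNpScoreSetPlayerCharacterId hunks above (and the matching sceNpTus hunks later in this diff) replace manual id_base/id_count range checks with simply attempting each typed lookup in turn; the removed branches also tested `if (!ctxId)` where the freshly fetched pointer was presumably meant, a slip the new form avoids. A minimal, self-contained sketch of the pattern with invented stand-ins for idm::get_unlocked:

// Editor's illustrative sketch -- not part of this commit. The lookups
// below are invented stand-ins for idm::get_unlocked<T>(id); the real
// context types carry far more state than a timeout field.
#include <cstdint>
#include <memory>

struct transaction_ctx { std::uint32_t timeout = 0; };
struct title_ctx { std::uint32_t timeout = 0; };

// Stubbed lookups so the sketch is self-contained.
std::shared_ptr<transaction_ctx> find_transaction(std::uint32_t) { return nullptr; }
std::shared_ptr<title_ctx> find_title(std::uint32_t) { return nullptr; }

enum class np_error { ok, invalid_id };

np_error set_timeout(std::uint32_t id, std::uint32_t timeout)
{
    // Try each typed lookup in turn instead of range-checking the raw ID.
    if (auto trans = find_transaction(id))
    {
        trans->timeout = timeout;
    }
    else if (auto ctx = find_title(id))
    {
        ctx->timeout = timeout;
    }
    else
    {
        return np_error::invalid_id; // neither lookup recognised the ID
    }

    return np_error::ok;
}
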
@ -5476,7 +5458,7 @@ error_code sceNpScoreWaitAsync(s32 transId, vm::ptr<s32> result)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<score_transaction_ctx>(transId);
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
@ -5498,7 +5480,7 @@ error_code sceNpScorePollAsync(s32 transId, vm::ptr<s32> result)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<score_transaction_ctx>(transId);
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
@ -5515,9 +5497,9 @@ error_code sceNpScorePollAsync(s32 transId, vm::ptr<s32> result)
return CELL_OK;
}
std::pair<std::optional<error_code>, std::shared_ptr<score_transaction_ctx>> get_score_transaction_context(s32 transId, bool reset_transaction = true)
std::pair<std::optional<error_code>, shared_ptr<score_transaction_ctx>> get_score_transaction_context(s32 transId, bool reset_transaction = true)
{
auto trans_ctx = idm::get<score_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<score_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -6217,7 +6199,7 @@ error_code sceNpScoreAbortTransaction(s32 transId)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<score_transaction_ctx>(transId);
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;

View file

@ -1837,7 +1837,7 @@ public:
virtual ~RecvMessageDialogBase() = default;
virtual error_code Exec(SceNpBasicMessageMainType type, SceNpBasicMessageRecvOptions options, SceNpBasicMessageRecvAction& recv_result, u64& chosen_msg_id) = 0;
virtual void callback_handler(const std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) = 0;
virtual void callback_handler(const shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) = 0;
protected:
std::shared_ptr<rpcn::rpcn_client> m_rpcn;

View file

@ -139,7 +139,7 @@ error_code sceNpSnsFbAbortHandle(u32 handle)
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
if (!sfh)
{
@ -172,7 +172,7 @@ error_code sceNpSnsFbGetAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTokenPa
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
if (!sfh)
{
@ -200,7 +200,7 @@ s32 sceNpSnsFbStreamPublish(u32 handle) // add more arguments
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
if (!sfh)
{
@ -258,7 +258,7 @@ s32 sceNpSnsFbLoadThrottle(u32 handle)
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
if (!sfh)
{
@ -299,7 +299,7 @@ error_code sceNpSnsFbGetLongAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTok
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
}
const auto sfh = idm::get<sns_fb_handle_t>(handle);
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
if (!sfh)
{

View file

@ -123,7 +123,7 @@ struct sce_np_trophy_manager
return res;
}
ctxt = idm::check<trophy_context_t>(context);
ctxt = idm::check_unlocked<trophy_context_t>(context);
if (!ctxt)
{
@ -144,7 +144,7 @@ struct sce_np_trophy_manager
return res;
}
const auto hndl = idm::check<trophy_handle_t>(handle);
const auto hndl = idm::check_unlocked<trophy_handle_t>(handle);
if (!hndl)
{
@ -409,7 +409,7 @@ error_code sceNpTrophyAbortHandle(u32 handle)
return SCE_NP_TROPHY_ERROR_INVALID_ARGUMENT;
}
const auto hndl = idm::check<trophy_handle_t>(handle);
const auto hndl = idm::check_unlocked<trophy_handle_t>(handle);
if (!hndl)
{
@ -552,7 +552,7 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
}
const auto [ctxt, error] = trophy_manager.get_context_ex(context, handle, true);
const auto handle_ptr = idm::get<trophy_handle_t>(handle);
const auto handle_ptr = idm::get_unlocked<trophy_handle_t>(handle);
if (error)
{
@ -641,7 +641,7 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
return SCE_NP_TROPHY_ERROR_UNKNOWN_CONTEXT;
}
if (handle_ptr.get() != idm::check<trophy_handle_t>(handle))
if (handle_ptr.get() != idm::check_unlocked<trophy_handle_t>(handle))
{
on_error();
return SCE_NP_TROPHY_ERROR_UNKNOWN_HANDLE;
@ -716,7 +716,6 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
// Create a counter which is destroyed after the function ends
const auto queued = std::make_shared<atomic_t<u32>>(0);
std::weak_ptr<atomic_t<u32>> wkptr = queued;
for (auto status : statuses)
{
@ -724,12 +723,11 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
*queued += status.second;
for (s32 completed = 0; completed <= status.second; completed++)
{
sysutil_register_cb([statusCb, status, context, completed, arg, wkptr](ppu_thread& cb_ppu) -> s32
sysutil_register_cb([statusCb, status, context, completed, arg, queued](ppu_thread& cb_ppu) -> s32
{
// TODO: it is possible that we need to check the return value here as well.
statusCb(cb_ppu, context, status.first, completed, status.second, arg);
const auto queued = wkptr.lock();
if (queued && (*queued)-- == 1)
{
queued->notify_one();

View file

@ -133,7 +133,7 @@ error_code sceNpTusCreateTransactionCtx(s32 titleCtxId)
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto tus = idm::get<tus_ctx>(titleCtxId);
auto tus = idm::get_unlocked<tus_ctx>(titleCtxId);
if (!tus)
{
@ -185,24 +185,12 @@ error_code sceNpTusSetTimeout(s32 ctxId, u32 timeout)
return SCE_NP_COMMUNITY_ERROR_INVALID_ARGUMENT;
}
const u32 idm_id = static_cast<u32>(ctxId);
if (idm_id >= tus_transaction_ctx::id_base && idm_id < (tus_transaction_ctx::id_base + tus_transaction_ctx::id_count))
if (auto trans = idm::get_unlocked<tus_transaction_ctx>(ctxId))
{
auto trans = idm::get<tus_transaction_ctx>(ctxId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
trans->timeout = timeout;
}
else if (idm_id >= tus_ctx::id_base && idm_id < (tus_ctx::id_base + tus_ctx::id_count))
else if (auto tus = idm::get_unlocked<tus_ctx>(ctxId))
{
auto tus = idm::get<tus_ctx>(ctxId);
if (!ctxId)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
}
tus->timeout = timeout;
}
else
@ -224,7 +212,7 @@ error_code sceNpTusAbortTransaction(s32 transId)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<tus_transaction_ctx>(transId);
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
@ -246,7 +234,7 @@ error_code sceNpTusWaitAsync(s32 transId, vm::ptr<s32> result)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<tus_transaction_ctx>(transId);
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
@ -268,7 +256,7 @@ error_code sceNpTusPollAsync(s32 transId, vm::ptr<s32> result)
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
}
auto trans = idm::get<tus_transaction_ctx>(transId);
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans)
{
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
@ -326,7 +314,7 @@ error_code scenp_tus_set_multislot_variable(s32 transId, T targetNpId, vm::cptr<
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -413,7 +401,7 @@ error_code scenp_tus_get_multislot_variable(s32 transId, T targetNpId, vm::cptr<
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -500,7 +488,7 @@ error_code scenp_tus_get_multiuser_variable(s32 transId, T targetNpIdArray, SceN
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -599,7 +587,7 @@ error_code scenp_tus_get_friends_variable(s32 transId, SceNpTusSlotId slotId, s3
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -659,7 +647,7 @@ error_code scenp_tus_add_and_get_variable(s32 transId, T targetNpId, SceNpTusSlo
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -736,7 +724,7 @@ error_code scenp_tus_try_and_set_variable(s32 transId, T targetNpId, SceNpTusSlo
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -813,7 +801,7 @@ error_code scenp_tus_delete_multislot_variable(s32 transId, T targetNpId, vm::cp
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -885,7 +873,7 @@ error_code scenp_tus_set_data(s32 transId, T targetNpId, SceNpTusSlotId slotId,
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -957,7 +945,7 @@ error_code scenp_tus_get_data(s32 transId, T targetNpId, SceNpTusSlotId slotId,
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1044,7 +1032,7 @@ error_code scenp_tus_get_multislot_data_status(s32 transId, T targetNpId, vm::cp
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1131,7 +1119,7 @@ error_code scenp_tus_get_multiuser_data_status(s32 transId, T targetNpIdArray, S
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1230,7 +1218,7 @@ error_code scenp_tus_get_friends_data_status(s32 transId, SceNpTusSlotId slotId,
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1295,7 +1283,7 @@ error_code scenp_tus_delete_multislot_data(s32 transId, T targetNpId, vm::cptr<S
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1337,7 +1325,7 @@ error_code sceNpTusDeleteMultiSlotDataVUserAsync(s32 transId, vm::cptr<SceNpTusV
return scenp_tus_delete_multislot_data(transId, targetVirtualUserId, slotIdArray, arrayNum, option, true, true);
}
void scenp_tss_no_file(const std::shared_ptr<tus_transaction_ctx>& trans, vm::ptr<SceNpTssDataStatus> dataStatus)
void scenp_tss_no_file(const shared_ptr<tus_transaction_ctx>& trans, vm::ptr<SceNpTssDataStatus> dataStatus)
{
// TSS are files stored on PSN by developers, no dumps available atm
std::memset(dataStatus.get_ptr(), 0, sizeof(SceNpTssDataStatus));
@ -1365,7 +1353,7 @@ error_code sceNpTssGetData(s32 transId, SceNpTssSlotId slotId, vm::ptr<SceNpTssD
return SCE_NP_COMMUNITY_ERROR_INSUFFICIENT_ARGUMENT;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{
@ -1398,7 +1386,7 @@ error_code sceNpTssGetDataAsync(s32 transId, SceNpTssSlotId slotId, vm::ptr<SceN
return SCE_NP_COMMUNITY_ERROR_INSUFFICIENT_ARGUMENT;
}
auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
if (!trans_ctx)
{

View file

@ -51,7 +51,7 @@ void config_event_entry(ppu_thread& ppu)
}
const u32 queue_id = cfg.queue_id;
auto queue = idm::get<lv2_obj, lv2_event_queue>(queue_id);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(queue_id);
while (queue && sys_event_queue_receive(ppu, queue_id, vm::null, 0) == CELL_OK)
{
@ -81,7 +81,7 @@ void config_event_entry(ppu_thread& ppu)
if (!queue->exists)
{
// Exit condition
queue = nullptr;
queue = null_ptr;
break;
}
@ -134,7 +134,7 @@ extern void send_sys_io_connect_event(usz index, u32 state)
if (cfg.init_ctr)
{
if (auto port = idm::get<lv2_obj, lv2_event_queue>(cfg.queue_id))
if (auto port = idm::get_unlocked<lv2_obj, lv2_event_queue>(cfg.queue_id))
{
port->send(0, 1, index, state);
}

View file

@ -60,7 +60,7 @@ error_code sys_mempool_create(ppu_thread& ppu, vm::ptr<sys_mempool_t> mempool, v
auto id = idm::make<memory_pool_t>();
*mempool = id;
auto memory_pool = idm::get<memory_pool_t>(id);
auto memory_pool = idm::get_unlocked<memory_pool_t>(id);
memory_pool->chunk = chunk;
memory_pool->chunk_size = chunk_size;
@ -114,7 +114,7 @@ void sys_mempool_destroy(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_destroy(mempool=%d)", mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (memory_pool)
{
u32 condid = memory_pool->condid;
@ -136,7 +136,7 @@ error_code sys_mempool_free_block(ppu_thread& ppu, sys_mempool_t mempool, vm::pt
{
sysPrxForUser.warning("sys_mempool_free_block(mempool=%d, block=*0x%x)", mempool, block);
auto memory_pool = idm::get<memory_pool_t>(mempool);
auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (!memory_pool)
{
return CELL_EINVAL;
@ -160,7 +160,7 @@ u64 sys_mempool_get_count(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_get_count(mempool=%d)", mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (!memory_pool)
{
return CELL_EINVAL;
@ -175,7 +175,7 @@ vm::ptr<void> sys_mempool_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
{
sysPrxForUser.warning("sys_mempool_allocate_block(mempool=%d)", mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (!memory_pool)
{ // if the memory pool gets deleted-- is null, clearly it's impossible to allocate memory.
return vm::null;
@ -185,7 +185,7 @@ vm::ptr<void> sys_mempool_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
while (memory_pool->free_blocks.empty()) // while is to guard against spurious wakeups
{
sys_cond_wait(ppu, memory_pool->condid, 0);
memory_pool = idm::get<memory_pool_t>(mempool);
memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (!memory_pool) // in case spurious wake up was from delete, don't die by accessing a freed pool.
{ // No need to unlock as if the pool is freed, the lock was freed as well.
return vm::null;
@ -202,7 +202,7 @@ vm::ptr<void> sys_mempool_try_allocate_block(ppu_thread& ppu, sys_mempool_t memp
{
sysPrxForUser.warning("sys_mempool_try_allocate_block(mempool=%d)", mempool);
auto memory_pool = idm::get<memory_pool_t>(mempool);
auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
if (!memory_pool || memory_pool->free_blocks.empty())
{

View file

@ -1,6 +1,8 @@
#include "stdafx.h"
#include "PPUAnalyser.h"
#include "lv2/sys_sync.h"
#include "PPUOpcodes.h"
#include "PPUThread.h"
@ -37,7 +39,8 @@ void fmt_class_string<bs_t<ppu_attr>>::format(std::string& out, u64 arg)
format_bitset(out, arg, "[", ",", "]", &fmt_class_string<ppu_attr>::format);
}
void ppu_module::validate(u32 reloc)
template <>
void ppu_module<lv2_obj>::validate(u32 reloc)
{
// Load custom PRX configuration if available
if (fs::file yml{path + ".yml"})
@ -529,7 +532,8 @@ namespace ppu_patterns
};
}
bool ppu_module::analyse(u32 lib_toc, u32 entry, const u32 sec_end, const std::vector<u32>& applied, const std::vector<u32>& exported_funcs, std::function<bool()> check_aborted)
template <>
bool ppu_module<lv2_obj>::analyse(u32 lib_toc, u32 entry, const u32 sec_end, const std::vector<u32>& applied, const std::vector<u32>& exported_funcs, std::function<bool()> check_aborted)
{
if (segs.empty())
{

View file

@ -72,8 +72,11 @@ struct ppu_segment
};
// PPU Module Information
struct ppu_module
template <typename Type>
struct ppu_module : public Type
{
using Type::Type;
ppu_module() noexcept = default;
ppu_module(const ppu_module&) = delete;
@ -177,7 +180,8 @@ struct ppu_module
}
};
struct main_ppu_module : public ppu_module
template <typename T>
struct main_ppu_module : public ppu_module<T>
{
u32 elf_entry{};
u32 seg0_code_end{};
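
The two PPUAnalyser hunks above turn ppu_module into a class template deriving from its template parameter (instantiated as ppu_module<lv2_obj> throughout this commit), with the previously ordinary member definitions becoming explicit specializations such as `template <> void ppu_module<lv2_obj>::validate(u32)`. Below is a minimal sketch of that base-injection pattern using invented names only; it is not the project's code.

// Editor's illustrative sketch -- not part of this commit. kernel_object
// and module_info are invented names; the pattern is the same: the module
// struct derives from its template parameter, and members that used to be
// ordinary definitions become explicit specializations per instantiation.
#include <string>
#include <vector>

struct kernel_object
{
    kernel_object() = default;
    std::string name;
};

template <typename Base>
struct module_info : public Base
{
    using Base::Base;

    module_info() noexcept = default;
    module_info(const module_info&) = delete;

    std::string path;
    std::vector<unsigned> funcs;

    void validate(unsigned reloc); // defined per instantiation below
};

// Mirrors `template <> void ppu_module<lv2_obj>::validate(u32 reloc)`.
template <>
void module_info<kernel_object>::validate(unsigned /*reloc*/)
{
    // validation logic for this instantiation would live here
}

int main()
{
    module_info<kernel_object> m;
    m.path = "example.prx"; // illustrative value
    m.validate(0);
}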

View file

@ -576,7 +576,7 @@ extern const std::unordered_map<u32, std::string_view>& get_exported_function_na
}
// Resolve relocations for variable/function linkage.
static void ppu_patch_refs(const ppu_module& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr)
static void ppu_patch_refs(const ppu_module<lv2_obj>& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr)
{
struct ref_t
{
@ -704,7 +704,7 @@ extern bool ppu_register_library_lock(std::string_view libname, bool lock_lib)
}
// Load and register exports; return special exports found (nameless module)
static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::vector<u32>* funcs = nullptr, std::basic_string<char>* loaded_flags = nullptr)
static auto ppu_load_exports(const ppu_module<lv2_obj>& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::vector<u32>* funcs = nullptr, std::basic_string<char>* loaded_flags = nullptr)
{
std::unordered_map<u32, u32> result;
@ -803,7 +803,7 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
const auto fnids = +lib.nids;
const auto faddrs = +lib.addrs;
u32 previous_rtoc = umax;
u64 previous_rtoc = umax;
// Get functions
for (u32 i = 0, end = lib.num_func; i < end; i++)
@ -816,21 +816,22 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
{
if (previous_rtoc == fdata.rtoc)
{
ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x] rtoc=same", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr);
// Shortened printing, replacement string is 10 characters as 0x%08x
ppu_loader.notice("**** %s export: (0x%08x) at 0x%07x [at:0x%07x, rtoc:same-above]: %s", module_name, fnid, faddr, fdata.addr, ppu_get_function_name(module_name, fnid));
}
else
{
previous_rtoc = fdata.rtoc;
ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x] rtoc=0x%x", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr, fdata.rtoc);
ppu_loader.notice("**** %s export: (0x%08x) at 0x%07x [at:0x%07x, rtoc:0x%08x]: %s", module_name, fnid, faddr, fdata.addr, fdata.rtoc, ppu_get_function_name(module_name, fnid));
}
}
else if (fptr)
{
ppu_loader.error("**** %s export: [%s] (0x%08x) at 0x%x [Invalid Function Address: 0x%x!]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr);
ppu_loader.error("**** %s export: (0x%08x) at 0x%07x [Invalid Function Address: 0x%07x!]: '%s'", module_name, fnid, faddr, fdata.addr, ppu_get_function_name(module_name, fnid));
}
else
{
ppu_loader.warning("**** %s export: [%s] (0x%08x) at 0x%x [Illegal Descriptor Address!]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr);
ppu_loader.warning("**** %s export: (0x%08x) at 0x%07x [Illegal Descriptor Address!]: '%s'", module_name, fnid, faddr, ppu_get_function_name(module_name, fnid));
}
if (funcs)
@ -938,7 +939,7 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
return result;
}
static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end)
static auto ppu_load_imports(const ppu_module<lv2_obj>& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end)
{
std::unordered_map<u32, void*> result;
@ -1030,10 +1031,10 @@ static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>&
// For _sys_prx_register_module
void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags)
{
auto& _main = g_fxo->get<main_ppu_module>();
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
auto& link = g_fxo->get<ppu_linkage_info>();
ppu_module vm_all_fake_module{};
ppu_module<lv2_obj> vm_all_fake_module{};
vm_all_fake_module.segs.emplace_back(ppu_segment{0x10000, 0 - 0x10000u, 1 /*LOAD*/, 0, 0 - 0x1000u, vm::base(0x10000)});
vm_all_fake_module.addr_to_seg_index.emplace(0x10000, 0);
@ -1130,7 +1131,7 @@ void init_ppu_functions(utils::serial* ar, bool full = false)
}
}
static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment& seg)
static void ppu_check_patch_spu_images(const ppu_module<lv2_obj>& mod, const ppu_segment& seg)
{
if (!seg.size)
{
@ -1139,7 +1140,7 @@ static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment&
const bool is_firmware = mod.path.starts_with(vfs::get("/dev_flash/"));
const auto _main = g_fxo->try_get<main_ppu_module>();
const auto _main = g_fxo->try_get<main_ppu_module<lv2_obj>>();
const std::string_view seg_view{ensure(mod.get_ptr<char>(seg.addr)), seg.size};
@ -1430,10 +1431,10 @@ static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment&
}
}
void try_spawn_ppu_if_exclusive_program(const ppu_module& m)
void try_spawn_ppu_if_exclusive_program(const ppu_module<lv2_obj>& m)
{
// If only PRX/OVL has been loaded at Emu.BootGame(), launch a single PPU thread so its memory can be viewed
if (Emu.IsReady() && g_fxo->get<main_ppu_module>().segs.empty() && !Emu.DeserialManager())
if (Emu.IsReady() && g_fxo->get<main_ppu_module<lv2_obj>>().segs.empty() && !Emu.DeserialManager())
{
ppu_thread_params p
{
@ -1521,15 +1522,15 @@ const char* get_prx_name_by_cia(u32 addr)
return nullptr;
}
std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
{
if (elf != elf_error::ok)
{
return nullptr;
return null_ptr;
}
// Create new PRX object
const auto prx = !ar && !virtual_load ? idm::make_ptr<lv2_obj, lv2_prx>() : std::make_shared<lv2_prx>();
const auto prx = !ar && !virtual_load ? idm::make_ptr<lv2_obj, lv2_prx>() : make_shared<lv2_prx>();
// Access linkage information object
auto& link = g_fxo->get<ppu_linkage_info>();
@ -2054,7 +2055,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
init_ppu_functions(ar, false);
// Set for delayed initialization in ppu_initialize()
auto& _main = g_fxo->get<main_ppu_module>();
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
// Access linkage information object
auto& link = g_fxo->get<ppu_linkage_info>();
@ -2080,7 +2081,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
struct on_fatal_error
{
ppu_module& _main;
ppu_module<lv2_obj>& _main;
bool errored = true;
~on_fatal_error()
@ -2498,7 +2499,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
}
// Initialize process
std::vector<std::shared_ptr<lv2_prx>> loaded_modules;
std::vector<shared_ptr<lv2_prx>> loaded_modules;
// Module list to load at startup
std::set<std::string> load_libs;
@ -2778,11 +2779,11 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
return true;
}
std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
{
if (elf != elf_error::ok)
{
return {nullptr, CELL_ENOENT};
return {null_ptr, CELL_ENOENT};
}
// Access linkage information object
@ -2804,12 +2805,12 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
if (!r.valid() || !r.inside(addr_range::start_length(0x30000000, 0x10000000)))
{
// TODO: Check error and if there's a better way to error check
return {nullptr, CELL_ENOEXEC};
return {null_ptr, CELL_ENOEXEC};
}
}
}
std::shared_ptr<lv2_overlay> ovlm = std::make_shared<lv2_overlay>();
shared_ptr<lv2_overlay> ovlm = make_shared<lv2_overlay>();
// Set path (TODO)
ovlm->name = path.substr(path.find_last_of('/') + 1);
@ -2859,7 +2860,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
if (!vm::check_addr(addr, vm::page_readable, size))
{
ppu_loader.error("ppu_load_overlay(): Archived PPU overlay memory has not been found! (addr=0x%x, memsz=0x%x)", addr, size);
return {nullptr, CELL_EABORT};
return {null_ptr, CELL_EABORT};
}
}
else if (!vm::get(vm::any, 0x30000000)->falloc(addr, size))
@ -2873,7 +2874,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
}
// TODO: Check error code, maybe disallow more than one overlay instance completely
return {nullptr, CELL_EBUSY};
return {null_ptr, CELL_EBUSY};
}
// Store only LOAD segments (TODO)
@ -3088,7 +3089,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
return !!(cpu->state & cpu_flag::exit);
}))
{
return {nullptr, CellError{CELL_CANCEL + 0u}};
return {null_ptr, CellError{CELL_CANCEL + 0u}};
}
// Validate analyser results (not required)
@ -3105,11 +3106,11 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
bool ppu_load_rel_exec(const ppu_rel_object& elf)
{
ppu_module relm{};
ppu_module<lv2_obj> relm{};
struct on_fatal_error
{
ppu_module& relm;
ppu_module<lv2_obj>& relm;
bool errored = true;
~on_fatal_error()

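The loader entry points above now return the emulator's own shared_ptr and report failure as null_ptr paired with a CellError, as in ppu_load_overlay. A minimal standalone sketch of that return convention in plain standard C++; Overlay, Error and load_overlay are illustrative names, not RPCS3 API.

#include <memory>
#include <string>
#include <utility>

enum class Error { none, noent, noexec };

struct Overlay { std::string name; };

// Either a valid object with Error::none, or an empty pointer plus a reason.
std::pair<std::shared_ptr<Overlay>, Error> load_overlay(const std::string& path)
{
    if (path.empty())
        return {nullptr, Error::noent};

    auto ovl = std::make_shared<Overlay>();
    ovl->name = path.substr(path.find_last_of('/') + 1); // same name extraction as the hunk above
    return {std::move(ovl), Error::none};
}

int main()
{
    auto [ovl, err] = load_overlay("/dev_hdd0/game/EBOOT.BIN");
    return (ovl && err == Error::none) ? 0 : 1;
}

A caller unpacks the pair and treats an empty pointer as failure, mirroring the CELL_ENOENT / CELL_EBUSY paths in the hunk above.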
View file

@ -174,13 +174,13 @@ bool serialize<ppu_thread::cr_bits>(utils::serial& ar, typename ppu_thread::cr_b
}
extern void ppu_initialize();
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module& info, bool check_only = false, u64 file_size = 0);
static void ppu_initialize2(class jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module);
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module<lv2_obj>& info, bool check_only = false, u64 file_size = 0);
static void ppu_initialize2(class jit_compiler& jit, const ppu_module<lv2_obj>& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module<lv2_obj>& whole_module);
extern bool ppu_load_exec(const ppu_exec_object&, bool virtual_load, const std::string&, utils::serial* = nullptr);
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* = nullptr);
extern std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx&);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 file_offset, utils::serial* = nullptr);
extern shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 file_offset, utils::serial* = nullptr);
extern void ppu_execute_syscall(ppu_thread& ppu, u64 code);
static void ppu_break(ppu_thread&, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*);
@ -550,7 +550,7 @@ u32 ppu_read_mmio_aware_u32(u8* vm_base, u32 eal)
if (eal >= RAW_SPU_BASE_ADDR)
{
// RawSPU MMIO
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
if (!thread)
{
@ -578,7 +578,7 @@ void ppu_write_mmio_aware_u32(u8* vm_base, u32 eal, u32 value)
if (eal >= RAW_SPU_BASE_ADDR)
{
// RawSPU MMIO
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
if (!thread)
{
@ -3450,7 +3450,7 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
{
if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
{
perf_log.warning(u8"STCX: took too long: %.3fµs (%u c)", count / (utils::get_tsc_freq() / 1000'000.), count);
perf_log.warning("STCX: took too long: %.3fus (%u c)", count / (utils::get_tsc_freq() / 1000'000.), count);
}
break;
@ -3837,7 +3837,7 @@ extern fs::file make_file_view(fs::file&& _file, u64 offset, u64 max_size = umax
return file;
}
extern void ppu_finalize(const ppu_module& info, bool force_mem_release)
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release)
{
if (info.segs.empty())
{
@ -3885,7 +3885,7 @@ extern void ppu_finalize(const ppu_module& info, bool force_mem_release)
#endif
}
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module*>* loaded_modules)
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module<lv2_obj>*>* loaded_modules)
{
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
{
@ -3978,7 +3978,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
if (loaded_modules)
{
if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module* obj)
if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module<lv2_obj>* obj)
{
return obj->name == entry.name;
}))
@ -4311,7 +4311,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
auto slice = possible_exec_file_paths.pop_all();
auto main_module = std::move(g_fxo->get<main_ppu_module>());
auto main_module = std::move(g_fxo->get<main_ppu_module<lv2_obj>>());
for (; slice; slice.pop_front(), g_progr_fdone++)
{
@ -4348,7 +4348,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
{
while (exec_err == elf_error::ok)
{
main_ppu_module& _main = g_fxo->get<main_ppu_module>();
main_ppu_module<lv2_obj>& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
_main = {};
auto current_cache = std::move(g_fxo->get<spu_cache>());
@ -4393,7 +4393,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
ppu_log.notice("Failed to precompile '%s' as executable (%s)", path, exec_err);
}
g_fxo->get<main_ppu_module>() = std::move(main_module);
g_fxo->get<main_ppu_module<lv2_obj>>() = std::move(main_module);
g_fxo->get<spu_cache>().collect_funcs_to_precompile = true;
Emu.ConfigurePPUCache();
});
@ -4403,7 +4403,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
extern void ppu_initialize()
{
if (!g_fxo->is_init<main_ppu_module>())
if (!g_fxo->is_init<main_ppu_module<lv2_obj>>())
{
return;
}
@ -4413,7 +4413,7 @@ extern void ppu_initialize()
return;
}
auto& _main = g_fxo->get<main_ppu_module>();
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
std::optional<scoped_progress_dialog> progress_dialog(std::in_place, get_localized_string(localized_string_id::PROGRESS_DIALOG_ANALYZING_PPU_EXECUTABLE));
@ -4436,7 +4436,7 @@ extern void ppu_initialize()
compile_main = ppu_initialize(_main, true);
}
std::vector<ppu_module*> module_list;
std::vector<ppu_module<lv2_obj>*> module_list;
const std::string firmware_sprx_path = vfs::get("/dev_flash/sys/external/");
@ -4541,7 +4541,7 @@ extern void ppu_initialize()
}
}
bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
bool ppu_initialize(const ppu_module<lv2_obj>& info, bool check_only, u64 file_size)
{
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
{
@ -4668,7 +4668,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
const u32 reloc = info.relocs.empty() ? 0 : ::at32(info.segs, 0).addr;
// Info sent to threads
std::vector<std::pair<std::string, ppu_module>> workload;
std::vector<std::pair<std::string, ppu_module<lv2_obj>>> workload;
// Info to load to main JIT instance (true - compiled)
std::vector<std::pair<std::string, bool>> link_workload;
@ -4733,7 +4733,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
}
// Copy module information (TODO: optimize)
ppu_module part;
ppu_module<lv2_obj> part;
part.copy_part(info);
part.funcs.reserve(16000);
@ -5035,15 +5035,15 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
struct thread_op
{
atomic_t<u32>& work_cv;
std::vector<std::pair<std::string, ppu_module>>& workload;
const ppu_module& main_module;
std::vector<std::pair<std::string, ppu_module<lv2_obj>>>& workload;
const ppu_module<lv2_obj>& main_module;
const std::string& cache_path;
const cpu_thread* cpu;
std::unique_lock<decltype(jit_core_allocator::sem)> core_lock;
thread_op(atomic_t<u32>& work_cv, std::vector<std::pair<std::string, ppu_module>>& workload
, const cpu_thread* cpu, const ppu_module& main_module, const std::string& cache_path, decltype(jit_core_allocator::sem)& sem) noexcept
thread_op(atomic_t<u32>& work_cv, std::vector<std::pair<std::string, ppu_module<lv2_obj>>>& workload
, const cpu_thread* cpu, const ppu_module<lv2_obj>& main_module, const std::string& cache_path, decltype(jit_core_allocator::sem)& sem) noexcept
: work_cv(work_cv)
, workload(workload)
@ -5257,7 +5257,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
#endif
}
static void ppu_initialize2(jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module)
static void ppu_initialize2(jit_compiler& jit, const ppu_module<lv2_obj>& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module<lv2_obj>& whole_module)
{
#ifdef LLVM_AVAILABLE
using namespace llvm;

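In ppu_precompile above, the global main_ppu_module is moved out of the fixed-object storage, the slot is reused for each candidate executable, and the original is moved back afterwards. A small standalone sketch of that move-out/restore idiom, assuming standard C++ only; Module, g_main and the path are placeholders.

#include <string>
#include <utility>

struct Module
{
    std::string name;
    // ... analysis state ...
};

Module g_main; // stand-in for g_fxo->get<main_ppu_module<lv2_obj>>()

void precompile_candidate(const std::string& path)
{
    g_main = {};        // reuse the global slot for the candidate
    g_main.name = path;
    // ... analyze / compile ...
}

void precompile_all()
{
    Module saved = std::move(g_main); // park the real main module

    precompile_candidate("/dev_hdd0/game/DEMO/USRDIR/EBOOT.BIN");

    g_main = std::move(saved);        // restore it when done
}

int main()
{
    g_main.name = "main";
    precompile_all();
    return g_main.name == "main" ? 0 : 1;
}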
View file

@ -3,6 +3,7 @@
#include "Emu/system_config.h"
#include "Emu/Cell/Common.h"
#include "Emu/Cell/lv2/sys_sync.h"
#include "PPUTranslator.h"
#include "PPUThread.h"
#include "SPUThread.h"
@ -28,7 +29,7 @@ const ppu_decoder<PPUTranslator> s_ppu_decoder;
extern const ppu_decoder<ppu_itype> g_ppu_itype;
extern const ppu_decoder<ppu_iname> g_ppu_iname;
PPUTranslator::PPUTranslator(LLVMContext& context, Module* _module, const ppu_module& info, ExecutionEngine& engine)
PPUTranslator::PPUTranslator(LLVMContext& context, Module* _module, const ppu_module<lv2_obj>& info, ExecutionEngine& engine)
: cpu_translator(_module, false)
, m_info(info)
, m_pure_attr()
@ -322,7 +323,7 @@ Function* PPUTranslator::Translate(const ppu_function& info)
return m_function;
}
Function* PPUTranslator::GetSymbolResolver(const ppu_module& info)
Function* PPUTranslator::GetSymbolResolver(const ppu_module<lv2_obj>& info)
{
m_function = cast<Function>(m_module->getOrInsertFunction("__resolve_symbols", FunctionType::get(get_type<void>(), { get_type<u8*>(), get_type<u64>() }, false)).getCallee());

View file

@ -8,10 +8,15 @@
#include "util/types.hpp"
template <typename T>
struct ppu_module;
struct lv2_obj;
class PPUTranslator final : public cpu_translator
{
// PPU Module
const ppu_module& m_info;
const ppu_module<lv2_obj>& m_info;
// Relevant relocations
std::map<u64, const ppu_reloc*> m_relocs;
@ -331,7 +336,7 @@ public:
// Handle compilation errors
void CompilationError(const std::string& error);
PPUTranslator(llvm::LLVMContext& context, llvm::Module* _module, const ppu_module& info, llvm::ExecutionEngine& engine);
PPUTranslator(llvm::LLVMContext& context, llvm::Module* _module, const ppu_module<lv2_obj>& info, llvm::ExecutionEngine& engine);
~PPUTranslator();
// Get thread context struct type
@ -339,7 +344,7 @@ public:
// Parses PPU opcodes and translate them into LLVM IR
llvm::Function* Translate(const ppu_function& info);
llvm::Function* GetSymbolResolver(const ppu_module& info);
llvm::Function* GetSymbolResolver(const ppu_module<lv2_obj>& info);
void MFVSCR(ppu_opcode_t op);
void MTVSCR(ppu_opcode_t op);

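The header change above replaces the full module include with a forward declaration of the ppu_module template and the lv2_obj tag, which is sufficient because the translator only stores a reference. A self-contained sketch of the same technique under assumed placeholder names (module_info, tag, translator):

// translator.hpp - only references the template, so forward declarations suffice
template <typename T>
struct module_info; // definition lives in another header

struct tag;         // incomplete type is fine as a template argument used by reference

class translator
{
    const module_info<tag>& m_info; // no complete type needed for a reference member

public:
    explicit translator(const module_info<tag>& info) : m_info(info) {}
};

// module.hpp / a .cpp - the full definitions
template <typename T>
struct module_info { int segments = 0; };

struct tag {};

int main()
{
    module_info<tag> info;
    translator t{info};
    return 0;
}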
View file

@ -2415,7 +2415,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
if (eal < SYS_SPU_THREAD_BASE_LOW)
{
// RawSPU MMIO
auto thread = idm::get<named_thread<spu_thread>>(find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
auto thread = idm::get_unlocked<named_thread<spu_thread>>(find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
if (!thread)
{
@ -3837,7 +3837,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
if (count2 > 20000 && g_cfg.core.perf_report) [[unlikely]]
{
perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr=0x%x) (S)", count2 / (utils::get_tsc_freq() / 1000'000.), count2, addr);
perf_log.warning("PUTLLC: took too long: %.3fus (%u c) (addr=0x%x) (S)", count2 / (utils::get_tsc_freq() / 1000'000.), count2, addr);
}
if (ok)
@ -3872,7 +3872,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
{
if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
{
perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr = 0x%x)", count / (utils::get_tsc_freq() / 1000'000.), count, addr);
perf_log.warning("PUTLLC: took too long: %.3fus (%u c) (addr = 0x%x)", count / (utils::get_tsc_freq() / 1000'000.), count, addr);
}
break;
@ -4087,7 +4087,7 @@ void do_cell_atomic_128_store(u32 addr, const void* to_write)
if (result > 20000 && g_cfg.core.perf_report) [[unlikely]]
{
perf_log.warning(u8"STORE128: took too long: %.3fµs (%u c) (addr=0x%x)", result / (utils::get_tsc_freq() / 1000'000.), result, addr);
perf_log.warning("STORE128: took too long: %.3fus (%u c) (addr=0x%x)", result / (utils::get_tsc_freq() / 1000'000.), result, addr);
}
static_cast<void>(cpu->test_stopped());
@ -6007,7 +6007,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
spu_function_logger logger(*this, "sys_spu_thread_send_event");
std::shared_ptr<lv2_event_queue> queue;
shared_ptr<lv2_event_queue> queue;
{
std::lock_guard lock(group->mutex);
@ -6059,7 +6059,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
spu_function_logger logger(*this, "sys_spu_thread_throw_event");
std::shared_ptr<lv2_event_queue> queue;
shared_ptr<lv2_event_queue> queue;
{
std::lock_guard lock{group->mutex};
queue = this->spup[spup];
@ -6447,7 +6447,7 @@ bool spu_thread::stop_and_signal(u32 code)
return true;
}
auto get_queue = [this](u32 spuq) -> const std::shared_ptr<lv2_event_queue>&
auto get_queue = [this](u32 spuq) -> const shared_ptr<lv2_event_queue>&
{
for (auto& v : this->spuq)
{
@ -6460,7 +6460,7 @@ bool spu_thread::stop_and_signal(u32 code)
}
}
static const std::shared_ptr<lv2_event_queue> empty;
static const shared_ptr<lv2_event_queue> empty;
return empty;
};
@ -6523,7 +6523,7 @@ bool spu_thread::stop_and_signal(u32 code)
spu_function_logger logger(*this, "sys_spu_thread_receive_event");
std::shared_ptr<lv2_event_queue> queue;
shared_ptr<lv2_event_queue> queue;
while (true)
{
@ -6665,7 +6665,7 @@ bool spu_thread::stop_and_signal(u32 code)
spu_log.trace("sys_spu_thread_tryreceive_event(spuq=0x%x)", spuq);
std::shared_ptr<lv2_event_queue> queue;
shared_ptr<lv2_event_queue> queue;
reader_lock{group->mutex}, queue = get_queue(spuq);

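The get_queue lambda above returns a const reference to a shared_ptr and falls back to a function-local static empty pointer when no key matches, so the returned reference can never dangle. A standalone sketch of that lookup pattern, assuming std::shared_ptr and illustrative names (Queue, Registry):

#include <array>
#include <memory>
#include <utility>

struct Queue { int key; };

struct Registry
{
    // Fixed-size table of (key, queue) slots, like spuq in the diff above
    std::array<std::pair<int, std::shared_ptr<Queue>>, 8> slots{};

    const std::shared_ptr<Queue>& find(int key) const
    {
        for (const auto& v : slots)
        {
            if (v.first == key && v.second)
            {
                return v.second;
            }
        }

        // Returning a reference to a local would dangle; a static empty
        // pointer gives callers a stable "not found" object instead.
        static const std::shared_ptr<Queue> empty;
        return empty;
    }
};

int main()
{
    Registry r;
    r.slots[0] = {7, std::make_shared<Queue>(Queue{7})};
    return r.find(7) && !r.find(8) ? 0 : 1;
}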
View file

@ -453,7 +453,7 @@ struct spu_int_ctrl_t
atomic_t<u64> mask;
atomic_t<u64> stat;
std::shared_ptr<struct lv2_int_tag> tag;
shared_ptr<struct lv2_int_tag> tag;
void set(u64 ints);
@ -755,8 +755,8 @@ public:
atomic_t<status_npc_sync_var> status_npc{};
std::array<spu_int_ctrl_t, 3> int_ctrl{}; // SPU Class 0, 1, 2 Interrupt Management
std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq{}; // Event Queue Keys for SPU Thread
std::shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
std::array<std::pair<u32, shared_ptr<lv2_event_queue>>, 32> spuq{}; // Event Queue Keys for SPU Thread
shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
spu_channel exit_status{}; // Threaded SPU exit status (not a channel, but the interface fits)
atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination
lv2_spu_group* const group; // SPU Thread Group (access by the spu threads in the group only! From other threads obtain a shared pointer to group using group ID)

View file

@ -14,11 +14,21 @@
LOG_CHANNEL(sys_cond);
lv2_cond::lv2_cond(utils::serial& ar)
lv2_cond::lv2_cond(utils::serial& ar) noexcept
: key(ar)
, name(ar)
, mtx_id(ar)
, mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
, mutex(idm::check_unlocked<lv2_obj, lv2_mutex>(mtx_id))
, _mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
{
}
lv2_cond::lv2_cond(u64 key, u64 name, u32 mtx_id, shared_ptr<lv2_obj> mutex0) noexcept
: key(key)
, name(name)
, mtx_id(mtx_id)
, mutex(static_cast<lv2_mutex*>(mutex0.get()))
, _mutex(mutex0)
{
}
@ -49,7 +59,8 @@ CellError lv2_cond::on_id_create()
{
if (!mutex)
{
mutex = ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id));
_mutex = static_cast<shared_ptr<lv2_obj>>(ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)));
}
// Defer function
@ -59,10 +70,9 @@ CellError lv2_cond::on_id_create()
return {};
}
std::shared_ptr<void> lv2_cond::load(utils::serial& ar)
std::function<void(void*)> lv2_cond::load(utils::serial& ar)
{
auto cond = std::make_shared<lv2_cond>(ar);
return lv2_obj::load(cond->key, cond);
return load_func(make_shared<lv2_cond>(ar));
}
void lv2_cond::save(utils::serial& ar)
@ -76,7 +86,7 @@ error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id,
sys_cond.trace("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);
auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
auto mutex = idm::get_unlocked<lv2_obj, lv2_mutex>(mutex_id);
if (!mutex)
{
@ -94,7 +104,7 @@ error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id,
if (const auto error = lv2_obj::create<lv2_cond>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_cond>(
return make_single<lv2_cond>(
ipc_key,
_attr.name_u64,
mutex_id,

View file

@ -26,19 +26,14 @@ struct lv2_cond final : lv2_obj
const u64 name;
const u32 mtx_id;
std::shared_ptr<lv2_mutex> mutex; // Associated Mutex
lv2_mutex* mutex; // Associated Mutex
shared_ptr<lv2_obj> _mutex;
ppu_thread* sq{};
lv2_cond(u64 key, u64 name, u32 mtx_id, std::shared_ptr<lv2_mutex> mutex)
: key(key)
, name(name)
, mtx_id(mtx_id)
, mutex(std::move(mutex))
{
}
lv2_cond(u64 key, u64 name, u32 mtx_id, shared_ptr<lv2_obj> mutex0) noexcept;
lv2_cond(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
lv2_cond(utils::serial& ar) noexcept;
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
CellError on_id_create();

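The new lv2_cond layout keeps a plain lv2_mutex* for access on the hot path and a separate type-erased shared_ptr<lv2_obj> purely to pin the mutex's lifetime. A hedged standalone sketch of that split, using std::shared_ptr and made-up names (Base, Mutex, Cond):

#include <memory>

struct Base { virtual ~Base() = default; };
struct Mutex : Base { void lock() {} void unlock() {} };

struct Cond
{
    Mutex* mutex;                // typed pointer used on the hot path
    std::shared_ptr<Base> owner; // type-erased reference that keeps the mutex alive

    explicit Cond(std::shared_ptr<Base> m) noexcept
        : mutex(static_cast<Mutex*>(m.get()))
        , owner(std::move(m))
    {
    }
};

int main()
{
    Cond c{std::make_shared<Mutex>()};
    c.mutex->lock();   // no refcount traffic on each access
    c.mutex->unlock();
    return 0;
}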
View file

@ -101,10 +101,10 @@ void lv2_config::initialize()
lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER2, 0, 1, 0, hid_info, 0x1a)->notify();
}
void lv2_config::add_service_event(const std::shared_ptr<lv2_config_service_event>& event)
void lv2_config::add_service_event(shared_ptr<lv2_config_service_event> event)
{
std::lock_guard lock(m_mutex);
events.emplace(event->id, event);
events.emplace(event->id, std::move(event));
}
void lv2_config::remove_service_event(u32 id)
@ -140,13 +140,13 @@ bool lv2_config_service_listener::check_service(const lv2_config_service& servic
return true;
}
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service_event>& event)
bool lv2_config_service_listener::notify(const shared_ptr<lv2_config_service_event>& event)
{
service_events.emplace_back(event);
return event->notify();
}
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service>& service)
bool lv2_config_service_listener::notify(const shared_ptr<lv2_config_service>& service)
{
if (!check_service(*service))
return false;
@ -158,7 +158,7 @@ bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_servic
void lv2_config_service_listener::notify_all()
{
std::vector<std::shared_ptr<lv2_config_service>> services;
std::vector<shared_ptr<lv2_config_service>> services;
// Grab all events
idm::select<lv2_config_service>([&](u32 /*id*/, lv2_config_service& service)
@ -170,7 +170,7 @@ void lv2_config_service_listener::notify_all()
});
// Sort services by timestamp
sort(services.begin(), services.end(), [](const std::shared_ptr<lv2_config_service>& s1, const std::shared_ptr<lv2_config_service>& s2)
sort(services.begin(), services.end(), [](const shared_ptr<lv2_config_service>& s1, const shared_ptr<lv2_config_service>& s2)
{
return s1->timestamp < s2->timestamp;
});
@ -198,9 +198,9 @@ void lv2_config_service::unregister()
void lv2_config_service::notify() const
{
std::vector<std::shared_ptr<lv2_config_service_listener>> listeners;
std::vector<shared_ptr<lv2_config_service_listener>> listeners;
auto sptr = wkptr.lock();
const shared_ptr<lv2_config_service> sptr = get_shared_ptr();
idm::select<lv2_config_service_listener>([&](u32 /*id*/, lv2_config_service_listener& listener)
{
@ -210,13 +210,14 @@ void lv2_config_service::notify() const
for (auto& listener : listeners)
{
listener->notify(this->get_shared_ptr());
listener->notify(sptr);
}
}
bool lv2_config_service_event::notify() const
{
const auto _handle = handle.lock();
const auto _handle = handle;
if (!_handle)
{
return false;
@ -259,7 +260,7 @@ error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl)
sys_config.trace("sys_config_open(equeue_hdl=0x%x, out_config_hdl=*0x%x)", equeue_hdl, out_config_hdl);
// Find queue with the given ID
const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_hdl);
const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_hdl);
if (!queue)
{
return CELL_ESRCH;
@ -303,7 +304,7 @@ error_code sys_config_get_service_event(u32 config_hdl, u32 event_id, vm::ptr<sy
sys_config.trace("sys_config_get_service_event(config_hdl=0x%x, event_id=0x%llx, dst=*0x%llx, size=0x%llx)", config_hdl, event_id, dst, size);
// Find sys_config handle object with the given ID
const auto cfg = idm::get<lv2_config_handle>(config_hdl);
const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
if (!cfg)
{
return CELL_ESRCH;
@ -335,7 +336,7 @@ error_code sys_config_add_service_listener(u32 config_hdl, sys_config_service_id
sys_config.trace("sys_config_add_service_listener(config_hdl=0x%x, service_id=0x%llx, min_verbosity=0x%llx, in=*0x%x, size=%lld, type=0x%llx, out_listener_hdl=*0x%x)", config_hdl, service_id, min_verbosity, in, size, type, out_listener_hdl);
// Find sys_config handle object with the given ID
auto cfg = idm::get<lv2_config_handle>(config_hdl);
auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
if (!cfg)
{
return CELL_ESRCH;
@ -383,7 +384,7 @@ error_code sys_config_register_service(u32 config_hdl, sys_config_service_id ser
sys_config.trace("sys_config_register_service(config_hdl=0x%x, service_id=0x%llx, user_id=0x%llx, verbosity=0x%llx, data_but=*0x%llx, size=%lld, out_service_hdl=*0x%llx)", config_hdl, service_id, user_id, verbosity, data_buf, size, out_service_hdl);
// Find sys_config handle object with the given ID
const auto cfg = idm::get<lv2_config_handle>(config_hdl);
const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
if (!cfg)
{
return CELL_ESRCH;

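In lv2_config_service::notify above, the owning pointer is now obtained once before the loop and the same instance is handed to every listener, instead of locking a weak_ptr per iteration. A standalone illustration of hoisting that acquisition; it uses enable_shared_from_this purely to stay self-contained, whereas the diff obtains the pointer through the id manager.

#include <memory>
#include <vector>

struct Service;

struct Listener
{
    void notify(const std::shared_ptr<Service>& svc);
};

struct Service : std::enable_shared_from_this<Service>
{
    void notify_all(const std::vector<Listener*>& listeners)
    {
        // Acquire the owning pointer once, outside the loop, and reuse it
        // (previously a weak_ptr had to be locked for every listener).
        const std::shared_ptr<Service> self = shared_from_this();

        for (Listener* l : listeners)
        {
            l->notify(self);
        }
    }
};

void Listener::notify(const std::shared_ptr<Service>& svc)
{
    (void)svc; // a real listener would queue an event here
}

int main()
{
    const auto svc = std::make_shared<Service>();
    Listener a;
    std::vector<Listener*> listeners{&a};
    svc->notify_all(listeners);
    return 0;
}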
View file

@ -3,6 +3,9 @@
#include <map>
#include <list>
#include "util/atomic.hpp"
#include "util/shared_ptr.hpp"
/*
* sys_config is a "subscription-based data storage API"
@ -133,30 +136,30 @@ class lv2_config
shared_mutex m_mutex;
// Map of LV2 Service Events
std::unordered_map<u32, std::weak_ptr<lv2_config_service_event>> events;
std::unordered_map<u32, shared_ptr<lv2_config_service_event>> events;
public:
void initialize();
// Service Events
void add_service_event(const std::shared_ptr<lv2_config_service_event>& event);
void add_service_event(shared_ptr<lv2_config_service_event> event);
void remove_service_event(u32 id);
std::shared_ptr<lv2_config_service_event> find_event(u32 id)
shared_ptr<lv2_config_service_event> find_event(u32 id)
{
reader_lock lock(m_mutex);
const auto it = events.find(id);
if (it == events.cend())
return nullptr;
return null_ptr;
if (auto event = it->second.lock())
if (it->second)
{
return event;
return it->second;
}
return nullptr;
return null_ptr;
}
};
@ -175,33 +178,35 @@ private:
u32 idm_id;
// queue for service/io event notifications
const std::weak_ptr<lv2_event_queue> queue;
const shared_ptr<lv2_event_queue> queue;
bool send_queue_event(u64 source, u64 d1, u64 d2, u64 d3) const
{
if (auto sptr = queue.lock())
if (auto sptr = queue)
{
return sptr->send(source, d1, d2, d3) == 0;
}
return false;
}
public:
// Constructors (should not be used directly)
lv2_config_handle(std::weak_ptr<lv2_event_queue>&& _queue)
lv2_config_handle(shared_ptr<lv2_event_queue> _queue) noexcept
: queue(std::move(_queue))
{}
{
}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_handle> create(Args&&... args)
static shared_ptr<lv2_config_handle> create(Args&&... args)
{
if (auto cfg = idm::make_ptr<lv2_config_handle>(std::forward<Args>(args)...))
{
cfg->idm_id = idm::last_id();
return cfg;
}
return nullptr;
return null_ptr;
}
// Notify event queue for this handle
@ -225,7 +230,6 @@ public:
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service> wkptr;
// Whether this service is currently registered or not
bool registered = true;
@ -240,27 +244,27 @@ public:
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity, u32 _padding, const u8 _data[], usz size)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity, u32 _padding, const u8* _data, usz size) noexcept
: timestamp(get_system_time())
, id(_id)
, user_id(_user_id)
, verbosity(_verbosity)
, padding(_padding)
, data(&_data[0], &_data[size])
{}
{
}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service> create(Args&&... args)
static shared_ptr<lv2_config_service> create(Args&&... args)
{
if (auto service = idm::make_ptr<lv2_config_service>(std::forward<Args>(args)...))
{
service->wkptr = service;
service->idm_id = idm::last_id();
return service;
}
return nullptr;
return null_ptr;
}
// Registration
@ -272,7 +276,7 @@ public:
// Utilities
usz get_size() const { return sizeof(sys_config_service_event_t)-1 + data.size(); }
std::shared_ptr<lv2_config_service> get_shared_ptr () const { return wkptr.lock(); }
shared_ptr<lv2_config_service> get_shared_ptr () const { return idm::get_unlocked<lv2_config_service>(idm_id); }
u32 get_id() const { return idm_id; }
};
@ -290,14 +294,13 @@ public:
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service_listener> wkptr;
// The service listener owns the service events - service events will not be freed as long as their corresponding listener exists
// This has been confirmed to be the case in realhw
std::vector<std::shared_ptr<lv2_config_service_event>> service_events;
std::weak_ptr<lv2_config_handle> handle;
std::vector<shared_ptr<lv2_config_service_event>> service_events;
shared_ptr<lv2_config_handle> handle;
bool notify(const std::shared_ptr<lv2_config_service_event>& event);
bool notify(const shared_ptr<lv2_config_service_event>& event);
public:
const sys_config_service_id service_id;
@ -307,8 +310,8 @@ public:
const std::vector<u8> data;
// Constructors (should not be used directly)
lv2_config_service_listener(std::shared_ptr<lv2_config_handle>& _handle, sys_config_service_id _service_id, u64 _min_verbosity, sys_config_service_listener_type _type, const u8 _data[], usz size)
: handle(_handle)
lv2_config_service_listener(shared_ptr<lv2_config_handle> _handle, sys_config_service_id _service_id, u64 _min_verbosity, sys_config_service_listener_type _type, const u8* _data, usz size) noexcept
: handle(std::move(_handle))
, service_id(_service_id)
, min_verbosity(_min_verbosity)
, type(_type)
@ -317,30 +320,29 @@ public:
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_listener> create(Args&&... args)
static shared_ptr<lv2_config_service_listener> create(Args&&... args)
{
if (auto listener = idm::make_ptr<lv2_config_service_listener>(std::forward<Args>(args)...))
{
listener->wkptr = listener;
listener->idm_id = idm::last_id();
return listener;
}
return nullptr;
return null_ptr;
}
// Check whether service matches
bool check_service(const lv2_config_service& service) const;
// Register new event, and notify queue
bool notify(const std::shared_ptr<lv2_config_service>& service);
bool notify(const shared_ptr<lv2_config_service>& service);
// (Re-)notify about all still-registered past events
void notify_all();
// Utilities
u32 get_id() const { return idm_id; }
std::shared_ptr<lv2_config_service_listener> get_shared_ptr() const { return wkptr.lock(); }
shared_ptr<lv2_config_service_listener> get_shared_ptr() const { return idm::get_unlocked<lv2_config_service_listener>(idm_id); }
};
/*
@ -363,30 +365,24 @@ public:
// Note: Events hold a shared_ptr to their corresponding service - services only get freed once there are no more pending service events
// This has been confirmed to be the case in realhw
const std::weak_ptr<lv2_config_handle> handle;
const std::shared_ptr<lv2_config_service> service;
const shared_ptr<lv2_config_handle> handle;
const shared_ptr<lv2_config_service> service;
const lv2_config_service_listener& listener;
// Constructors (should not be used directly)
lv2_config_service_event(const std::weak_ptr<lv2_config_handle>& _handle, const std::shared_ptr<lv2_config_service>& _service, const lv2_config_service_listener& _listener)
: id(get_next_id())
, handle(_handle)
, service(_service)
, listener(_listener)
{}
lv2_config_service_event(const std::weak_ptr<lv2_config_handle>&& _handle, const std::shared_ptr<lv2_config_service>&& _service, const lv2_config_service_listener& _listener)
lv2_config_service_event(shared_ptr<lv2_config_handle> _handle, shared_ptr<lv2_config_service> _service, const lv2_config_service_listener& _listener) noexcept
: id(get_next_id())
, handle(std::move(_handle))
, service(std::move(_service))
, listener(_listener)
{}
{
}
// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_event> create(Args&&... args)
static shared_ptr<lv2_config_service_event> create(Args&&... args)
{
auto ev = std::make_shared<lv2_config_service_event>(std::forward<Args>(args)...);
auto ev = make_shared<lv2_config_service_event>(std::forward<Args>(args)...);
g_fxo->get<lv2_config>().add_service_event(ev);
@ -394,7 +390,7 @@ public:
}
// Destructor
~lv2_config_service_event()
~lv2_config_service_event() noexcept
{
if (auto global = g_fxo->try_get<lv2_config>())
{

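The events map above switches from weak_ptr to shared_ptr, so find_event no longer locks anything and an entry stays alive until remove_service_event erases it. A minimal sketch of such an owning registry with standard containers; Event and EventMap are illustrative names.

#include <memory>
#include <mutex>
#include <unordered_map>

struct Event { unsigned id = 0; };

class EventMap
{
    std::mutex m_mutex;
    // Owning entries: an event lives at least until remove() is called,
    // unlike a weak_ptr map where expiry can race the lookup.
    std::unordered_map<unsigned, std::shared_ptr<Event>> m_events;

public:
    void add(std::shared_ptr<Event> ev)
    {
        std::lock_guard lock(m_mutex);
        m_events.emplace(ev->id, std::move(ev));
    }

    void remove(unsigned id)
    {
        std::lock_guard lock(m_mutex);
        m_events.erase(id);
    }

    std::shared_ptr<Event> find(unsigned id)
    {
        std::lock_guard lock(m_mutex);
        const auto it = m_events.find(id);
        return it == m_events.end() ? nullptr : it->second;
    }
};

int main()
{
    EventMap map;
    map.add(std::make_shared<Event>(Event{1}));
    const bool ok = map.find(1) && !map.find(2);
    map.remove(1);
    return ok && !map.find(1) ? 0 : 1;
}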
View file

@ -35,10 +35,10 @@ lv2_event_queue::lv2_event_queue(utils::serial& ar) noexcept
ar(events);
}
std::shared_ptr<void> lv2_event_queue::load(utils::serial& ar)
std::function<void(void*)> lv2_event_queue::load(utils::serial& ar)
{
auto queue = std::make_shared<lv2_event_queue>(ar);
return lv2_obj::load(queue->key, queue);
auto queue = make_shared<lv2_event_queue>(ar);
return [ptr = lv2_obj::load(queue->key, queue)](void* storage) { *static_cast<shared_ptr<lv2_obj>*>(storage) = ptr; };
}
void lv2_event_queue::save(utils::serial& ar)
@ -57,13 +57,13 @@ void lv2_event_queue::save_ptr(utils::serial& ar, lv2_event_queue* q)
ar(q->id);
}
std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg)
shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, shared_ptr<lv2_event_queue>& queue, std::string_view msg)
{
const u32 id = ar.pop<u32>();
if (!id)
{
return nullptr;
return {};
}
if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id))
@ -89,7 +89,7 @@ std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, st
});
// Null until resolved
return nullptr;
return {};
}
lv2_event_port::lv2_event_port(utils::serial& ar)
@ -106,7 +106,7 @@ void lv2_event_port::save(utils::serial& ar)
lv2_event_queue::save_ptr(ar, queue.get());
}
std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
{
if (ipc_key == SYS_EVENT_QUEUE_LOCAL)
{
@ -238,7 +238,7 @@ error_code sys_event_queue_create(cpu_thread& cpu, vm::ptr<u32> equeue_id, vm::p
if (const auto error = lv2_obj::create<lv2_event_queue>(pshared, ipc_key, flags, [&]()
{
return std::make_shared<lv2_event_queue>(protocol, type, size, name, ipc_key);
return make_shared<lv2_event_queue>(protocol, type, size, name, ipc_key);
}))
{
return error;
@ -394,7 +394,7 @@ error_code sys_event_queue_tryreceive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sy
sys_event.trace("sys_event_queue_tryreceive(equeue_id=0x%x, event_array=*0x%x, size=%d, number=*0x%x)", equeue_id, event_array, size, number);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id);
const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);
if (!queue)
{

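lv2_event_queue::load now returns a callback rather than the object itself: the callback is later invoked with a pointer to the type-erased storage slot and assigns the deserialized shared_ptr into it. A standalone sketch of that deferred-store pattern with std::function and std::shared_ptr; Archive and Widget are placeholders, not RPCS3 types.

#include <functional>
#include <memory>

struct Archive {};              // stand-in for utils::serial
struct Widget { int value = 42; };

// Deserialize now, but let the caller decide where the result is stored.
std::function<void(void*)> load_widget(Archive& /*ar*/)
{
    auto obj = std::make_shared<Widget>();
    return [obj](void* storage)
    {
        // storage points at a std::shared_ptr<Widget> owned by the registry
        *static_cast<std::shared_ptr<Widget>*>(storage) = obj;
    };
}

int main()
{
    Archive ar;
    std::shared_ptr<Widget> slot; // the registry's storage cell
    load_widget(ar)(&slot);       // run the deferred store
    return slot->value == 42 ? 0 : 1;
}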
View file

@ -100,10 +100,10 @@ struct lv2_event_queue final : public lv2_obj
lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept;
lv2_event_queue(utils::serial& ar) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
static void save_ptr(utils::serial&, lv2_event_queue*);
static std::shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg = {});
static shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, shared_ptr<lv2_event_queue>& queue, std::string_view msg = {});
CellError send(lv2_event event, bool* notified_thread = nullptr, lv2_event_port* port = nullptr);
@ -113,7 +113,7 @@ struct lv2_event_queue final : public lv2_obj
}
// Get event queue by its global key
static std::shared_ptr<lv2_event_queue> find(u64 ipc_key);
static shared_ptr<lv2_event_queue> find(u64 ipc_key);
};
struct lv2_event_port final : lv2_obj
@ -124,7 +124,7 @@ struct lv2_event_port final : lv2_obj
const u64 name; // Event source (generated from id and process id if not set)
atomic_t<usz> is_busy = 0; // Counts threads waiting on event sending
std::shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to
shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to
lv2_event_port(s32 type, u64 name)
: type(type)

View file

@ -22,10 +22,9 @@ lv2_event_flag::lv2_event_flag(utils::serial& ar)
ar(pattern);
}
std::shared_ptr<void> lv2_event_flag::load(utils::serial& ar)
std::function<void(void*)> lv2_event_flag::load(utils::serial& ar)
{
auto eflag = std::make_shared<lv2_event_flag>(ar);
return lv2_obj::load(eflag->key, eflag);
return load_func(make_shared<lv2_event_flag>(ar));
}
void lv2_event_flag::save(utils::serial& ar)
@ -66,7 +65,7 @@ error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_e
if (const auto error = lv2_obj::create<lv2_event_flag>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_event_flag>(
return make_shared<lv2_event_flag>(
_attr.protocol,
ipc_key,
_attr.type,
@ -330,7 +329,7 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
// Warning: may be called from SPU thread.
sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id, bitptn);
const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
if (!flag)
{
@ -502,7 +501,7 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)
if (num) *num = 0;
const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);
if (!flag)
{

View file

@ -54,7 +54,7 @@ struct lv2_event_flag final : lv2_obj
}
lv2_event_flag(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
// Check mode arg

View file

@ -79,7 +79,7 @@ void fmt_class_string<lv2_file>::format(std::string& out, u64 arg)
const usz pos = file.file ? file.file.pos() : umax;
const usz size = file.file ? file.file.size() : umax;
fmt::append(out, u8"%s, “%s”, Mode: 0x%x, Flags: 0x%x, Pos/Size: %s/%s (0x%x/0x%x)", file.type, file.name.data(), file.mode, file.flags, get_size(pos), get_size(size), pos, size);
fmt::append(out, u8"%s, “%s”, Mode: 0x%x, Flags: 0x%x, Pos/Size: %s/%s (0x%x/0x%x)", file.type, file.name.data(), file.mode, file.flags, get_size(pos), get_size(size), pos, size);
}
template<>
@ -87,7 +87,7 @@ void fmt_class_string<lv2_dir>::format(std::string& out, u64 arg)
{
const auto& dir = get_object(arg);
fmt::append(out, u8"Directory, “%s”, Entries: %u/%u", dir.name.data(), std::min<u64>(dir.pos, dir.entries.size()), dir.entries.size());
fmt::append(out, u8"Directory, “%s”, Entries: %u/%u", dir.name.data(), std::min<u64>(dir.pos, dir.entries.size()), dir.entries.size());
}
bool has_fs_write_rights(std::string_view vpath)
@ -615,11 +615,11 @@ void loaded_npdrm_keys::save(utils::serial& ar)
struct lv2_file::file_view : fs::file_base
{
const std::shared_ptr<lv2_file> m_file;
const shared_ptr<lv2_file> m_file;
const u64 m_off;
u64 m_pos;
explicit file_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
explicit file_view(const shared_ptr<lv2_file>& _file, u64 offset)
: m_file(_file)
, m_off(offset)
, m_pos(0)
@ -699,7 +699,7 @@ struct lv2_file::file_view : fs::file_base
}
};
fs::file lv2_file::make_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
fs::file lv2_file::make_view(const shared_ptr<lv2_file>& _file, u64 offset)
{
fs::file result;
result.reset(std::make_unique<lv2_file::file_view>(_file, offset));
@ -745,7 +745,7 @@ error_code sys_fs_test(ppu_thread&, u32 arg1, u32 arg2, vm::ptr<u32> arg3, u32 a
return CELL_EFAULT;
}
const auto file = idm::get<lv2_fs_object>(*arg3);
const auto file = idm::get_unlocked<lv2_fs_object>(*arg3);
if (!file)
{
@ -1059,16 +1059,16 @@ error_code sys_fs_open(ppu_thread& ppu, vm::cptr<char> path, s32 flags, vm::ptr<
return {g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath) == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, error, path};
}
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&ppath = ppath, &file = file, mode, flags, &real = real, &type = type]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&ppath = ppath, &file = file, mode, flags, &real = real, &type = type]() -> shared_ptr<lv2_file>
{
std::shared_ptr<lv2_file> result;
shared_ptr<lv2_file> result;
if (type >= lv2_file_type::sdata && !g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return result;
}
result = std::make_shared<lv2_file>(ppath, std::move(file), mode, flags, real, type);
result = stx::make_shared<lv2_file>(ppath, std::move(file), mode, flags, real, type);
sys_fs.warning("sys_fs_open(): fd=%u, %s", idm::last_id(), *result);
return result;
}))
@ -1100,7 +1100,7 @@ error_code sys_fs_read(ppu_thread& ppu, u32 fd, vm::ptr<void> buf, u64 nbytes, v
return CELL_EFAULT;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file || (nbytes && file->flags & CELL_FS_O_WRONLY))
{
@ -1169,7 +1169,7 @@ error_code sys_fs_write(ppu_thread& ppu, u32 fd, vm::cptr<void> buf, u64 nbytes,
return CELL_EFAULT;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file || (nbytes && !(file->flags & CELL_FS_O_ACCMODE)))
{
@ -1239,7 +1239,7 @@ error_code sys_fs_close(ppu_thread& ppu, u32 fd)
ppu.state += cpu_flag::wait;
lv2_obj::sleep(ppu);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -1279,7 +1279,7 @@ error_code sys_fs_close(ppu_thread& ppu, u32 fd)
auto& default_container = g_fxo->get<default_sys_fs_container>();
std::lock_guard lock(default_container.mutex);
if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
if (auto ct = idm::get_unlocked<lv2_memory_container>(file->ct_id))
{
ct->free(file->ct_used);
if (default_container.id == file->ct_id)
@ -1442,7 +1442,7 @@ error_code sys_fs_readdir(ppu_thread& ppu, u32 fd, vm::ptr<CellFsDirent> dir, vm
return CELL_EFAULT;
}
const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
const auto directory = idm::get_unlocked<lv2_fs_object, lv2_dir>(fd);
if (!directory)
{
@ -1614,7 +1614,7 @@ error_code sys_fs_fstat(ppu_thread& ppu, u32 fd, vm::ptr<CellFsStat> sb)
sys_fs.warning("sys_fs_fstat(fd=%d, sb=*0x%x)", fd, sb);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -1960,7 +1960,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -2056,7 +2056,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -2081,14 +2081,14 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
fs::file stream;
stream.reset(std::move(sdata_file));
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&file = *file, &stream = stream]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&file = *file, &stream = stream]() -> shared_ptr<lv2_file>
{
if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return nullptr;
return null_ptr;
}
return std::make_shared<lv2_file>(file, std::move(stream), file.mode, CELL_FS_O_RDONLY, file.real_path, lv2_file_type::sdata);
return stx::make_shared<lv2_file>(file, std::move(stream), file.mode, CELL_FS_O_RDONLY, file.real_path, lv2_file_type::sdata);
}))
{
arg->out_code = CELL_OK;
@ -2198,13 +2198,13 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_OK;
}
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
return CELL_EBADF;
}
if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
if (auto ct = idm::get_unlocked<lv2_memory_container>(file->ct_id))
{
ct->free(file->ct_used);
if (default_container.id == file->ct_id)
@ -2427,7 +2427,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}
const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
const auto directory = idm::get_unlocked<lv2_fs_object, lv2_dir>(fd);
if (!directory)
{
@ -2566,14 +2566,14 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return result.error;
}
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&]() -> shared_ptr<lv2_file>
{
if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return nullptr;
return null_ptr;
}
return std::make_shared<lv2_file>(result.ppath, std::move(result.file), 0, 0, std::move(result.real_path), lv2_file_type::sdata);
return stx::make_shared<lv2_file>(result.ppath, std::move(result.file), 0, 0, std::move(result.real_path), lv2_file_type::sdata);
}))
{
arg->out_code = CELL_OK;
@ -2597,7 +2597,7 @@ error_code sys_fs_lseek(ppu_thread& ppu, u32 fd, s64 offset, s32 whence, vm::ptr
sys_fs.trace("sys_fs_lseek(fd=%d, offset=0x%llx, whence=0x%x, pos=*0x%x)", fd, offset, whence, pos);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -2643,7 +2643,7 @@ error_code sys_fs_fdatasync(ppu_thread& ppu, u32 fd)
sys_fs.trace("sys_fs_fdadasync(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@ -2668,7 +2668,7 @@ error_code sys_fs_fsync(ppu_thread& ppu, u32 fd)
sys_fs.trace("sys_fs_fsync(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@ -2692,7 +2692,7 @@ error_code sys_fs_fget_block_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> sector_s
sys_fs.warning("sys_fs_fget_block_size(fd=%d, sector_size=*0x%x, block_size=*0x%x, arg4=*0x%x, out_flags=*0x%x)", fd, sector_size, block_size, arg4, out_flags);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -2819,7 +2819,7 @@ error_code sys_fs_ftruncate(ppu_thread& ppu, u32 fd, u64 size)
sys_fs.warning("sys_fs_ftruncate(fd=%d, size=0x%llx)", fd, size);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@ -3089,7 +3089,7 @@ error_code sys_fs_lsn_get_cda_size(ppu_thread&, u32 fd, vm::ptr<u64> ptr)
{
sys_fs.warning("sys_fs_lsn_get_cda_size(fd=%d, ptr=*0x%x)", fd, ptr);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -3112,7 +3112,7 @@ error_code sys_fs_lsn_lock(ppu_thread&, u32 fd)
{
sys_fs.trace("sys_fs_lsn_lock(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -3134,7 +3134,7 @@ error_code sys_fs_lsn_unlock(ppu_thread&, u32 fd)
{
sys_fs.trace("sys_fs_lsn_unlock(fd=%d)", fd);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{

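Several sys_fs paths above create the file object through a factory lambda that first reserves a slot in a global counter (npdrm_fds.try_inc(16)) and returns an empty pointer when the budget is exhausted, so no id gets allocated. A standalone sketch of that gating pattern under assumed names (File, g_open_sdata, import_object); the simplified import_object only mimics the "empty result aborts registration" behaviour of idm::import.

#include <atomic>
#include <functional>
#include <memory>

struct File { int fd = 0; };

// Global budget, like npdrm_fds in the diff above
std::atomic<int> g_open_sdata{0};

bool try_inc(std::atomic<int>& ctr, int limit)
{
    int v = ctr.load();
    while (v < limit)
    {
        if (ctr.compare_exchange_weak(v, v + 1))
            return true; // slot reserved
    }
    return false;        // budget exhausted
}

// Simplified stand-in for idm::import: only registers if the factory yields an object
int import_object(const std::function<std::shared_ptr<File>()>& factory)
{
    static int next_id = 1;
    if (auto obj = factory())
        return next_id++; // a real id manager would also store obj
    return 0;             // 0 = creation rejected, nothing registered
}

int main()
{
    const int id = import_object([]() -> std::shared_ptr<File>
    {
        if (!try_inc(g_open_sdata, 16))
            return nullptr;             // over budget: abort creation
        return std::make_shared<File>();
    });
    return id != 0 ? 0 : 1;
}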
View file

@ -360,7 +360,7 @@ struct lv2_file final : lv2_fs_object
struct file_view;
// Make file view from lv2_file object (for MSELF support)
static fs::file make_view(const std::shared_ptr<lv2_file>& _file, u64 offset);
static fs::file make_view(const shared_ptr<lv2_file>& _file, u64 offset);
};
struct lv2_dir final : lv2_fs_object

View file

@ -12,13 +12,13 @@
LOG_CHANNEL(sys_interrupt);
lv2_int_tag::lv2_int_tag() noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
{
}
lv2_int_tag::lv2_int_tag(utils::serial& ar) noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
, handler([&]()
{
@ -44,8 +44,8 @@ void lv2_int_tag::save(utils::serial& ar)
ar(lv2_obj::check(handler) ? handler->id : 0);
}
lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept
: lv2_obj{1}
lv2_int_serv::lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread, u64 arg1, u64 arg2) noexcept
: lv2_obj(1)
, id(idm::last_id())
, thread(thread)
, arg1(arg1)
@ -54,7 +54,7 @@ lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thre
}
lv2_int_serv::lv2_int_serv(utils::serial& ar) noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
, thread(idm::get_unlocked<named_thread<ppu_thread>>(ar))
, arg1(ar)
@ -96,7 +96,7 @@ void lv2_int_serv::join() const
thread->cmd_notify.notify_one();
(*thread)();
idm::remove_verify<named_thread<ppu_thread>>(thread->id, static_cast<std::weak_ptr<named_thread<ppu_thread>>>(thread));
idm::remove_verify<named_thread<ppu_thread>>(thread->id, thread);
}
error_code sys_interrupt_tag_destroy(ppu_thread& ppu, u32 intrtag)
@ -139,7 +139,7 @@ error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32
const u32 id = idm::import<lv2_obj, lv2_int_serv>([&]()
{
std::shared_ptr<lv2_int_serv> result;
shared_ptr<lv2_int_serv> result;
// Get interrupt tag
const auto tag = idm::check_unlocked<lv2_obj, lv2_int_tag>(intrtag);
@ -173,7 +173,7 @@ error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32
return result;
}
result = std::make_shared<lv2_int_serv>(it, arg1, arg2);
result = make_shared<lv2_int_serv>(it, arg1, arg2);
tag->handler = result;
it->cmd_list
@ -251,7 +251,7 @@ void ppu_interrupt_thread_entry(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struc
{
while (true)
{
std::shared_ptr<lv2_int_serv> serv = nullptr;
shared_ptr<lv2_int_serv> serv = null_ptr;
// Loop endlessly trying to invoke an interrupt if required
idm::select<named_thread<spu_thread>>([&](u32, spu_thread& spu)

View file

@ -11,7 +11,7 @@ struct lv2_int_tag final : public lv2_obj
static const u32 id_base = 0x0a000000;
const u32 id;
std::shared_ptr<struct lv2_int_serv> handler;
shared_ptr<struct lv2_int_serv> handler;
lv2_int_tag() noexcept;
lv2_int_tag(utils::serial& ar) noexcept;
@ -23,11 +23,11 @@ struct lv2_int_serv final : public lv2_obj
static const u32 id_base = 0x0b000000;
const u32 id;
const std::shared_ptr<named_thread<ppu_thread>> thread;
const shared_ptr<named_thread<ppu_thread>> thread;
const u64 arg1;
const u64 arg2;
lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(utils::serial& ar) noexcept;
void save(utils::serial& ar);

View file

@ -43,7 +43,7 @@ error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block)
return CELL_EFAULT;
}
if (auto io = idm::get<lv2_io_buf>(handle))
if (auto io = idm::get_unlocked<lv2_io_buf>(handle))
{
// no idea what we actually need to allocate
if (u32 addr = vm::alloc(io->block_count * io->block_size, vm::main))
@ -62,7 +62,7 @@ error_code sys_io_buffer_free(u32 handle, u32 block)
{
sys_io.todo("sys_io_buffer_free(handle=0x%x, block=0x%x)", handle, block);
const auto io = idm::get<lv2_io_buf>(handle);
const auto io = idm::get_unlocked<lv2_io_buf>(handle);
if (!io)
{

View file

@ -64,7 +64,7 @@ error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)
sys_lwcond.trace("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);
std::shared_ptr<lv2_lwcond> _cond;
shared_ptr<lv2_lwcond> _cond;
while (true)
{
@ -440,7 +440,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id
ppu.gpr[3] = CELL_OK;
std::shared_ptr<lv2_lwmutex> mutex;
shared_ptr<lv2_lwmutex> mutex;
auto& sstate = *ppu.optional_savestate_state;

View file

@ -56,7 +56,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)
sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);
std::shared_ptr<lv2_lwmutex> _mutex;
shared_ptr<lv2_lwmutex> _mutex;
while (true)
{

View file

@ -28,10 +28,13 @@ lv2_memory_container::lv2_memory_container(utils::serial& ar, bool from_idm) noe
{
}
std::shared_ptr<void> lv2_memory_container::load(utils::serial& ar)
std::function<void(void*)> lv2_memory_container::load(utils::serial& ar)
{
// Use idm::last_id() only for the instances at IDM
return std::make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true);
return [ptr = make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true)](void* storage)
{
*static_cast<shared_ptr<lv2_memory_container>*>(storage) = ptr;
};
}
void lv2_memory_container::save(utils::serial& ar)
@ -43,7 +46,7 @@ lv2_memory_container* lv2_memory_container::search(u32 id)
{
if (id != SYS_MEMORY_CONTAINER_ID_INVALID)
{
return idm::check<lv2_memory_container>(id);
return idm::check_unlocked<lv2_memory_container>(id);
}
return &g_fxo->get<lv2_memory_container>();
@ -397,7 +400,7 @@ error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_inf
sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid);
const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
if (!ct)
{

View file

@ -74,7 +74,7 @@ struct lv2_memory_container
lv2_memory_container(u32 size, bool from_idm = false) noexcept;
lv2_memory_container(utils::serial& ar, bool from_idm = false) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
static lv2_memory_container* search(u32 id);

View file

@ -82,13 +82,13 @@ CellError lv2_memory::on_id_create()
return {};
}
std::shared_ptr<void> lv2_memory::load(utils::serial& ar)
std::function<void(void*)> lv2_memory::load(utils::serial& ar)
{
auto mem = std::make_shared<lv2_memory>(ar);
auto mem = make_shared<lv2_memory>(ar);
mem->exists++; // Disable on_id_create()
std::shared_ptr<void> ptr = lv2_obj::load(mem->key, mem, +mem->pshared);
auto func = load_func(mem, +mem->pshared);
mem->exists--;
return ptr;
return func;
}
void lv2_memory::save(utils::serial& ar)
@ -128,7 +128,7 @@ error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align, u64 fl
if (auto error = lv2_obj::create<lv2_memory>(_pshared, ipc_key, exclusive ? SYS_SYNC_NEWLY_CREATED : SYS_SYNC_NOT_CARE, [&]()
{
return std::make_shared<lv2_memory>(
return make_shared<lv2_memory>(
static_cast<u32>(size),
align,
flags,
@ -294,7 +294,7 @@ error_code sys_mmapper_allocate_shared_memory_from_container(ppu_thread& ppu, u6
}
}
const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
if (!ct)
{
@ -491,7 +491,7 @@ error_code sys_mmapper_allocate_shared_memory_from_container_ext(ppu_thread& ppu
}
}
const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);
if (!ct)
{
@ -797,7 +797,7 @@ error_code sys_mmapper_enable_page_fault_notification(ppu_thread& ppu, u32 start
// TODO: Check memory region's flags to make sure the memory can be used for page faults.
auto queue = idm::get<lv2_obj, lv2_event_queue>(event_queue_id);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(event_queue_id);
if (!queue)
{ // Can't connect the queue if it doesn't exist.

View file

@ -31,7 +31,7 @@ struct lv2_memory : lv2_obj
lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv2_memory_container* ct);
lv2_memory(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
CellError on_id_create();

View file

@ -25,10 +25,9 @@ lv2_mutex::lv2_mutex(utils::serial& ar)
control.raw().owner >>= 1;
}
std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
std::function<void(void*)> lv2_mutex::load(utils::serial& ar)
{
auto mtx = std::make_shared<lv2_mutex>(ar);
return lv2_obj::load(mtx->key, mtx);
return load_func(make_shared<lv2_mutex>(ar));
}
void lv2_mutex::save(utils::serial& ar)
@ -88,7 +87,7 @@ error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_
if (auto error = lv2_obj::create<lv2_mutex>(_attr.pshared, _attr.ipc_key, _attr.flags, [&]()
{
return std::make_shared<lv2_mutex>(
return make_shared<lv2_mutex>(
_attr.protocol,
_attr.recursive,
_attr.adaptive,

View file

@ -58,7 +58,7 @@ struct lv2_mutex final : lv2_obj
}
lv2_mutex(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
template <typename T>

View file

@ -266,25 +266,25 @@ lv2_socket::lv2_socket(utils::serial& ar, lv2_socket_type _type)
ar(last_bound_addr);
}
std::shared_ptr<void> lv2_socket::load(utils::serial& ar)
std::function<void(void*)> lv2_socket::load(utils::serial& ar)
{
const lv2_socket_type type{ar};
std::shared_ptr<lv2_socket> sock_lv2;
shared_ptr<lv2_socket> sock_lv2;
switch (type)
{
case SYS_NET_SOCK_STREAM:
case SYS_NET_SOCK_DGRAM:
{
auto lv2_native = std::make_shared<lv2_socket_native>(ar, type);
auto lv2_native = make_shared<lv2_socket_native>(ar, type);
ensure(lv2_native->create_socket() >= 0);
sock_lv2 = std::move(lv2_native);
break;
}
case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(ar, type); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(ar, type); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(ar, type); break;
case SYS_NET_SOCK_RAW: sock_lv2 = make_shared<lv2_socket_raw>(ar, type); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = make_shared<lv2_socket_p2p>(ar, type); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = make_shared<lv2_socket_p2ps>(ar, type); break;
}
if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
@ -293,7 +293,7 @@ std::shared_ptr<void> lv2_socket::load(utils::serial& ar)
sock_lv2->bind(sock_lv2->last_bound_addr);
}
return sock_lv2;
return [ptr = sock_lv2](void* storage) { *static_cast<shared_ptr<lv2_socket>*>(storage) = ptr; };
}
void lv2_socket::save(utils::serial& ar, bool save_only_this_class)
@ -352,7 +352,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>
s32 result = 0;
sys_net_sockaddr sn_addr{};
std::shared_ptr<lv2_socket> new_socket{};
shared_ptr<lv2_socket> new_socket{};
const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
{
@ -465,7 +465,7 @@ error_code sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr>
return -SYS_NET_EINVAL;
}
if (!idm::check<lv2_socket>(s))
if (!idm::check_unlocked<lv2_socket>(s))
{
return -SYS_NET_EBADF;
}
@ -514,7 +514,7 @@ error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr
return -SYS_NET_EAFNOSUPPORT;
}
if (!idm::check<lv2_socket>(s))
if (!idm::check_unlocked<lv2_socket>(s))
{
return -SYS_NET_EBADF;
}
@ -1194,14 +1194,14 @@ error_code sys_net_bnet_socket(ppu_thread& ppu, lv2_socket_family family, lv2_so
return -SYS_NET_EPROTONOSUPPORT;
}
std::shared_ptr<lv2_socket> sock_lv2;
shared_ptr<lv2_socket> sock_lv2;
switch (type)
{
case SYS_NET_SOCK_STREAM:
case SYS_NET_SOCK_DGRAM:
{
auto lv2_native = std::make_shared<lv2_socket_native>(family, type, protocol);
auto lv2_native = make_shared<lv2_socket_native>(family, type, protocol);
if (s32 result = lv2_native->create_socket(); result < 0)
{
return sys_net_error{result};
@ -1210,9 +1210,9 @@ error_code sys_net_bnet_socket(ppu_thread& ppu, lv2_socket_family family, lv2_so
sock_lv2 = std::move(lv2_native);
break;
}
case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(family, type, protocol); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(family, type, protocol); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(family, type, protocol); break;
case SYS_NET_SOCK_RAW: sock_lv2 = make_shared<lv2_socket_raw>(family, type, protocol); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = make_shared<lv2_socket_p2p>(family, type, protocol); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = make_shared<lv2_socket_p2ps>(family, type, protocol); break;
}
const s32 s = idm::import_existing<lv2_socket>(sock_lv2);
@ -1775,7 +1775,7 @@ error_code sys_net_abort(ppu_thread& ppu, s32 type, u64 arg, s32 flags)
{
std::lock_guard nw_lock(g_fxo->get<network_context>().mutex_thread_loop);
const auto sock = idm::get<lv2_socket>(static_cast<u32>(arg));
const auto sock = idm::get_unlocked<lv2_socket>(static_cast<u32>(arg));
if (!sock)
{

View file

@ -64,7 +64,7 @@ void lv2_socket::set_poll_event(bs_t<lv2_socket::poll_t> event)
events += event;
}
void lv2_socket::poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event, std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb)
void lv2_socket::poll_queue(shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event, std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb)
{
set_poll_event(event);
queue.emplace_back(std::move(ppu), poll_cb);

View file

@ -60,7 +60,7 @@ public:
lv2_socket(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket(utils::serial&) {}
lv2_socket(utils::serial&, lv2_socket_type type);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial&, bool save_only_this_class = false);
virtual ~lv2_socket() = default;
@ -69,7 +69,7 @@ public:
void set_lv2_id(u32 id);
bs_t<poll_t> get_events() const;
void set_poll_event(bs_t<poll_t> event);
void poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<poll_t> event, std::function<bool(bs_t<poll_t>)> poll_cb);
void poll_queue(shared_ptr<ppu_thread> ppu, bs_t<poll_t> event, std::function<bool(bs_t<poll_t>)> poll_cb);
u32 clear_queue(ppu_thread*);
void handle_events(const pollfd& native_fd, bool unset_connecting = false);
void queue_wake(ppu_thread* ppu);
@ -85,7 +85,7 @@ public:
#endif
public:
virtual std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual s32 bind(const sys_net_sockaddr& addr) = 0;
virtual std::optional<s32> connect(const sys_net_sockaddr& addr) = 0;
@ -133,7 +133,7 @@ protected:
atomic_bs_t<poll_t> events{};
// Event processing workload (pair of thread id and the processing function)
std::vector<std::pair<std::shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>> queue;
std::vector<std::pair<shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>> queue;
// Socket options value keepers
// Non-blocking IO option

View file

@ -106,7 +106,7 @@ void lv2_socket_native::set_socket(socket_type socket, lv2_socket_family family,
set_non_blocking();
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_native::accept(bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_native::accept(bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);
@ -127,7 +127,7 @@ std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_
if (native_socket != invalid_socket)
{
auto newsock = std::make_shared<lv2_socket_native>(family, type, protocol);
auto newsock = make_single<lv2_socket_native>(family, type, protocol);
newsock->set_socket(native_socket, family, type, protocol);
// Sockets inherit non blocking behaviour from their parent
@ -274,7 +274,7 @@ std::optional<s32> lv2_socket_native::connect(const sys_net_sockaddr& addr)
#ifdef _WIN32
connecting = true;
#endif
this->poll_queue(nullptr, lv2_socket::poll_t::write, [this](bs_t<lv2_socket::poll_t> events) -> bool
this->poll_queue(null_ptr, lv2_socket::poll_t::write, [this](bs_t<lv2_socket::poll_t> events) -> bool
{
if (events & lv2_socket::poll_t::write)
{

View file

@ -30,13 +30,15 @@
class lv2_socket_native final : public lv2_socket
{
public:
static constexpr u32 id_type = 1;
lv2_socket_native(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_native(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
~lv2_socket_native();
s32 create_socket();
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;

View file

@ -72,7 +72,7 @@ void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr, std::vect
}
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2p::accept([[maybe_unused]] bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2p::accept([[maybe_unused]] bool is_lock)
{
sys_net.fatal("[P2P] accept() called on a P2P socket");
return {};

View file

@ -9,7 +9,7 @@ public:
lv2_socket_p2p(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;

View file

@ -467,7 +467,7 @@ bool lv2_socket_p2ps::handle_listening(p2ps_encapsulated_tcp* tcp_header, [[mayb
const u16 new_op_vport = tcp_header->src_port;
const u64 new_cur_seq = send_hdr.seq + 1;
const u64 new_data_beg_seq = send_hdr.ack;
auto sock_lv2 = std::make_shared<lv2_socket_p2ps>(socket, port, vport, new_op_addr, new_op_port, new_op_vport, new_cur_seq, new_data_beg_seq, so_nbio);
auto sock_lv2 = make_shared<lv2_socket_p2ps>(socket, port, vport, new_op_addr, new_op_port, new_op_vport, new_cur_seq, new_data_beg_seq, so_nbio);
const s32 new_sock_id = idm::import_existing<lv2_socket>(sock_lv2);
sock_lv2->set_lv2_id(new_sock_id);
const u64 key_connected = (reinterpret_cast<struct sockaddr_in*>(op_addr)->sin_addr.s_addr) | (static_cast<u64>(tcp_header->src_port) << 48) | (static_cast<u64>(tcp_header->dst_port) << 32);
@ -600,7 +600,7 @@ std::pair<s32, sys_net_sockaddr> lv2_socket_p2ps::getpeername()
return {CELL_OK, res};
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2ps::accept(bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2ps::accept(bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

View file

@ -58,6 +58,8 @@ std::vector<u8> generate_u2s_packet(const p2ps_encapsulated_tcp& header, const u
class lv2_socket_p2ps final : public lv2_socket_p2p
{
public:
static constexpr u32 id_type = 2;
lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr, u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq, s32 so_nbio);
lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type);
@ -70,7 +72,7 @@ public:
void send_u2s_packet(std::vector<u8> data, const ::sockaddr_in* dst, u64 seq, bool require_ack);
void close_stream();
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;

View file

@ -36,7 +36,7 @@ void lv2_socket_raw::save(utils::serial& ar)
lv2_socket::save(ar, true);
}
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
{
sys_net.fatal("[RAW] accept() called on a RAW socket");
return {};

View file

@ -5,11 +5,13 @@
class lv2_socket_raw final : public lv2_socket
{
public:
static constexpr u32 id_type = 1;
lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_raw(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);
std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;
std::optional<s32> connect(const sys_net_sockaddr& addr) override;

View file

@ -138,7 +138,7 @@ void p2p_thread::bind_sce_np_port()
void network_thread::operator()()
{
std::vector<std::shared_ptr<lv2_socket>> socklist;
std::vector<shared_ptr<lv2_socket>> socklist;
socklist.reserve(lv2_socket::id_count);
{

View file

@ -135,11 +135,11 @@ bool nt_p2p_port::handle_connected(s32 sock_id, p2ps_encapsulated_tcp* tcp_heade
bool nt_p2p_port::handle_listening(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr)
{
auto sock = idm::get<lv2_socket>(sock_id);
auto sock = idm::get_unlocked<lv2_socket>(sock_id);
if (!sock)
return false;
auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(*sock.get());
auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(*sock);
return sock_p2ps.handle_listening(tcp_header, data, op_addr);
}

View file

@ -13,10 +13,10 @@
#include "sys_overlay.h"
#include "sys_fs.h"
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);
extern std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);
extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module<lv2_obj>&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);
LOG_CHANNEL(sys_overlay);
@ -68,7 +68,7 @@ static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vp
ppu_initialize(*ovlm);
sys_overlay.success(u8"Loaded overlay: “%s” (id=0x%x)", vpath, idm::last_id());
sys_overlay.success("Loaded overlay: \"%s\" (id=0x%x)", vpath, idm::last_id());
*ovlmid = idm::last_id();
*entry = ovlm->entry;
@ -78,7 +78,7 @@ static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vp
fs::file make_file_view(fs::file&& file, u64 offset, u64 size);
std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
std::function<void(void*)> lv2_overlay::load(utils::serial& ar)
{
const std::string vpath = ar.pop<std::string>();
const std::string path = vfs::get(vpath);
@ -86,7 +86,7 @@ std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
sys_overlay.success("lv2_overlay::load(): vpath='%s', path='%s', offset=0x%x", vpath, path, offset);
std::shared_ptr<lv2_overlay> ovlm;
shared_ptr<lv2_overlay> ovlm;
fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};
@ -110,7 +110,10 @@ std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
sys_overlay.error("lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)", vpath, offset);
}
return ovlm;
return [ovlm](void* storage)
{
*static_cast<shared_ptr<lv2_obj>*>(storage) = ovlm;
};
}
void lv2_overlay::save(utils::serial& ar)
@ -156,7 +159,7 @@ error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd, u64 offset
return CELL_EINVAL;
}
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{

View file

@ -5,7 +5,7 @@
#include "sys_sync.h"
#include <vector>
struct lv2_overlay final : lv2_obj, ppu_module
struct lv2_overlay final : ppu_module<lv2_obj>
{
static const u32 id_base = 0x25000000;
@ -15,7 +15,7 @@ struct lv2_overlay final : lv2_obj, ppu_module
lv2_overlay() = default;
lv2_overlay(utils::serial&){}
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};

View file

@ -22,9 +22,9 @@ LOG_CHANNEL(sys_ppu_thread);
// Simple structure to cleanup previous thread, because can't remove its own thread
struct ppu_thread_cleaner
{
std::shared_ptr<void> old;
shared_ptr<named_thread<ppu_thread>> old;
std::shared_ptr<void> clean(std::shared_ptr<void> ptr)
shared_ptr<named_thread<ppu_thread>> clean(shared_ptr<named_thread<ppu_thread>> ptr)
{
return std::exchange(old, std::move(ptr));
}
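In context, clean() is just std::exchange: it parks the owning pointer of the exiting thread and returns whichever thread was parked before it, so the expensive destructor (a thread join) does not run inside the IDM lock scope. Condensed from the hunks that follow, as a reading aid rather than new code:

// Condensed usage from _sys_ppu_thread_exit below:
shared_ptr<named_thread<ppu_thread>> old_ppu;
{
	// withdraw() detaches the ID and yields the owning pointer; the extra
	// arguments mirror the hunk below and are defined by the ID manager,
	// which this diff does not show.
	old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(
		idm::withdraw<named_thread<ppu_thread>>(ppu.id, 0, std::false_type{}));
}
if (old_ppu)
{
	*old_ppu = thread_state::finished; // join the previously parked thread explicitly
}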
@ -86,7 +86,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
ppu_join_status old_status;
// Avoid cases where cleaning causes the destructor to be called inside IDM lock scope (for performance)
std::shared_ptr<void> old_ppu;
shared_ptr<named_thread<ppu_thread>> old_ppu;
{
lv2_obj::notify_all_t notify;
lv2_obj::prepare_for_sleep(ppu);
@ -115,7 +115,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
if (old_status != ppu_join_status::joinable)
{
// Remove self ID from IDM, move owning ptr
old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(std::move(idm::find_unlocked<named_thread<ppu_thread>>(ppu.id)->second));
old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(idm::withdraw<named_thread<ppu_thread>>(ppu.id, 0, std::false_type{}));
}
// Get writers mask (wait for all current writers to quit)
@ -147,7 +147,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
if (old_ppu)
{
// It is detached from IDM now so join must be done explicitly now
*static_cast<named_thread<ppu_thread>*>(old_ppu.get()) = thread_state::finished;
*old_ppu = thread_state::finished;
}
// Need to wait until the current writers finish
@ -435,7 +435,7 @@ error_code sys_ppu_thread_stop(ppu_thread& ppu, u32 thread_id)
return CELL_ENOSYS;
}
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [](named_thread<ppu_thread>&) {});
if (!thread)
{
@ -529,7 +529,7 @@ error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::p
p.arg0 = arg;
p.arg1 = unk;
return std::make_shared<named_thread<ppu_thread>>(p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
return stx::make_shared<named_thread<ppu_thread>>(p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
});
if (!tid)
@ -539,7 +539,7 @@ error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::p
return CELL_EAGAIN;
}
sys_ppu_thread.warning(u8"_sys_ppu_thread_create(): Thread “%s” created (id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)", ppu_name, tid, entry.addr, entry.rtoc, tls);
sys_ppu_thread.warning("_sys_ppu_thread_create(): Thread \"%s\" created (id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)", ppu_name, tid, entry.addr, entry.rtoc, tls);
ppu.check_state();
*thread_id = tid;
@ -594,7 +594,7 @@ error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char>
sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
if (!thread)
{
@ -618,7 +618,7 @@ error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char>
auto _name = make_single<std::string>(std::move(out_str));
// thread_ctrl name is not changed (TODO)
sys_ppu_thread.warning(u8"sys_ppu_thread_rename(): Thread renamed to “%s”", *_name);
sys_ppu_thread.warning("sys_ppu_thread_rename(): Thread renamed to \"%s\"", *_name);
thread->ppu_tname.store(std::move(_name));
thread_ctrl::set_name(*thread, thread->thread_name); // TODO: Currently sets debugger thread name only for local thread
@ -631,7 +631,7 @@ error_code sys_ppu_thread_recover_page_fault(ppu_thread& ppu, u32 thread_id)
sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)", thread_id);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
if (!thread)
{
@ -647,7 +647,7 @@ error_code sys_ppu_thread_get_page_fault_context(ppu_thread& ppu, u32 thread_id,
sys_ppu_thread.todo("sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)", thread_id, ctxt);
const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);
if (!thread)
{

View file

@ -231,7 +231,7 @@ CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags)
return CELL_EPERM;
default:
{
if (auto vm0 = idm::get<sys_vm_t>(sys_vm_t::find_id(addr)))
if (auto vm0 = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr)))
{
// sys_vm area was not covering the address specified but made a reservation on the entire 256mb region
if (vm0->addr + vm0->size - 1 < addr)
@ -433,16 +433,26 @@ void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<
using namespace id_manager;
auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, vec = (reader_lock{g_mutex}, g_fxo->get<id_map<lv2_memory_container>>().vec)](u32 sdk_suggested_mem) mutable
shared_ptr<utils::serial> idm_capture = make_shared<utils::serial>();
{
reader_lock rlock{g_mutex};
g_fxo->get<id_map<lv2_memory_container>>().save(*idm_capture);
}
idm_capture->set_reading_state();
auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, idm_capture](u32 sdk_suggested_mem) mutable
{
if (is_real_reboot)
{
// Do not save containers on actual reboot
vec.clear();
ensure(g_fxo->init<id_map<lv2_memory_container>>());
}
else
{
// Save LV2 memory containers
ensure(g_fxo->init<id_map<lv2_memory_container>>(*idm_capture));
}
// Save LV2 memory containers
ensure(g_fxo->init<id_map<lv2_memory_container>>())->vec = std::move(vec);
// Empty the containers, accumulate their total size
u32 total_size = 0;
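For readability, the capture and restore halves of the change above, placed side by side; this is a condensed view of the hunk, not new behaviour:

// Capture: serialize the current lv2_memory_container map into an in-memory archive.
shared_ptr<utils::serial> idm_capture = make_shared<utils::serial>();
{
	reader_lock rlock{g_mutex};
	g_fxo->get<id_map<lv2_memory_container>>().save(*idm_capture);
}
idm_capture->set_reading_state(); // reuse the same buffer for deserialization

// Restore (inside the reboot callback): rebuild the map from the capture,
// unless this is a real reboot, in which case a fresh map is initialized.
ensure(g_fxo->init<id_map<lv2_memory_container>>(*idm_capture));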

View file

@ -17,12 +17,12 @@
#include "sys_memory.h"
#include <span>
extern void dump_executable(std::span<const u8> data, const ppu_module* _module, std::string_view title_id);
extern void dump_executable(std::span<const u8> data, const ppu_module<lv2_obj>* _module, std::string_view title_id);
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64, utils::serial* = nullptr);
extern shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx& prx);
extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module<lv2_obj>&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);
extern void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags);
LOG_CHANNEL(sys_prx);
@ -235,7 +235,7 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
prx->name = std::move(name);
prx->path = std::move(path);
sys_prx.warning(u8"Ignored module: “%s” (id=0x%x)", vpath, idm::last_id());
sys_prx.warning("Ignored module: \"%s\" (id=0x%x)", vpath, idm::last_id());
return not_an_error(idm::last_id());
};
@ -253,7 +253,7 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
{
if (fs_error + 0u == CELL_ENOENT && is_firmware_sprx)
{
sys_prx.error(u8"firmware SPRX not found: “%s” (forcing HLE implementation)", vpath, idm::last_id());
sys_prx.error("firmware SPRX not found: \"%s\" (forcing HLE implementation)", vpath, idm::last_id());
return hle_load();
}
@ -298,14 +298,14 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
ppu_initialize(*prx);
sys_prx.success(u8"Loaded module: “%s” (id=0x%x)", vpath, idm::last_id());
sys_prx.success("Loaded module: \"%s\" (id=0x%x)", vpath, idm::last_id());
return not_an_error(idm::last_id());
}
fs::file make_file_view(fs::file&& file, u64 offset, u64 size);
std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
std::function<void(void*)> lv2_prx::load(utils::serial& ar)
{
[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_prx_overlay);
@ -316,11 +316,11 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
usz seg_count = 0;
ar.deserialize_vle(seg_count);
std::shared_ptr<lv2_prx> prx;
shared_ptr<lv2_prx> prx;
auto hle_load = [&]()
{
prx = std::make_shared<lv2_prx>();
prx = make_shared<lv2_prx>();
prx->path = path;
prx->name = path.substr(path.find_last_of(fs::delim) + 1);
};
@ -337,7 +337,7 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
{
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
file = make_file_view(std::move(file), offset, umax);
prx = ppu_load_prx(ppu_prx_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, false, path, 0, &ar);
prx = ppu_load_prx(ppu_prx_object{decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic))}, false, path, 0, &ar);
prx->m_loaded_flags = std::move(loaded_flags);
prx->m_external_loaded_flags = std::move(external_flags);
@ -369,7 +369,11 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
}
prx->state = state;
return prx;
return [prx](void* storage)
{
*static_cast<shared_ptr<lv2_obj>*>(storage) = prx;
};
}
void lv2_prx::save(utils::serial& ar)
@ -407,7 +411,7 @@ error_code _sys_prx_load_module_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u64 f
sys_prx.warning("_sys_prx_load_module_by_fd(fd=%d, offset=0x%x, flags=0x%x, pOpt=*0x%x)", fd, offset, flags, pOpt);
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
@ -519,7 +523,7 @@ error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys
return CELL_EINVAL;
}
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);
if (!prx)
{
@ -600,7 +604,7 @@ error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_
sys_prx.warning("_sys_prx_stop_module(id=0x%x, flags=0x%x, pOpt=*0x%x)", id, flags, pOpt);
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);
if (!prx)
{
@ -1013,7 +1017,7 @@ error_code _sys_prx_get_module_info(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<
sys_prx.warning("_sys_prx_get_module_info(id=0x%x, flags=%d, pOpt=*0x%x)", id, flags, pOpt);
const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);
if (!pOpt)
{

View file

@ -172,7 +172,7 @@ enum : u32
PRX_STATE_DESTROYED, // Last state, the module cannot be restarted
};
struct lv2_prx final : lv2_obj, ppu_module
struct lv2_prx final : ppu_module<lv2_obj>
{
static const u32 id_base = 0x23000000;
@ -204,7 +204,7 @@ struct lv2_prx final : lv2_obj, ppu_module
lv2_prx() noexcept = default;
lv2_prx(utils::serial&) {}
static std::shared_ptr<void> load(utils::serial&);
static std::function<void(void*)> load(utils::serial&);
void save(utils::serial& ar);
};

View file

@ -425,7 +425,7 @@ error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea
return CELL_EINVAL;
}
if ((addr == ea || !(addr % 0x1000'0000)) && idm::check<sys_vm_t>(sys_vm_t::find_id(addr)))
if ((addr == ea || !(addr % 0x1000'0000)) && idm::check_unlocked<sys_vm_t>(sys_vm_t::find_id(addr)))
{
// Virtual memory is disallowed
return CELL_EINVAL;

View file

@ -164,7 +164,7 @@ error_code sys_rsxaudio_initialize(vm::ptr<u32> handle)
return CELL_ENOMEM;
}
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(id);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(id);
std::lock_guard lock(rsxaudio_obj->mutex);
rsxaudio_obj->shmem = vm::addr_t{vm::alloc(sizeof(rsxaudio_shmem), vm::main)};
@ -201,7 +201,7 @@ error_code sys_rsxaudio_finalize(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_finalize(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -219,7 +219,7 @@ error_code sys_rsxaudio_finalize(u32 handle)
{
std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};
rsxaudio_thread.rsxaudio_obj_ptr = {};
rsxaudio_thread.rsxaudio_obj_ptr = null_ptr;
}
rsxaudio_obj->init = false;
@ -235,7 +235,7 @@ error_code sys_rsxaudio_import_shared_memory(u32 handle, vm::ptr<u64> addr)
{
sys_rsxaudio.trace("sys_rsxaudio_import_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -264,7 +264,7 @@ error_code sys_rsxaudio_unimport_shared_memory(u32 handle, vm::ptr<u64> addr /*
{
sys_rsxaudio.trace("sys_rsxaudio_unimport_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -287,7 +287,7 @@ error_code sys_rsxaudio_create_connection(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_create_connection(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -305,15 +305,15 @@ error_code sys_rsxaudio_create_connection(u32 handle)
const error_code port_create_status = [&]() -> error_code
{
if (auto queue1 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_1_id))
if (auto queue1 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_1_id))
{
rsxaudio_obj->event_queue[0] = queue1;
if (auto queue2 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_2_id))
if (auto queue2 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_2_id))
{
rsxaudio_obj->event_queue[1] = queue2;
if (auto queue3 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_3_id))
if (auto queue3 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_3_id))
{
rsxaudio_obj->event_queue[2] = queue3;
@ -350,7 +350,7 @@ error_code sys_rsxaudio_close_connection(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_close_connection(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -367,7 +367,7 @@ error_code sys_rsxaudio_close_connection(u32 handle)
{
auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();
std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};
rsxaudio_thread.rsxaudio_obj_ptr = {};
rsxaudio_thread.rsxaudio_obj_ptr = null_ptr;
}
for (u32 q_idx = 0; q_idx < SYS_RSXAUDIO_PORT_CNT; q_idx++)
@ -382,7 +382,7 @@ error_code sys_rsxaudio_prepare_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_prepare_process(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -413,7 +413,7 @@ error_code sys_rsxaudio_start_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_start_process(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -463,7 +463,7 @@ error_code sys_rsxaudio_stop_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_stop_process(handle=0x%x)", handle);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{
@ -511,7 +511,7 @@ error_code sys_rsxaudio_get_dma_param(u32 handle, u32 flag, vm::ptr<u64> out)
{
sys_rsxaudio.trace("sys_rsxaudio_get_dma_param(handle=0x%x, flag=0x%x, out=0x%x)", handle, flag, out);
const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);
if (!rsxaudio_obj)
{

View file

@ -161,7 +161,7 @@ struct lv2_rsxaudio final : lv2_obj
vm::addr_t shmem{};
std::array<std::shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};
std::array<shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};
// lv2 uses port memory addresses for their names
static constexpr std::array<u64, SYS_RSXAUDIO_PORT_CNT> event_port_name{ 0x8000000000400100, 0x8000000000400200, 0x8000000000400300 };
@ -583,7 +583,7 @@ public:
atomic_t<bool> rsxaudio_ctx_allocated = false;
shared_mutex rsxaudio_obj_upd_m{};
std::shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};
shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};
void operator()();
rsxaudio_data_thread& operator=(thread_state state);

View file

@ -19,10 +19,9 @@ lv2_rwlock::lv2_rwlock(utils::serial& ar)
ar(owner);
}
std::shared_ptr<void> lv2_rwlock::load(utils::serial& ar)
std::function<void(void*)> lv2_rwlock::load(utils::serial& ar)
{
auto rwlock = std::make_shared<lv2_rwlock>(ar);
return lv2_obj::load(rwlock->key, rwlock);
return load_func(make_shared<lv2_rwlock>(stx::exact_t<utils::serial&>(ar)));
}
void lv2_rwlock::save(utils::serial& ar)
@ -56,7 +55,7 @@ error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<s
if (auto error = lv2_obj::create<lv2_rwlock>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
return make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
}))
{
return error;

View file

@ -40,7 +40,7 @@ struct lv2_rwlock final : lv2_obj
}
lv2_rwlock(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};

View file

@ -20,10 +20,9 @@ lv2_sema::lv2_sema(utils::serial& ar)
ar(val);
}
std::shared_ptr<void> lv2_sema::load(utils::serial& ar)
std::function<void(void*)> lv2_sema::load(utils::serial& ar)
{
auto sema = std::make_shared<lv2_sema>(ar);
return lv2_obj::load(sema->key, sema);
return load_func(make_shared<lv2_sema>(stx::exact_t<utils::serial&>(ar)));
}
void lv2_sema::save(utils::serial& ar)
@ -68,7 +67,7 @@ error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sy
if (auto error = lv2_obj::create<lv2_sema>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_sema>(protocol, ipc_key, _attr.name_u64, max_val, initial_val);
return make_shared<lv2_sema>(protocol, ipc_key, _attr.name_u64, max_val, initial_val);
}))
{
return error;

View file

@ -42,7 +42,7 @@ struct lv2_sema final : lv2_obj
}
lv2_sema(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};

View file

@ -228,7 +228,7 @@ lv2_spu_group::lv2_spu_group(utils::serial& ar) noexcept
if (ar.pop<bool>())
{
ar(id_manager::g_id);
thread = std::make_shared<named_thread<spu_thread>>(stx::launch_retainer{}, ar, this);
thread = stx::make_shared<named_thread<spu_thread>>(stx::launch_retainer{}, ar, this);
ensure(idm::import_existing<named_thread<spu_thread>>(thread, idm::last_id()));
running += !thread->stop_flag_removal_protection;
}
@ -340,7 +340,7 @@ void lv2_spu_image::save(utils::serial& ar)
}
// Get spu thread ptr, returns group ptr as well for refcounting
std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> lv2_spu_group::get_thread(u32 id)
std::pair<named_thread<spu_thread>*, shared_ptr<lv2_spu_group>> lv2_spu_group::get_thread(u32 id)
{
if (id >= 0x06000000)
{
@ -349,7 +349,7 @@ std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> lv2_spu_gro
}
// Bits 0-23 contain group id (without id base)
decltype(get_thread(0)) res{nullptr, idm::get<lv2_spu_group>((id & 0xFFFFFF) | (lv2_spu_group::id_base & ~0xFFFFFF))};
decltype(get_thread(0)) res{nullptr, idm::get_unlocked<lv2_spu_group>((id & 0xFFFFFF) | (lv2_spu_group::id_base & ~0xFFFFFF))};
// Bits 24-31 contain thread index within the group
const u32 index = id >> 24;
@ -461,7 +461,7 @@ error_code _sys_spu_image_get_information(ppu_thread& ppu, vm::ptr<sys_spu_image
return CELL_EINVAL;
}
const auto image = idm::get<lv2_obj, lv2_spu_image>(img->entry_point);
const auto image = idm::get_unlocked<lv2_obj, lv2_spu_image>(img->entry_point);
if (!image)
{
@ -544,7 +544,7 @@ error_code _sys_spu_image_get_segments(ppu_thread& ppu, vm::ptr<sys_spu_image> i
return CELL_EINVAL;
}
const auto handle = idm::get<lv2_obj, lv2_spu_image>(img->entry_point);
const auto handle = idm::get_unlocked<lv2_obj, lv2_spu_image>(img->entry_point);
if (!handle)
{
@ -604,7 +604,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
{
case SYS_SPU_IMAGE_TYPE_KERNEL:
{
const auto handle = idm::get<lv2_obj, lv2_spu_image>(image.entry_point);
const auto handle = idm::get_unlocked<lv2_obj, lv2_spu_image>(image.entry_point);
if (!handle)
{
@ -702,7 +702,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
// Read thread name
const std::string thread_name(attr_data.name.get_ptr(), std::max<u32>(attr_data.name_len, 1) - 1);
const auto group = idm::get<lv2_spu_group>(group_id);
const auto group = idm::get_unlocked<lv2_spu_group>(group_id);
if (!group)
{
@ -737,7 +737,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
ensure(idm::import<named_thread<spu_thread>>([&]()
{
const auto spu = std::make_shared<named_thread<spu_thread>>(group.get(), spu_num, thread_name, tid, false, option);
const auto spu = stx::make_shared<named_thread<spu_thread>>(group.get(), spu_num, thread_name, tid, false, option);
group->threads[inited] = spu;
group->threads_map[spu_num] = static_cast<s8>(inited);
return spu;
@ -763,7 +763,7 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
}
lock.unlock();
sys_spu.warning(u8"sys_spu_thread_initialize(): Thread “%s” created (id=0x%x)", thread_name, tid);
sys_spu.warning("sys_spu_thread_initialize(): Thread \"%s\" created (id=0x%x)", thread_name, tid);
ppu.check_state();
*thread = tid;
@ -927,7 +927,7 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
if (use_memct && mem_size)
{
const auto sct = idm::get<lv2_memory_container>(attr_data.ct);
const auto sct = idm::get_unlocked<lv2_memory_container>(attr_data.ct);
if (!sct)
{
@ -970,7 +970,7 @@ error_code sys_spu_thread_group_create(ppu_thread& ppu, vm::ptr<u32> id, u32 num
}
lock.unlock();
sys_spu.warning(u8"sys_spu_thread_group_create(): Thread group “%s” created (id=0x%x)", group->name, idm::last_id());
sys_spu.warning("sys_spu_thread_group_create(): Thread group \"%s\" created (id=0x%x)", group->name, idm::last_id());
ppu.check_state();
*id = idm::last_id();
@ -1040,7 +1040,7 @@ error_code sys_spu_thread_group_start(ppu_thread& ppu, u32 id)
sys_spu.trace("sys_spu_thread_group_start(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1126,7 +1126,7 @@ error_code sys_spu_thread_group_suspend(ppu_thread& ppu, u32 id)
sys_spu.trace("sys_spu_thread_group_suspend(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1209,7 +1209,7 @@ error_code sys_spu_thread_group_resume(ppu_thread& ppu, u32 id)
sys_spu.trace("sys_spu_thread_group_resume(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1297,7 +1297,7 @@ error_code sys_spu_thread_group_yield(ppu_thread& ppu, u32 id)
sys_spu.trace("sys_spu_thread_group_yield(id=0x%x)", id);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1331,7 +1331,7 @@ error_code sys_spu_thread_group_terminate(ppu_thread& ppu, u32 id, s32 value)
sys_spu.trace("sys_spu_thread_group_terminate(id=0x%x, value=0x%x)", id, value);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1450,7 +1450,7 @@ error_code sys_spu_thread_group_join(ppu_thread& ppu, u32 id, vm::ptr<u32> cause
sys_spu.trace("sys_spu_thread_group_join(id=0x%x, cause=*0x%x, status=*0x%x)", id, cause, status);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1556,7 +1556,7 @@ error_code sys_spu_thread_group_set_priority(ppu_thread& ppu, u32 id, s32 priori
sys_spu.trace("sys_spu_thread_group_set_priority(id=0x%x, priority=%d)", id, priority);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1582,7 +1582,7 @@ error_code sys_spu_thread_group_get_priority(ppu_thread& ppu, u32 id, vm::ptr<s3
sys_spu.trace("sys_spu_thread_group_get_priority(id=0x%x, priority=*0x%x)", id, priority);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1609,7 +1609,7 @@ error_code sys_spu_thread_group_set_cooperative_victims(ppu_thread& ppu, u32 id,
sys_spu.warning("sys_spu_thread_group_set_cooperative_victims(id=0x%x, threads_mask=0x%x)", id, threads_mask);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1637,7 +1637,7 @@ error_code sys_spu_thread_group_syscall_253(ppu_thread& ppu, u32 id, vm::ptr<sys
sys_spu.warning("sys_spu_thread_group_syscall_253(id=0x%x, info=*0x%x)", id, info);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1855,7 +1855,7 @@ error_code sys_spu_thread_group_connect_event(ppu_thread& ppu, u32 id, u32 eq, u
sys_spu.warning("sys_spu_thread_group_connect_event(id=0x%x, eq=0x%x, et=%d)", id, eq, et);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1879,7 +1879,7 @@ error_code sys_spu_thread_group_connect_event(ppu_thread& ppu, u32 id, u32 eq, u
return CELL_EINVAL;
}
auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(eq);
std::lock_guard lock(group->mutex);
@ -1904,7 +1904,7 @@ error_code sys_spu_thread_group_disconnect_event(ppu_thread& ppu, u32 id, u32 et
sys_spu.warning("sys_spu_thread_group_disconnect_event(id=0x%x, et=%d)", id, et);
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -1939,7 +1939,7 @@ error_code sys_spu_thread_connect_event(ppu_thread& ppu, u32 id, u32 eq, u32 et,
sys_spu.warning("sys_spu_thread_connect_event(id=0x%x, eq=0x%x, et=%d, spup=%d)", id, eq, et, spup);
const auto [thread, group] = lv2_spu_group::get_thread(id);
auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(eq);
if (!queue || !thread) [[unlikely]]
{
@ -2006,7 +2006,7 @@ error_code sys_spu_thread_bind_queue(ppu_thread& ppu, u32 id, u32 spuq, u32 spuq
sys_spu.warning("sys_spu_thread_bind_queue(id=0x%x, spuq=0x%x, spuq_num=0x%x)", id, spuq, spuq_num);
const auto [thread, group] = lv2_spu_group::get_thread(id);
auto queue = idm::get<lv2_obj, lv2_event_queue>(spuq);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(spuq);;
if (!queue || !thread) [[unlikely]]
{
@ -2096,8 +2096,8 @@ error_code sys_spu_thread_group_connect_event_all_threads(ppu_thread& ppu, u32 i
return CELL_EINVAL;
}
const auto group = idm::get<lv2_spu_group>(id);
const auto queue = idm::get<lv2_obj, lv2_event_queue>(eq);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(eq);
if (!group || !queue)
{
@ -2178,7 +2178,7 @@ error_code sys_spu_thread_group_disconnect_event_all_threads(ppu_thread& ppu, u3
return CELL_EINVAL;
}
const auto group = idm::get<lv2_spu_group>(id);
const auto group = idm::get_unlocked<lv2_spu_group>(id);
if (!group)
{
@ -2258,7 +2258,7 @@ error_code sys_raw_spu_recover_page_fault(ppu_thread& ppu, u32 id)
sys_spu.warning("sys_raw_spu_recover_page_fault(id=0x%x)", id);
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread) [[unlikely]]
{
@ -2367,7 +2367,7 @@ error_code sys_isolated_spu_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<voi
sys_spu_image img;
img.load(obj);
auto image_info = idm::get<lv2_obj, lv2_spu_image>(img.entry_point);
auto image_info = idm::get_unlocked<lv2_obj, lv2_spu_image>(img.entry_point);
img.deploy(thread->ls, std::span(image_info->segs.get_ptr(), image_info->nsegs));
thread->write_reg(ls_addr + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, image_info->e_entry);
@ -2402,7 +2402,7 @@ error_code raw_spu_destroy(ppu_thread& ppu, u32 id)
// TODO: CELL_EBUSY is not returned
// Kernel objects which must be removed
std::vector<std::pair<std::shared_ptr<lv2_obj>, u32>> to_remove;
std::vector<std::pair<shared_ptr<lv2_obj>, u32>> to_remove;
// Clear interrupt handlers
for (auto& intr : thread->int_ctrl)
@ -2484,7 +2484,7 @@ error_code raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 /*hwthread*/,
const auto tag = idm::import<lv2_obj, lv2_int_tag>([&]()
{
std::shared_ptr<lv2_int_tag> result;
shared_ptr<lv2_int_tag> result;
auto thread = idm::check_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
@ -2502,7 +2502,7 @@ error_code raw_spu_create_interrupt_tag(u32 id, u32 class_id, u32 /*hwthread*/,
return result;
}
result = std::make_shared<lv2_int_tag>();
result = make_single<lv2_int_tag>();
int_ctrl.tag = result;
return result;
});
@ -2543,7 +2543,7 @@ error_code raw_spu_set_int_mask(u32 id, u32 class_id, u64 mask)
return CELL_EINVAL;
}
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2582,7 +2582,7 @@ error_code raw_spu_set_int_stat(u32 id, u32 class_id, u64 stat)
return CELL_EINVAL;
}
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2620,7 +2620,7 @@ error_code raw_spu_get_int_control(u32 id, u32 class_id, vm::ptr<u64> value, ato
return CELL_EINVAL;
}
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2672,7 +2672,7 @@ error_code sys_isolated_spu_get_int_stat(ppu_thread& ppu, u32 id, u32 class_id,
template <bool isolated = false>
error_code raw_spu_read_puint_mb(u32 id, vm::ptr<u32> value)
{
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2711,7 +2711,7 @@ error_code raw_spu_set_spu_cfg(u32 id, u32 value)
fmt::throw_exception("Unexpected value (0x%x)", value);
}
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2744,7 +2744,7 @@ error_code sys_isolated_spu_set_spu_cfg(ppu_thread& ppu, u32 id, u32 value)
template <bool isolated = false>
error_code raw_spu_get_spu_cfg(u32 id, vm::ptr<u32> value)
{
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread || thread->get_type() != (isolated ? spu_type::isolated : spu_type::raw)) [[unlikely]]
{
@ -2781,7 +2781,7 @@ error_code sys_isolated_spu_start(ppu_thread& ppu, u32 id)
sys_spu.todo("sys_isolated_spu_start(id=%d)", id);
const auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
const auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu(id));
if (!thread) [[unlikely]]
{

View file

@ -296,14 +296,14 @@ struct lv2_spu_group
class ppu_thread* waiter = nullptr;
bool set_terminate = false;
std::array<std::shared_ptr<named_thread<spu_thread>>, 8> threads; // SPU Threads
std::array<shared_ptr<named_thread<spu_thread>>, 8> threads; // SPU Threads
std::array<s8, 256> threads_map; // SPU Threads map based number
std::array<std::pair<u32, std::vector<sys_spu_segment>>, 8> imgs; // Entry points, SPU image segments
std::array<std::array<u64, 4>, 8> args; // SPU Thread Arguments
std::shared_ptr<lv2_event_queue> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::shared_ptr<lv2_event_queue> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::shared_ptr<lv2_event_queue> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
shared_ptr<lv2_event_queue> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
shared_ptr<lv2_event_queue> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
shared_ptr<lv2_event_queue> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
lv2_spu_group(std::string name, u32 num, s32 _prio, s32 type, lv2_memory_container* ct, bool uses_scheduler, u32 mem_size) noexcept
: name(std::move(name))
@ -344,7 +344,7 @@ struct lv2_spu_group
return ep_sysmodule ? ep_sysmodule->send(SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY, data1, data2, data3) : CELL_ENOTCONN;
}
static std::pair<named_thread<spu_thread>*, std::shared_ptr<lv2_spu_group>> get_thread(u32 id);
static std::pair<named_thread<spu_thread>*, shared_ptr<lv2_spu_group>> get_thread(u32 id);
};
class ppu_thread;

View file

@ -16,7 +16,7 @@ namespace
struct storage_manager
{
// This is probably wrong and should be assigned per fd or something
atomic_ptr<std::shared_ptr<lv2_event_queue>> asyncequeue;
atomic_ptr<shared_ptr<lv2_event_queue>> asyncequeue;
};
}
@ -65,7 +65,7 @@ error_code sys_storage_read(u32 fd, u32 mode, u32 start_sector, u32 num_sectors,
}
std::memset(bounce_buf.get_ptr(), 0, num_sectors * 0x200ull);
const auto handle = idm::get<lv2_storage>(fd);
const auto handle = idm::get_unlocked<lv2_storage>(fd);
if (!handle)
{
@ -94,7 +94,7 @@ error_code sys_storage_write(u32 fd, u32 mode, u32 start_sector, u32 num_sectors
return CELL_EFAULT;
}
const auto handle = idm::get<lv2_storage>(fd);
const auto handle = idm::get_unlocked<lv2_storage>(fd);
if (!handle)
{
@ -119,7 +119,7 @@ error_code sys_storage_async_configure(u32 fd, u32 io_buf, u32 equeue_id, u32 un
auto& manager = g_fxo->get<storage_manager>();
if (auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id))
if (auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id))
{
manager.asyncequeue.store(queue);
}

View file

@ -9,6 +9,8 @@
#include "Emu/IdManager.h"
#include "Emu/IPC.h"
#include "util/shared_ptr.hpp"
#include <thread>
// attr_protocol (waiting scheduling policy)
@ -97,7 +99,9 @@ public:
lv2_obj() noexcept = default;
lv2_obj(u32 i) noexcept : exists{ i } {}
lv2_obj(lv2_obj&& rhs) noexcept : exists{ +rhs.exists } {}
lv2_obj(utils::serial&) noexcept {}
lv2_obj& operator=(lv2_obj&& rhs) noexcept { exists = +rhs.exists; return *this; }
void save(utils::serial&) {}
// Existence validation (workaround for shared-ptr ref-counting)
@ -348,11 +352,11 @@ public:
// EAGAIN for IDM IDs shortage
CellError error = CELL_EAGAIN;
if (!idm::import<lv2_obj, T>([&]() -> std::shared_ptr<T>
if (!idm::import<lv2_obj, T>([&]() -> shared_ptr<T>
{
std::shared_ptr<T> result = make();
shared_ptr<T> result = make();
auto finalize_construct = [&]() -> std::shared_ptr<T>
auto finalize_construct = [&]() -> shared_ptr<T>
{
if ((error = result->on_id_create()))
{
@ -413,7 +417,7 @@ public:
}
template <typename T>
static void on_id_destroy(T& obj, u64 ipc_key, u64 pshared = -1)
static void on_id_destroy(T& obj, u64 ipc_key, u64 pshared = umax)
{
if (pshared == umax)
{
@ -428,16 +432,16 @@ public:
}
template <typename T>
static std::shared_ptr<T> load(u64 ipc_key, std::shared_ptr<T> make, u64 pshared = -1)
static shared_ptr<T> load(u64 ipc_key, shared_ptr<T> make, u64 pshared = umax)
{
if (pshared == umax ? ipc_key != 0 : pshared != 0)
{
g_fxo->need<ipc_manager<T, u64>>();
make = g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]()
g_fxo->get<ipc_manager<T, u64>>().add(ipc_key, [&]()
{
return make;
}, true).second;
});
}
// Ensure no error
@ -445,6 +449,13 @@ public:
return make;
}
template <typename T, typename Storage = lv2_obj>
static std::function<void(void*)> load_func(shared_ptr<T> make, u64 pshared = umax)
{
const u64 key = make->key;
return [ptr = load<T>(key, make, pshared)](void* storage) { *static_cast<shared_ptr<Storage>*>(storage) = ptr; };
}
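A hedged note on the consumer side, which is not part of this diff: the deserializer is expected to call T::load(ar), keep the returned std::function<void(void*)>, and invoke it with the address of the shared_ptr slot it owns. Using lv2_sema (ported earlier in this commit, and routed through load_func with the default Storage of lv2_obj) as the example:

// Hypothetical call-site sketch; the actual loader lives in the ID manager.
shared_ptr<lv2_obj> slot;                              // storage owned by the loader
std::function<void(void*)> init = lv2_sema::load(ar);  // new-style loader from this commit
init(&slot);                                           // publishes the loaded semaphore into 'slot'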
static bool wait_timeout(u64 usec, ppu_thread* cpu = {}, bool scale = true, bool is_usleep = false);
static inline void notify_all()

View file

@ -20,7 +20,7 @@ LOG_CHANNEL(sys_timer);
struct lv2_timer_thread
{
shared_mutex mutex;
std::deque<std::shared_ptr<lv2_timer>> timers;
std::deque<shared_ptr<lv2_timer>> timers;
lv2_timer_thread();
void operator()();
@ -31,7 +31,7 @@ struct lv2_timer_thread
};
lv2_timer::lv2_timer(utils::serial& ar)
: lv2_obj{1}
: lv2_obj(1)
, state(ar)
, port(lv2_event_queue::load_ptr(ar, port, "timer"))
, source(ar)
@ -368,7 +368,7 @@ error_code sys_timer_connect_event_queue(ppu_thread& ppu, u32 timer_id, u32 queu
const auto timer = idm::check<lv2_obj, lv2_timer>(timer_id, [&](lv2_timer& timer) -> CellError
{
const auto found = idm::find_unlocked<lv2_obj, lv2_event_queue>(queue_id);
auto found = idm::get_unlocked<lv2_obj, lv2_event_queue>(queue_id);
if (!found)
{
@ -383,7 +383,7 @@ error_code sys_timer_connect_event_queue(ppu_thread& ppu, u32 timer_id, u32 queu
}
// Connect event queue
timer.port = std::static_pointer_cast<lv2_event_queue>(found->second);
timer.port = found;
timer.source = name ? name : (u64{process_getpid() + 0u} << 32) | u64{timer_id};
timer.data1 = data1;
timer.data2 = data2;

View file

@ -28,7 +28,7 @@ struct lv2_timer : lv2_obj
shared_mutex mutex;
atomic_t<u32> state{SYS_TIMER_STATE_STOP};
std::shared_ptr<lv2_event_queue> port;
shared_ptr<lv2_event_queue> port;
u64 source;
u64 data1;
u64 data2;
@ -40,7 +40,7 @@ struct lv2_timer : lv2_obj
u64 check_unlocked(u64 _now) noexcept;
lv2_timer() noexcept
: lv2_obj{1}
: lv2_obj(1)
{
}

View file

@ -64,7 +64,7 @@ error_code sys_vm_memory_map(ppu_thread& ppu, u64 vsize, u64 psize, u32 cid, u64
return CELL_EINVAL;
}
const auto idm_ct = idm::get<lv2_memory_container>(cid);
const auto idm_ct = idm::get_unlocked<lv2_memory_container>(cid);
const auto ct = cid == SYS_MEMORY_CONTAINER_ID_INVALID ? &g_fxo->get<lv2_memory_container>() : idm_ct.get();
@ -260,7 +260,7 @@ error_code sys_vm_lock(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -281,7 +281,7 @@ error_code sys_vm_unlock(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -302,7 +302,7 @@ error_code sys_vm_touch(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -323,7 +323,7 @@ error_code sys_vm_flush(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -344,7 +344,7 @@ error_code sys_vm_invalidate(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -365,7 +365,7 @@ error_code sys_vm_store(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -386,7 +386,7 @@ error_code sys_vm_sync(ppu_thread& ppu, u32 addr, u32 size)
return CELL_EINVAL;
}
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -402,7 +402,7 @@ error_code sys_vm_test(ppu_thread& ppu, u32 addr, u32 size, vm::ptr<u64> result)
sys_vm.warning("sys_vm_test(addr=0x%x, size=0x%x, result=*0x%x)", addr, size, result);
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || u64{addr} + size > u64{block->addr} + block->size)
{
@ -421,7 +421,7 @@ error_code sys_vm_get_statistics(ppu_thread& ppu, u32 addr, vm::ptr<sys_vm_stati
sys_vm.warning("sys_vm_get_statistics(addr=0x%x, stat=*0x%x)", addr, stat);
const auto block = idm::get<sys_vm_t>(sys_vm_t::find_id(addr));
const auto block = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr));
if (!block || block->addr != addr)
{

View file

@ -587,7 +587,7 @@ bool gdb_thread::cmd_thread_info(gdb_cmd&)
bool gdb_thread::cmd_current_thread(gdb_cmd&)
{
return send_cmd_ack(selected_thread.expired() ? "" : ("QC" + u64_to_padded_hex(selected_thread.lock()->id)));
return send_cmd_ack(selected_thread && selected_thread->state.none_of(cpu_flag::exit) ? "" : ("QC" + u64_to_padded_hex(selected_thread->id)));
}
bool gdb_thread::cmd_read_register(gdb_cmd& cmd)
@ -596,8 +596,13 @@ bool gdb_thread::cmd_read_register(gdb_cmd& cmd)
{
return send_cmd_ack("E02");
}
auto th = selected_thread.lock();
if (auto ppu = th->try_get<named_thread<ppu_thread>>())
if (!selected_thread || selected_thread->state & cpu_flag::exit)
{
return send_cmd_ack("");
}
if (auto ppu = selected_thread->try_get<named_thread<ppu_thread>>())
{
u32 rid = hex_to_u32(cmd.data);
std::string result = get_reg(ppu, rid);
@ -608,7 +613,8 @@ bool gdb_thread::cmd_read_register(gdb_cmd& cmd)
}
return send_cmd_ack(result);
}
GDB.warning("Unimplemented thread type %d.", th->id_type());
GDB.warning("Unimplemented thread type %d.", selected_thread->id_type());
return send_cmd_ack("");
}
@ -618,10 +624,14 @@ bool gdb_thread::cmd_write_register(gdb_cmd& cmd)
{
return send_cmd_ack("E02");
}
auto th = selected_thread.lock();
if (th->get_class() == thread_class::ppu)
if (!selected_thread || selected_thread->state & cpu_flag::exit)
{
return send_cmd_ack("");
}
if (auto ppu = selected_thread->try_get<named_thread<ppu_thread>>())
{
auto ppu = static_cast<named_thread<ppu_thread>*>(th.get());
usz eq_pos = cmd.data.find('=');
if (eq_pos == umax)
{
@ -637,7 +647,7 @@ bool gdb_thread::cmd_write_register(gdb_cmd& cmd)
}
return send_cmd_ack("OK");
}
GDB.warning("Unimplemented thread type %d.", th->id_type());
GDB.warning("Unimplemented thread type %d.", selected_thread->id_type());
return send_cmd_ack("");
}
@ -707,10 +717,13 @@ bool gdb_thread::cmd_read_all_registers(gdb_cmd&)
std::string result;
select_thread(general_ops_thread_id);
auto th = selected_thread.lock();
if (th->get_class() == thread_class::ppu)
if (!selected_thread || selected_thread->state & cpu_flag::exit)
{
return send_cmd_ack("");
}
if (auto ppu = selected_thread->try_get<named_thread<ppu_thread>>())
{
auto ppu = static_cast<named_thread<ppu_thread>*>(th.get());
// 68 64-bit registers, and 3 32-bit
result.reserve(68*16 + 3*8);
for (int i = 0; i < 71; ++i)
@ -719,17 +732,22 @@ bool gdb_thread::cmd_read_all_registers(gdb_cmd&)
}
return send_cmd_ack(result);
}
GDB.warning("Unimplemented thread type %d.", th->id_type());
GDB.warning("Unimplemented thread type %d.", selected_thread->id_type());
return send_cmd_ack("");
}
bool gdb_thread::cmd_write_all_registers(gdb_cmd& cmd)
{
select_thread(general_ops_thread_id);
auto th = selected_thread.lock();
if (th->get_class() == thread_class::ppu)
if (!selected_thread || selected_thread->state & cpu_flag::exit)
{
return send_cmd_ack("");
}
if (auto ppu = selected_thread->try_get<named_thread<ppu_thread>>())
{
auto ppu = static_cast<named_thread<ppu_thread>*>(th.get());
int ptr = 0;
for (int i = 0; i < 71; ++i)
{
@ -739,7 +757,8 @@ bool gdb_thread::cmd_write_all_registers(gdb_cmd& cmd)
}
return send_cmd_ack("OK");
}
GDB.warning("Unimplemented thread type %d.", th->id_type());
GDB.warning("Unimplemented thread type %d.", selected_thread->id_type());
return send_cmd_ack("E01");
}
@ -789,13 +808,23 @@ bool gdb_thread::cmd_vcont(gdb_cmd& cmd)
if (cmd.data[1] == 'c' || cmd.data[1] == 's')
{
select_thread(continue_ops_thread_id);
auto ppu = std::static_pointer_cast<named_thread<ppu_thread>>(selected_thread.lock());
auto ppu = !selected_thread || selected_thread->state & cpu_flag::exit ? nullptr : selected_thread->try_get<named_thread<ppu_thread>>();
paused = false;
if (cmd.data[1] == 's')
if (ppu)
{
ppu->state += cpu_flag::dbg_step;
bs_t<cpu_flag> add_flags{};
if (cmd.data[1] == 's')
{
add_flags += cpu_flag::dbg_step;
}
ppu->add_remove_flags(add_flags, cpu_flag::dbg_pause);
}
ppu->state -= cpu_flag::dbg_pause;
// Special case if the app hasn't started yet (only loaded)
if (Emu.IsReady())
{
@ -805,19 +834,21 @@ bool gdb_thread::cmd_vcont(gdb_cmd& cmd)
{
Emu.Resume();
}
else
{
ppu->state.notify_one();
}
wait_with_interrupts();
// We are in all-stop mode
Emu.Pause();
select_thread(pausedBy);
// We have to remove dbg_pause from the thread that paused execution, otherwise
// it will stay paused forever (Emu.Resume only removes dbg_global_pause)
ppu = std::static_pointer_cast<named_thread<ppu_thread>>(selected_thread.lock());
ppu = !selected_thread || selected_thread->state & cpu_flag::exit ? nullptr : selected_thread->try_get<named_thread<ppu_thread>>();
if (ppu)
ppu->state -= cpu_flag::dbg_pause;
{
ppu->add_remove_flags({}, cpu_flag::dbg_pause);
}
return send_reason();
}
return send_cmd_ack("");

View file

@ -16,7 +16,7 @@ class gdb_thread
int server_socket = -1;
int client_socket = -1;
std::weak_ptr<cpu_thread> selected_thread{};
shared_ptr<cpu_thread> selected_thread{};
u64 continue_ops_thread_id = ANY_THREAD;
u64 general_ops_thread_id = ANY_THREAD;

View file

@ -5,11 +5,13 @@
#include "Utilities/mutex.h"
#include "util/shared_ptr.hpp"
// IPC manager for objects of type T and IPC keys of type K.
template <typename T, typename K>
class ipc_manager final
{
std::unordered_map<K, std::shared_ptr<T>> m_map;
std::unordered_map<K, shared_ptr<T>> m_map;
mutable shared_mutex m_mutex;
@ -17,12 +19,12 @@ public:
// Add a new object if the specified ipc_key is not already used
// .first: whether a new object was added; .second: what m_map[key] holds after this call if (peek_ptr || added new object) is true
template <typename F>
std::pair<bool, std::shared_ptr<T>> add(const K& ipc_key, F&& provider, bool peek_ptr = true)
std::pair<bool, shared_ptr<T>> add(const K& ipc_key, F&& provider, bool peek_ptr = true)
{
std::lock_guard lock(m_mutex);
// Get object location
std::shared_ptr<T>& ptr = m_map[ipc_key];
shared_ptr<T>& ptr = m_map[ipc_key];
const bool existed = ptr.operator bool();
if (!existed)
@ -32,7 +34,7 @@ public:
}
const bool added = !existed && ptr;
return {added, (peek_ptr || added) ? ptr : nullptr};
return {added, (peek_ptr || added) ? ptr : null_ptr};
}
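// Illustrative usage (editorial, not from this commit), assuming some make_shared-able T:
//   auto [added, obj] = g_fxo->get<ipc_manager<T, u64>>().add(key, [&] { return make_shared<T>(); });
// 'added' reports whether the provider's object was installed under the key; 'obj' is whatever the map
// holds afterwards, or a null pointer when peek_ptr is false and nothing new was added.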
// Unregister specified ipc_key, may return true even if the object doesn't exist anymore
@ -44,7 +46,7 @@ public:
}
// Get object with specified ipc_key
std::shared_ptr<T> get(const K& ipc_key) const
shared_ptr<T> get(const K& ipc_key) const
{
reader_lock lock(m_mutex);
@ -55,7 +57,7 @@ public:
return found->second;
}
return nullptr;
return {};
}
// Check whether the object actually exists

View file

@ -33,7 +33,7 @@ std::vector<std::pair<u128, id_manager::typeinfo>>& id_manager::get_typeinfo_map
return s_map;
}
idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, bool uses_lowest_id, std::pair<u32, u32> invl_range)
id_manager::id_key* idm::allocate_id(std::span<id_manager::id_key> keys, usz& highest_index, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, bool uses_lowest_id, std::pair<u32, u32> invl_range)
{
if (dst_id != (base ? 0 : u32{umax}))
{
@ -41,44 +41,43 @@ idm::map_data* idm::allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst
const u32 index = id_manager::get_index(dst_id, base, step, count, invl_range);
ensure(index < count);
vec.resize(std::max<usz>(vec.size(), index + 1));
highest_index = std::max<usz>(highest_index, index + 1);
if (vec[index].second)
if (keys[index].type() != umax)
{
return nullptr;
}
id_manager::g_id = dst_id;
vec[index] = {id_manager::id_key(dst_id, type_id), nullptr};
return &vec[index];
keys[index] = id_manager::id_key(dst_id, type_id);
return &keys[index];
}
if (uses_lowest_id)
{
// Disable the optimization below (hurts accuracy for known cases)
vec.resize(count);
highest_index = count;
}
else if (vec.size() < count)
else if (highest_index < count)
{
// Try to emplace back
const u32 _next = base + step * ::size32(vec);
const u32 _next = base + step * highest_index;
id_manager::g_id = _next;
vec.emplace_back(id_manager::id_key(_next, type_id), nullptr);
return &vec.back();
return &(keys[highest_index++] = (id_manager::id_key(_next, type_id)));
}
// Check all IDs starting from "next id" (TODO)
for (u32 i = 0, next = base; i < count; i++, next += step)
{
const auto ptr = &vec[i];
const auto ptr = &keys[i];
// Look for free ID
if (!ptr->second)
if (ptr->type() == umax)
{
// Increment ID invalidation counter
const u32 id = next | ((ptr->first + (1u << invl_range.first)) & (invl_range.second ? (((1u << invl_range.second) - 1) << invl_range.first) : 0));
const u32 id = next | ((ptr->value() + (1u << invl_range.first)) & (invl_range.second ? (((1u << invl_range.second) - 1) << invl_range.first) : 0));
id_manager::g_id = id;
ptr->first = id_manager::id_key(id, type_id);
*ptr = id_manager::id_key(id, type_id);
return ptr;
}
}
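// Worked example (editorial, hypothetical numbers): with invl_range = {8, 8} the mask is 0xFF00, so
// bits 8..15 of the returned ID form a reuse counter that is bumped by 0x100 each time this slot is
// handed out again; find_index()/find_id() then reject stale handles whose counter bits no longer match.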

View file

@ -7,8 +7,10 @@
#include <vector>
#include <map>
#include <typeinfo>
#include <span>
#include "util/serialization.hpp"
#include "util/shared_ptr.hpp"
#include "util/fixed_typemap.hpp"
extern stx::manual_typemap<void, 0x20'00000, 128> g_fixed_typemap;
@ -19,9 +21,24 @@ enum class thread_state : u32;
extern u16 serial_breathe_and_tag(utils::serial& ar, std::string_view name, bool tag_bit);
template <typename T>
concept IdmCompatible = requires () { u32{T::id_base}, u32{T::id_step}, u32{T::id_count}; };
template <typename T>
concept IdmBaseCompatible = (std::is_final_v<T> ? IdmCompatible<T> : !!(requires () { u32{T::id_step}, u32{T::id_count}; }));
template <typename T>
concept IdmSavable = IdmBaseCompatible<T> && T::savestate_init_pos != 0 && (requires () { std::declval<T>().save(std::declval<stx::exact_t<utils::serial&>>()); });
// If id_base is declared in the base type, then the storage type must declare id_type
template <typename Base, typename Type>
concept IdmTypesCompatible = PtrSame<Base, Type> && IdmCompatible<Type> && IdmBaseCompatible<Base> && (std::is_same_v<Base, Type> || !IdmCompatible<Base> || !!(requires () { u32{Type::id_type}; }));
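// Editorial sketch of a minimal IdmCompatible type (illustrative only, not from this commit):
//   struct my_idm_obj { static constexpr u32 id_base = 0x100, id_step = 1, id_count = 1024; };
// A non-final base only needs id_step/id_count (IdmBaseCompatible); a storage type sharing an
// IdmCompatible base's container must additionally declare id_type so its per-container type id stays unique.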
// Helper namespace
namespace id_manager
{
using pointer_keeper = std::function<void(void*)>;
// Common global mutex
extern shared_mutex g_mutex;
@ -31,7 +48,7 @@ namespace id_manager
return {0, 0};
}
template <typename T> requires requires () { T::id_invl_range; }
template <typename T> requires requires () { T::id_invl_range.first + T::id_invl_range.second; }
constexpr std::pair<u32, u32> get_invl_range()
{
return T::id_invl_range;
@ -49,15 +66,6 @@ namespace id_manager
return T::id_lowest;
}
template <typename T>
concept IdmCompatible = requires () { +T::id_base, +T::id_step, +T::id_count; };
template <typename T>
concept IdmBaseCompatible = (std::is_final_v<T> ? IdmCompatible<T> : !!(requires () { +T::id_step, +T::id_count; }));
template <typename T>
concept IdmSavable = IdmBaseCompatible<T> && T::savestate_init_pos != 0 && (requires () { std::declval<T>().save(std::declval<stx::exact_t<utils::serial&>>()); });
// Last allocated ID for constructors
extern thread_local u32 g_id;
@ -102,23 +110,27 @@ namespace id_manager
template <typename T, typename = void>
struct id_traits_load_func
{
static constexpr std::shared_ptr<void>(*load)(utils::serial&) = [](utils::serial& ar) -> std::shared_ptr<void>
static constexpr pointer_keeper(*load)(utils::serial&) = [](utils::serial& ar) -> pointer_keeper
{
stx::shared_ptr<T> ptr;
if constexpr (std::is_constructible_v<T, stx::exact_t<const stx::launch_retainer&>, stx::exact_t<utils::serial&>>)
{
return std::make_shared<T>(stx::launch_retainer{}, stx::exact_t<utils::serial&>(ar));
ptr = stx::make_shared<T>(stx::launch_retainer{}, stx::exact_t<utils::serial&>(ar));
}
else
{
return std::make_shared<T>(stx::exact_t<utils::serial&>(ar));
ptr = stx::make_shared<T>(stx::exact_t<utils::serial&>(ar));
}
return [ptr](void* storage) { *static_cast<stx::shared_ptr<T>*>(storage) = ptr; };
};
};
template <typename T>
struct id_traits_load_func<T, std::void_t<decltype(&T::load)>>
{
static constexpr std::shared_ptr<void>(*load)(utils::serial&) = [](utils::serial& ar) -> std::shared_ptr<void>
static constexpr pointer_keeper(*load)(utils::serial&) = [](utils::serial& ar) -> pointer_keeper
{
return T::load(stx::exact_t<utils::serial&>(ar));
};
@ -138,8 +150,8 @@ namespace id_manager
struct dummy_construct
{
dummy_construct() {}
dummy_construct(utils::serial&){}
dummy_construct() = default;
dummy_construct(utils::serial&) noexcept {}
void save(utils::serial&) {}
static constexpr u32 id_base = 1, id_step = 1, id_count = 1;
@ -154,7 +166,7 @@ namespace id_manager
struct typeinfo
{
public:
std::shared_ptr<void>(*load)(utils::serial&);
std::function<void(void*)>(*load)(utils::serial&);
void(*save)(utils::serial&, void*);
bool(*savable)(void* ptr);
@ -164,13 +176,6 @@ namespace id_manager
bool uses_lowest_id;
std::pair<u32, u32> invl_range;
// Get type index
template <typename T>
static inline u32 get_index()
{
return stx::typeindex<id_manager::typeinfo, T>();
}
// Unique type ID within the same container: we use id_base if nothing else was specified
template <typename T>
static consteval u32 get_type()
@ -205,11 +210,11 @@ namespace id_manager
const u128 key = u128{get_type<C>()} << 64 | std::bit_cast<u64>(C::savestate_init_pos);
for (const auto& tinfo : get_typeinfo_map())
for (const auto& [tkey, tinfo] : get_typeinfo_map())
{
if (!(tinfo.first ^ key))
if (!(tkey ^ key))
{
ensure(!std::memcmp(&info, &tinfo.second, sizeof(info)));
ensure(tinfo == info);
return info;
}
}
@ -230,18 +235,23 @@ namespace id_manager
return info;
}
bool operator==(const typeinfo& rhs) const noexcept
{
return base == rhs.base && invl_range == rhs.invl_range && save == rhs.save;
}
};
// ID value with additional type stored
class id_key
{
u32 m_value; // ID value
u32 m_base; // ID base (must be unique for each type in the same container)
u32 m_value = 0; // ID value
u32 m_base = umax; // ID base (must be unique for each type in the same container)
public:
id_key() = default;
id_key() noexcept = default;
id_key(u32 value, u32 type)
id_key(u32 value, u32 type) noexcept
: m_value(value)
, m_base(type)
{
@ -257,7 +267,12 @@ namespace id_manager
return m_base;
}
operator u32() const
void clear()
{
m_base = umax;
}
operator u32() const noexcept
{
return m_value;
}
@ -268,14 +283,14 @@ namespace id_manager
{
static_assert(IdmBaseCompatible<T>, "Please specify IDM compatible type.");
std::vector<std::pair<id_key, std::shared_ptr<void>>> vec{}, private_copy{};
std::array<stx::atomic_ptr<T>, T::id_count> vec_data{};
std::array<stx::shared_ptr<T>, T::id_count> private_copy{};
std::array<id_key, T::id_count> vec_keys{};
usz highest_index = 0;
shared_mutex mutex{}; // TODO: Use this instead of global mutex
id_map() noexcept
{
// Preallocate memory
vec.reserve(T::id_count);
}
id_map() noexcept = default;
// Order it directly before the source type's position
static constexpr double savestate_init_pos_original = T::savestate_init_pos;
@ -283,10 +298,6 @@ namespace id_manager
id_map(utils::serial& ar) noexcept requires IdmSavable<T>
{
vec.resize(T::id_count);
usz highest = 0;
while (true)
{
const u16 tag = serial_breathe_and_tag(ar, g_fxo->get_name<id_map<T>>(), false);
@ -320,25 +331,25 @@ namespace id_manager
g_id = id;
const usz object_index = get_index(id, info->base, info->step, info->count, info->invl_range);
auto& obj = ::at32(vec, object_index);
ensure(!obj.second);
auto& obj = ::at32(vec_data, object_index);
ensure(!obj);
highest = std::max<usz>(highest, object_index + 1);
highest_index = std::max<usz>(highest_index, object_index + 1);
obj.first = id_key(id, static_cast<u32>(static_cast<u64>(type_init_pos >> 64)));
obj.second = info->load(ar);
vec_keys[object_index] = id_key(id, static_cast<u32>(static_cast<u64>(type_init_pos >> 64)));
info->load(ar)(&obj);
}
vec.resize(highest);
}
void save(utils::serial& ar) requires IdmSavable<T>
{
for (const auto& p : vec)
for (const auto& p : vec_data)
{
if (!p.second) continue;
if (!p) continue;
const u128 type_init_pos = u128{p.first.type()} << 64 | std::bit_cast<u64>(T::savestate_init_pos);
auto& key = vec_keys[&p - vec_data.data()];
const u128 type_init_pos = u128{key.type()} << 64 | std::bit_cast<u64>(T::savestate_init_pos);
const typeinfo* info = nullptr;
// Search load functions for the one of this type (see make_typeinfo() for an explanation of the key composition)
@ -351,13 +362,13 @@ namespace id_manager
}
// Save each object with needed information
if (info && info->savable(p.second.get()))
if (info && info->savable(p.observe()))
{
// Create a tag for each object
serial_breathe_and_tag(ar, g_fxo->get_name<id_map<T>>(), false);
ar(p.first.value(), p.first.type());
info->save(ar, p.second.get());
ar(key.value(), key.type());
info->save(ar, p.observe());
}
}
@ -367,22 +378,23 @@ namespace id_manager
id_map& operator=(thread_state state) noexcept requires (std::is_assignable_v<T&, thread_state>)
{
private_copy.clear();
if (!vec.empty() || !private_copy.empty())
if (highest_index)
{
reader_lock lock(g_mutex);
// Save all entries
private_copy = vec;
for (usz i = 0; i < highest_index; i++)
{
private_copy[i] = vec_data[i].load();
}
}
// Signal or join threads
for (const auto& [key, ptr] : private_copy)
for (const auto& ptr : private_copy)
{
if (ptr)
{
*static_cast<T*>(ptr.get()) = state;
*ptr = state;
}
}
@ -422,24 +434,27 @@ class idm
// Helper type: pointer + return value propagated
template <typename T, typename RT>
struct return_pair
struct return_pair;
template <typename T, typename RT>
struct return_pair<stx::shared_ptr<T>, RT>
{
std::shared_ptr<T> ptr;
stx::shared_ptr<T> ptr;
RT ret;
explicit operator bool() const
explicit operator bool() const noexcept
{
return ptr.operator bool();
}
T& operator*() const
T& operator*() const noexcept
{
return *ptr;
}
T* operator->() const
T* operator->() const noexcept
{
return ptr.get();
return ptr.operator->();
}
};
@ -450,61 +465,67 @@ class idm
T* ptr;
RT ret;
explicit operator bool() const
explicit operator bool() const noexcept
{
return ptr != nullptr;
}
T& operator*() const
T& operator*() const noexcept
{
return *ptr;
}
T* operator->() const
T* operator->() const noexcept
{
return ptr;
}
};
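// Editorial note: both specializations bundle the looked-up object with the callback's return value;
// operator bool() only tests the pointer, e.g. (illustrative, query()/use() are hypothetical helpers)
//   if (auto res = idm::check<T>(id, [](T& obj) { return query(obj); })) { use(res.ret, *res); }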
using map_data = std::pair<id_manager::id_key, std::shared_ptr<void>>;
// Get type ID that is meant to be unique within the same container
template <typename T>
static consteval u32 get_type()
{
return id_manager::typeinfo::get_type<T>();
}
// Prepare new ID (returns nullptr if out of resources)
static map_data* allocate_id(std::vector<map_data>& vec, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, bool uses_lowest_id, std::pair<u32, u32> invl_range);
static id_manager::id_key* allocate_id(std::span<id_manager::id_key> vec, usz& highest_index, u32 type_id, u32 dst_id, u32 base, u32 step, u32 count, bool uses_lowest_id, std::pair<u32, u32> invl_range);
// Get object by internal index if it exists (additionally check the type if types are not equal)
template <typename T, typename Type>
static map_data* find_index(u32 index, u32 id)
static std::pair<atomic_ptr<T>*, id_manager::id_key*> find_index(u32 index, u32 id)
{
static_assert(PtrSame<T, Type>, "Invalid ID type combination");
static_assert(IdmTypesCompatible<T, Type>, "Invalid ID type combination");
auto& vec = g_fxo->get<id_manager::id_map<T>>().vec;
auto& map = g_fxo->get<id_manager::id_map<T>>();
if (index >= vec.size())
if (index >= map.highest_index)
{
return nullptr;
return {};
}
auto& data = vec[index];
auto& data = map.vec_data[index];
auto& key = map.vec_keys[index];
if (data.second)
if (data)
{
if (std::is_same_v<T, Type> || data.first.type() == get_type<Type>())
if (std::is_same_v<T, Type> || key.type() == get_type<Type>())
{
if (!id_manager::id_traits<Type>::invl_range.second || data.first.value() == id)
if (!id_manager::id_traits<Type>::invl_range.second || key.value() == id)
{
return &data;
return { &data, &key };
}
}
}
return nullptr;
return {};
}
// Find ID
template <typename T, typename Type>
static map_data* find_id(u32 id)
static std::pair<atomic_ptr<T>*, id_manager::id_key*> find_id(u32 id)
{
static_assert(PtrSame<T, Type>, "Invalid ID type combination");
static_assert(IdmTypesCompatible<T, Type>, "Invalid ID type combination");
const u32 index = get_index<Type>(id);
@ -513,9 +534,9 @@ class idm
// Allocate new ID (or use fixed ID) and assign the object from the provider()
template <typename T, typename Type, typename F>
static map_data* create_id(F&& provider, u32 id = id_manager::id_traits<Type>::invalid)
static stx::shared_ptr<Type> create_id(F&& provider, u32 id = id_manager::id_traits<Type>::invalid)
{
static_assert(PtrSame<T, Type>, "Invalid ID type combination");
static_assert(IdmTypesCompatible<T, Type>, "Invalid ID type combination");
// ID traits
using traits = id_manager::id_traits<Type>;
@ -528,18 +549,23 @@ class idm
auto& map = g_fxo->get<id_manager::id_map<T>>();
if (auto* place = allocate_id(map.vec, get_type<Type>(), id, traits::base, traits::step, traits::count, traits::uses_lowest_id, traits::invl_range))
if (auto* key_ptr = allocate_id({map.vec_keys.data(), map.vec_keys.size()}, map.highest_index, get_type<Type>(), id, traits::base, traits::step, traits::count, traits::uses_lowest_id, traits::invl_range))
{
// Get object, store it
place->second = provider();
auto& place = map.vec_data[key_ptr - map.vec_keys.data()];
if (place->second)
// Get object, store it
if (auto object = provider())
{
return place;
place = object;
return object;
}
else
{
key_ptr->clear();
}
}
return nullptr;
return {};
}
public:
@ -549,7 +575,16 @@ public:
static inline void clear()
{
std::lock_guard lock(id_manager::g_mutex);
g_fxo->get<id_manager::id_map<T>>().vec.clear();
for (auto& ptr : g_fxo->get<id_manager::id_map<T>>().vec_data)
{
ptr.reset();
}
for (auto& key : g_fxo->get<id_manager::id_map<T>>().vec_keys)
{
key.clear();
}
}
// Get last ID (updated in create_id/allocate_id)
@ -558,44 +593,38 @@ public:
return id_manager::g_id;
}
// Get type ID that is meant to be unique within the same container
template <typename T>
static consteval u32 get_type()
{
return id_manager::typeinfo::get_type<T>();
}
// Add a new ID of specified type with specified constructor arguments (returns object or nullptr)
// Add a new ID of specified type with specified constructor arguments (returns object or null_ptr)
template <typename T, typename Make = T, typename... Args> requires (std::is_constructible_v<Make, Args&&...>)
static inline std::shared_ptr<Make> make_ptr(Args&&... args)
static inline stx::shared_ptr<Make> make_ptr(Args&&... args)
{
if (auto pair = create_id<T, Make>([&] { return std::make_shared<Make>(std::forward<Args>(args)...); }))
if (auto pair = create_id<T, Make>([&] { return stx::make_shared<Make>(std::forward<Args>(args)...); }))
{
return {pair->second, static_cast<Make*>(pair->second.get())};
return pair;
}
return nullptr;
return null_ptr;
}
// Add a new ID of specified type with specified constructor arguments (returns id)
template <typename T, typename Make = T, typename... Args> requires (std::is_constructible_v<Make, Args&&...>)
static inline u32 make(Args&&... args)
{
if (auto pair = create_id<T, Make>([&] { return std::make_shared<Make>(std::forward<Args>(args)...); }))
if (create_id<T, Make>([&] { return stx::make_shared<Make>(std::forward<Args>(args)...); }))
{
return pair->first;
return last_id();
}
return id_manager::id_traits<Make>::invalid;
}
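// Illustrative usage (editorial, 'my_obj' is a hypothetical IdmCompatible type):
//   const u32 id = idm::make<my_obj>(ctor_args...);      // registers and returns the new ID
//   auto ptr     = idm::make_ptr<my_obj>(ctor_args...);  // registers and returns stx::shared_ptr
// On allocation failure make() yields id_traits<my_obj>::invalid and make_ptr() yields null_ptr.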
// Add a new ID for an object returned by provider()
template <typename T, typename Made = T, typename F> requires (std::is_invocable_v<F&&>)
template <typename T, typename Made = T, typename F>
requires IdmTypesCompatible<T, Made> && std::is_convertible_v<std::invoke_result_t<F&&>, stx::shared_ptr<Made>>
static inline u32 import(F&& provider, u32 id = id_manager::id_traits<Made>::invalid)
{
if (auto pair = create_id<T, Made>(std::forward<F>(provider), id))
if (create_id<T, Made>(std::forward<F>(provider), id))
{
return pair->first;
return last_id();
}
return id_manager::id_traits<Made>::invalid;
@ -603,41 +632,28 @@ public:
// Add a new ID for an existing object (returns the new id)
template <typename T, typename Made = T>
static inline u32 import_existing(std::shared_ptr<T> ptr, u32 id = id_manager::id_traits<Made>::invalid)
requires IdmTypesCompatible<T, Made>
static inline u32 import_existing(stx::shared_ptr<Made> ptr, u32 id = id_manager::id_traits<Made>::invalid)
{
return import<T, Made>([&] { return std::move(ptr); }, id);
}
// Access the ID record without locking (unsafe)
template <typename T, typename Get = T>
static inline map_data* find_unlocked(u32 id)
{
return find_id<T, Get>(id);
return import<T, Made>([&]() -> stx::shared_ptr<Made> { return std::move(ptr); }, id);
}
// Check the ID without locking (can be called from another method)
template <typename T, typename Get = T>
requires IdmTypesCompatible<T, Get>
static inline Get* check_unlocked(u32 id)
{
if (const auto found = find_id<T, Get>(id))
if (const auto found = find_id<T, Get>(id); found.first)
{
return static_cast<Get*>(found->second.get());
return static_cast<Get*>(found.first->observe());
}
return nullptr;
}
// Check the ID
template <typename T, typename Get = T>
static inline Get* check(u32 id)
{
reader_lock lock(id_manager::g_mutex);
return check_unlocked<T, Get>(id);
}
// Check the ID, access object under shared lock
template <typename T, typename Get = T, typename F, typename FRT = std::invoke_result_t<F, Get&>>
requires IdmTypesCompatible<T, Get>
static inline std::conditional_t<std::is_void_v<FRT>, Get*, return_pair<Get*, FRT>> check(u32 id, F&& func)
{
const u32 index = get_index<Get>(id);
@ -649,9 +665,9 @@ public:
reader_lock lock(id_manager::g_mutex);
if (const auto found = find_index<T, Get>(index, id))
if (const auto found = find_index<T, Get>(index, id); found.first)
{
const auto ptr = static_cast<Get*>(found->second.get());
const auto ptr = static_cast<Get*>(found.first->observe());
if constexpr (!std::is_void_v<FRT>)
{
@ -669,57 +685,51 @@ public:
// Get the object without locking (can be called from another method)
template <typename T, typename Get = T>
static inline std::shared_ptr<Get> get_unlocked(u32 id)
requires IdmTypesCompatible<T, Get>
static inline stx::shared_ptr<Get> get_unlocked(u32 id)
{
const auto found = find_id<T, Get>(id);
if (found == nullptr) [[unlikely]]
if (!found.first) [[unlikely]]
{
return nullptr;
return null_ptr;
}
return std::static_pointer_cast<Get>(found->second);
}
// Get the object
template <typename T, typename Get = T>
static inline std::shared_ptr<Get> get(u32 id)
{
reader_lock lock(id_manager::g_mutex);
return get_unlocked<T, Get>(id);
return static_cast<stx::shared_ptr<Get>>(found.first->load());
}
// Get the object, access object under reader lock
template <typename T, typename Get = T, typename F, typename FRT = std::invoke_result_t<F, Get&>>
static inline std::conditional_t<std::is_void_v<FRT>, std::shared_ptr<Get>, return_pair<Get, FRT>> get(u32 id, F&& func)
requires IdmTypesCompatible<T, Get>
static inline std::conditional_t<std::is_void_v<FRT>, stx::shared_ptr<Get>, return_pair<stx::shared_ptr<Get>, FRT>> get(u32 id, F&& func)
{
const u32 index = get_index<Get>(id);
if (index >= id_manager::id_traits<Get>::count)
{
return {nullptr};
return {};
}
reader_lock lock(id_manager::g_mutex);
const auto found = find_index<T, Get>(index, id);
if (found == nullptr) [[unlikely]]
if (!found.first) [[unlikely]]
{
return {nullptr};
return {};
}
const auto ptr = static_cast<Get*>(found->second.get());
auto ptr = static_cast<stx::shared_ptr<Get>>(found.first->load());
Get* obj_ptr = ptr.get();
if constexpr (std::is_void_v<FRT>)
{
func(*ptr);
return {found->second, ptr};
func(*obj_ptr);
return ptr;
}
else
{
return {{found->second, ptr}, func(*ptr)};
return {std::move(ptr), func(*obj_ptr)};
}
}
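// Editorial example (hypothetical callback): the functor runs under the reader lock and the returned
// value keeps the object referenced afterwards, e.g.
//   auto res = idm::get<T>(id, [](T& obj) { return inspect(obj); }); // res.ptr + res.ret when found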
@ -728,9 +738,10 @@ public:
// Access all objects of specified type. Returns the number of objects processed.
// If function result evaluates to true, stop and return the object and the value.
template <typename T, typename... Get, typename F, typename Lock = std::true_type>
static inline auto select(F&& func, Lock = {})
requires IdmBaseCompatible<T> && (IdmCompatible<Get> && ...) && (std::is_invocable_v<F, u32, Get&> && ...)
static inline auto select(F&& func, Lock = std::true_type{})
{
static_assert((PtrSame<T, Get> && ...), "Invalid ID type combination");
static_assert((IdmTypesCompatible<T, Get> && ...), "Invalid ID type combination");
[[maybe_unused]] std::conditional_t<!!Lock(), reader_lock, const shared_mutex&> lock(id_manager::g_mutex);
@ -740,23 +751,30 @@ public:
static_assert(PtrSame<object_type, T>, "Invalid function argument type combination");
std::conditional_t<std::is_void_v<result_type>, u32, return_pair<object_type, result_type>> result{};
std::conditional_t<std::is_void_v<result_type>, u32, return_pair<stx::shared_ptr<object_type>, result_type>> result{};
for (auto& id : g_fxo->get<id_manager::id_map<T>>().vec)
auto& map = g_fxo->get<id_manager::id_map<T>>();
for (auto& id : map.vec_data)
{
if (auto ptr = static_cast<object_type*>(id.second.get()))
if (auto ptr = static_cast<object_type*>(id.observe()))
{
if (sizeof...(Get) == 0 || ((id.first.type() == get_type<Get>()) || ...))
auto& key = map.vec_keys[&id - map.vec_data.data()];
if (sizeof...(Get) == 0 || ((key.type() == get_type<Get>()) || ...))
{
if constexpr (std::is_void_v<result_type>)
{
func(id.first, *ptr);
func(key, *ptr);
result++;
}
else if ((result.ret = func(id.first, *ptr)))
else
{
result.ptr = {id.second, ptr};
break;
if ((result.ret = func(key, *ptr)))
{
result.ptr = static_cast<stx::shared_ptr<object_type>>(id.load());
break;
}
}
}
}
@ -767,15 +785,17 @@ public:
// Remove the ID
template <typename T, typename Get = T>
requires IdmTypesCompatible<T, Get>
static inline bool remove(u32 id)
{
std::shared_ptr<void> ptr;
stx::shared_ptr<T> ptr;
{
std::lock_guard lock(id_manager::g_mutex);
if (const auto found = find_id<T, Get>(id))
if (const auto found = find_id<T, Get>(id); found.first)
{
ptr = std::move(found->second);
ptr = found.first->exchange(null_ptr);
found.second->clear();
}
else
{
@ -787,17 +807,18 @@ public:
}
// Remove the ID if it matches the weak/shared ptr
template <typename T, typename Get = T, typename Ptr>
static inline bool remove_verify(u32 id, Ptr sptr)
template <typename T, typename Get = T, typename Ptr, typename Lock = std::true_type>
requires IdmTypesCompatible<T, Get> && std::is_convertible_v<Lock, bool>
static inline bool remove_verify(u32 id, Ptr&& sptr, Lock = std::true_type{})
{
std::shared_ptr<void> ptr;
stx::shared_ptr<T> ptr;
{
std::lock_guard lock(id_manager::g_mutex);
[[maybe_unused]] std::conditional_t<!!Lock(), std::lock_guard<shared_mutex>, const shared_mutex&> lock(id_manager::g_mutex);
if (const auto found = find_id<T, Get>(id); found &&
(!found->second.owner_before(sptr) && !sptr.owner_before(found->second)))
if (const auto found = find_id<T, Get>(id); found.first && found.first->is_equal(sptr))
{
ptr = std::move(found->second);
ptr = found.first->exchange(null_ptr);
found.second->clear();
}
else
{
@ -809,16 +830,18 @@ public:
}
// Remove the ID and return the object
template <typename T, typename Get = T>
static inline std::shared_ptr<Get> withdraw(u32 id)
template <typename T, typename Get = T, typename Lock = std::true_type>
requires IdmTypesCompatible<T, Get>
static inline stx::shared_ptr<Get> withdraw(u32 id, int = 0, Lock = std::true_type{})
{
std::shared_ptr<Get> ptr;
stx::shared_ptr<Get> ptr;
{
std::lock_guard lock(id_manager::g_mutex);
[[maybe_unused]] std::conditional_t<!!Lock(), std::lock_guard<shared_mutex>, const shared_mutex&> lock(id_manager::g_mutex);
if (const auto found = find_id<T, Get>(id))
if (const auto found = find_id<T, Get>(id); found.first)
{
ptr = std::static_pointer_cast<Get>(::as_rvalue(std::move(found->second)));
ptr = static_cast<stx::shared_ptr<Get>>(found.first->exchange(null_ptr));
found.second->clear();
}
}
@ -827,25 +850,27 @@ public:
// Remove the ID after accessing the object under writer lock, return the object and propagate return value
template <typename T, typename Get = T, typename F, typename FRT = std::invoke_result_t<F, Get&>>
static inline std::conditional_t<std::is_void_v<FRT>, std::shared_ptr<Get>, return_pair<Get, FRT>> withdraw(u32 id, F&& func)
requires IdmTypesCompatible<T, Get> && std::is_invocable_v<F, Get&>
static inline std::conditional_t<std::is_void_v<FRT>, stx::shared_ptr<Get>, return_pair<stx::shared_ptr<Get>, FRT>> withdraw(u32 id, F&& func)
{
const u32 index = get_index<Get>(id);
if (index >= id_manager::id_traits<Get>::count)
{
return {nullptr};
return {};
}
std::unique_lock lock(id_manager::g_mutex);
if (const auto found = find_index<T, Get>(index, id))
if (const auto found = find_index<T, Get>(index, id); found.first)
{
const auto _ptr = static_cast<Get*>(found->second.get());
const auto _ptr = static_cast<Get*>(found.first->observe());
if constexpr (std::is_void_v<FRT>)
{
func(*_ptr);
return std::static_pointer_cast<Get>(::as_rvalue(std::move(found->second)));
found.second->clear();
return static_cast<stx::shared_ptr<Get>>(found.first->exchange(null_ptr));
}
else
{
@ -854,13 +879,14 @@ public:
if (ret)
{
// If return value evaluates to true, don't delete the object (error code)
return {{found->second, _ptr}, std::move(ret)};
return {static_cast<stx::shared_ptr<Get>>(found.first->load()), std::move(ret)};
}
return {std::static_pointer_cast<Get>(::as_rvalue(std::move(found->second))), std::move(ret)};
found.second->clear();
return {static_cast<stx::shared_ptr<Get>>(found.first->exchange(null_ptr)), std::move(ret)};
}
}
return {nullptr};
return {};
}
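// Editorial note: a truthy callback result (typically an error code) keeps the ID registered and only
// hands back a reference, while a falsy result clears the key and detaches the object, e.g. (sketch)
//   idm::withdraw<lv2_obj, lv2_timer>(id, [](lv2_timer& t) -> CellError { return {}; });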
};

View file

@ -9,6 +9,7 @@ LOG_CHANNEL(sceNp2);
generic_async_transaction_context::generic_async_transaction_context(const SceNpCommunicationId& communicationId, const SceNpCommunicationPassphrase& passphrase, u64 timeout)
: communicationId(communicationId), passphrase(passphrase), timeout(timeout)
, idm_id(idm::last_id())
{
}
@ -73,12 +74,12 @@ bool destroy_tus_context(s32 ctx_id)
return idm::remove<tus_ctx>(static_cast<u32>(ctx_id));
}
tus_transaction_ctx::tus_transaction_ctx(const std::shared_ptr<tus_ctx>& tus)
tus_transaction_ctx::tus_transaction_ctx(const shared_ptr<tus_ctx>& tus)
: generic_async_transaction_context(tus->communicationId, tus->passphrase, tus->timeout)
{
}
s32 create_tus_transaction_context(const std::shared_ptr<tus_ctx>& tus)
s32 create_tus_transaction_context(const shared_ptr<tus_ctx>& tus)
{
s32 tus_id = idm::make<tus_transaction_ctx>(tus);
@ -116,13 +117,13 @@ bool destroy_score_context(s32 ctx_id)
return idm::remove<score_ctx>(static_cast<u32>(ctx_id));
}
score_transaction_ctx::score_transaction_ctx(const std::shared_ptr<score_ctx>& score)
score_transaction_ctx::score_transaction_ctx(const shared_ptr<score_ctx>& score)
: generic_async_transaction_context(score->communicationId, score->passphrase, score->timeout)
{
pcId = score->pcId;
}
s32 create_score_transaction_context(const std::shared_ptr<score_ctx>& score)
s32 create_score_transaction_context(const shared_ptr<score_ctx>& score)
{
s32 trans_id = idm::make<score_transaction_ctx>(score);
@ -158,11 +159,11 @@ bool destroy_match2_context(u16 ctx_id)
}
bool check_match2_context(u16 ctx_id)
{
return (idm::check<match2_ctx>(ctx_id) != nullptr);
return (idm::check_unlocked<match2_ctx>(ctx_id) != nullptr);
}
std::shared_ptr<match2_ctx> get_match2_context(u16 ctx_id)
shared_ptr<match2_ctx> get_match2_context(u16 ctx_id)
{
return idm::get<match2_ctx>(ctx_id);
return idm::get_unlocked<match2_ctx>(ctx_id);
}
lookup_title_ctx::lookup_title_ctx(vm::cptr<SceNpCommunicationId> communicationId)
@ -207,9 +208,9 @@ bool destroy_commerce2_context(u32 ctx_id)
{
return idm::remove<commerce2_ctx>(static_cast<u32>(ctx_id));
}
std::shared_ptr<commerce2_ctx> get_commerce2_context(u16 ctx_id)
shared_ptr<commerce2_ctx> get_commerce2_context(u16 ctx_id)
{
return idm::get<commerce2_ctx>(ctx_id);
return idm::get_unlocked<commerce2_ctx>(ctx_id);
}
signaling_ctx::signaling_ctx(vm::ptr<SceNpId> npid, vm::ptr<SceNpSignalingHandler> handler, vm::ptr<void> arg)
@ -226,9 +227,9 @@ bool destroy_signaling_context(u32 ctx_id)
{
return idm::remove<signaling_ctx>(static_cast<u32>(ctx_id));
}
std::shared_ptr<signaling_ctx> get_signaling_context(u32 ctx_id)
shared_ptr<signaling_ctx> get_signaling_context(u32 ctx_id)
{
return idm::get<signaling_ctx>(ctx_id);
return idm::get_unlocked<signaling_ctx>(ctx_id);
}
matching_ctx::matching_ctx(vm::ptr<SceNpId> npId, vm::ptr<SceNpMatchingHandler> handler, vm::ptr<void> arg)
@ -266,9 +267,9 @@ s32 create_matching_context(vm::ptr<SceNpId> npId, vm::ptr<SceNpMatchingHandler>
ctx->ctx_id = ctx_id;
return static_cast<s32>(ctx_id);
}
std::shared_ptr<matching_ctx> get_matching_context(u32 ctx_id)
shared_ptr<matching_ctx> get_matching_context(u32 ctx_id)
{
return idm::get<matching_ctx>(ctx_id);
return idm::get_unlocked<matching_ctx>(ctx_id);
}
bool destroy_matching_context(u32 ctx_id)
{

View file

@ -20,7 +20,7 @@
// Used By Score and Tus
struct generic_async_transaction_context
{
virtual ~generic_async_transaction_context();
~generic_async_transaction_context();
generic_async_transaction_context(const SceNpCommunicationId& communicationId, const SceNpCommunicationPassphrase& passphrase, u64 timeout);
@ -37,6 +37,8 @@ struct generic_async_transaction_context
u64 timeout;
std::thread thread;
u32 idm_id;
};
struct tdata_invalid
@ -137,8 +139,7 @@ bool destroy_tus_context(s32 ctx_id);
struct tus_transaction_ctx : public generic_async_transaction_context
{
tus_transaction_ctx(const std::shared_ptr<tus_ctx>& tus);
virtual ~tus_transaction_ctx() = default;
tus_transaction_ctx(const shared_ptr<tus_ctx>& tus);
static const u32 id_base = 0x8001;
static const u32 id_step = 1;
@ -148,7 +149,7 @@ struct tus_transaction_ctx : public generic_async_transaction_context
std::variant<tdata_invalid, tdata_tus_get_variables_generic, tdata_tus_get_variable_generic, tdata_tus_set_data, tdata_tus_get_data, tdata_tus_get_datastatus_generic> tdata;
};
s32 create_tus_transaction_context(const std::shared_ptr<tus_ctx>& tus);
s32 create_tus_transaction_context(const shared_ptr<tus_ctx>& tus);
bool destroy_tus_transaction_context(s32 ctx_id);
// Score related
@ -172,8 +173,7 @@ bool destroy_score_context(s32 ctx_id);
struct score_transaction_ctx : public generic_async_transaction_context
{
score_transaction_ctx(const std::shared_ptr<score_ctx>& score);
virtual ~score_transaction_ctx() = default;
score_transaction_ctx(const shared_ptr<score_ctx>& score);
static const u32 id_base = 0x1001;
static const u32 id_step = 1;
@ -183,7 +183,7 @@ struct score_transaction_ctx : public generic_async_transaction_context
std::variant<tdata_invalid, tdata_get_board_infos, tdata_record_score, tdata_record_score_data, tdata_get_score_data, tdata_get_score_generic> tdata;
s32 pcId = 0;
};
s32 create_score_transaction_context(const std::shared_ptr<score_ctx>& score);
s32 create_score_transaction_context(const shared_ptr<score_ctx>& score);
bool destroy_score_transaction_context(s32 ctx_id);
// Match2 related
@ -214,7 +214,7 @@ struct match2_ctx
};
u16 create_match2_context(vm::cptr<SceNpCommunicationId> communicationId, vm::cptr<SceNpCommunicationPassphrase> passphrase, s32 option);
bool check_match2_context(u16 ctx_id);
std::shared_ptr<match2_ctx> get_match2_context(u16 ctx_id);
shared_ptr<match2_ctx> get_match2_context(u16 ctx_id);
bool destroy_match2_context(u16 ctx_id);
struct lookup_title_ctx
@ -261,7 +261,7 @@ struct commerce2_ctx
vm::ptr<void> context_callback_param{};
};
s32 create_commerce2_context(u32 version, vm::cptr<SceNpId> npid, vm::ptr<SceNpCommerce2Handler> handler, vm::ptr<void> arg);
std::shared_ptr<commerce2_ctx> get_commerce2_context(u16 ctx_id);
shared_ptr<commerce2_ctx> get_commerce2_context(u16 ctx_id);
bool destroy_commerce2_context(u32 ctx_id);
struct signaling_ctx
@ -282,7 +282,7 @@ struct signaling_ctx
vm::ptr<void> ext_arg{};
};
s32 create_signaling_context(vm::ptr<SceNpId> npid, vm::ptr<SceNpSignalingHandler> handler, vm::ptr<void> arg);
std::shared_ptr<signaling_ctx> get_signaling_context(u32 ctx_id);
shared_ptr<signaling_ctx> get_signaling_context(u32 ctx_id);
bool destroy_signaling_context(u32 ctx_id);
struct matching_ctx
@ -315,5 +315,5 @@ struct matching_ctx
atomic_t<bool> get_room_limit_version = false;
};
s32 create_matching_context(vm::ptr<SceNpId> npid, vm::ptr<SceNpMatchingHandler> handler, vm::ptr<void> arg);
std::shared_ptr<matching_ctx> get_matching_context(u32 ctx_id);
shared_ptr<matching_ctx> get_matching_context(u32 ctx_id);
bool destroy_matching_context(u32 ctx_id);

View file

@ -502,7 +502,7 @@ namespace np
np_handler::~np_handler()
{
std::unordered_map<u32, std::shared_ptr<generic_async_transaction_context>> moved_trans;
std::unordered_map<u32, shared_ptr<generic_async_transaction_context>> moved_trans;
{
std::lock_guard lock(mutex_async_transactions);
moved_trans = std::move(async_transactions);
@ -999,7 +999,7 @@ namespace np
return CELL_OK;
}
std::optional<std::shared_ptr<std::pair<std::string, message_data>>> np_handler::get_message(u64 id)
std::optional<shared_ptr<std::pair<std::string, message_data>>> np_handler::get_message(u64 id)
{
return get_rpcn()->get_message(id);
}
@ -1019,7 +1019,7 @@ namespace np
}
}
std::optional<std::shared_ptr<std::pair<std::string, message_data>>> np_handler::get_message_selected(SceNpBasicAttachmentDataId id)
std::optional<shared_ptr<std::pair<std::string, message_data>>> np_handler::get_message_selected(SceNpBasicAttachmentDataId id)
{
switch (id)
{
@ -1672,7 +1672,7 @@ namespace np
return ctx_id;
}
std::shared_ptr<matching_ctx> np_handler::take_pending_gui_request(u32 req_id)
shared_ptr<matching_ctx> np_handler::take_pending_gui_request(u32 req_id)
{
const u32 ctx_id = take_gui_request(req_id);

View file

@ -150,9 +150,9 @@ namespace np
error_code get_basic_event(vm::ptr<s32> event, vm::ptr<SceNpUserInfo> from, vm::ptr<u8> data, vm::ptr<u32> size);
// Messages-related functions
std::optional<std::shared_ptr<std::pair<std::string, message_data>>> get_message(u64 id);
std::optional<shared_ptr<std::pair<std::string, message_data>>> get_message(u64 id);
void set_message_selected(SceNpBasicAttachmentDataId id, u64 msg_id);
std::optional<std::shared_ptr<std::pair<std::string, message_data>>> get_message_selected(SceNpBasicAttachmentDataId id);
std::optional<shared_ptr<std::pair<std::string, message_data>>> get_message_selected(SceNpBasicAttachmentDataId id);
void clear_message_selected(SceNpBasicAttachmentDataId id);
void send_message(const message_data& msg_data, const std::set<std::string>& npids);
@ -206,29 +206,29 @@ namespace np
void set_current_gui_ctx_id(u32 id);
// Score requests
void transaction_async_handler(std::unique_lock<shared_mutex> lock, const std::shared_ptr<generic_async_transaction_context>& trans_ctx, u32 req_id, bool async);
void get_board_infos(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, vm::ptr<SceNpScoreBoardInfo> boardInfo, bool async);
void record_score(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, vm::cptr<SceNpScoreComment> scoreComment, const u8* data, u32 data_size, vm::ptr<SceNpScoreRankNumber> tmpRank, bool async);
void record_score_data(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, u32 totalSize, u32 sendSize, const u8* score_data, bool async);
void get_score_data(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const SceNpId& npId, vm::ptr<u32> totalSize, u32 recvSize, vm::ptr<void> score_data, bool async);
void get_score_range(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreRankNumber startSerialRank, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
void get_score_npid(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const std::vector<std::pair<SceNpId, s32>>& npid_vec, vm::ptr<SceNpScorePlayerRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
void get_score_friend(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, bool include_self, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
void transaction_async_handler(std::unique_lock<shared_mutex> lock, const shared_ptr<generic_async_transaction_context>& trans_ctx, u32 req_id, bool async);
void get_board_infos(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, vm::ptr<SceNpScoreBoardInfo> boardInfo, bool async);
void record_score(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, vm::cptr<SceNpScoreComment> scoreComment, const u8* data, u32 data_size, vm::ptr<SceNpScoreRankNumber> tmpRank, bool async);
void record_score_data(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, u32 totalSize, u32 sendSize, const u8* score_data, bool async);
void get_score_data(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const SceNpId& npId, vm::ptr<u32> totalSize, u32 recvSize, vm::ptr<void> score_data, bool async);
void get_score_range(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreRankNumber startSerialRank, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
void get_score_npid(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const std::vector<std::pair<SceNpId, s32>>& npid_vec, vm::ptr<SceNpScorePlayerRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
void get_score_friend(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, bool include_self, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated);
// TUS requests
void tus_set_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::cptr<s64> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multiuser_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_friends_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusVariable> variableArray,s32 arrayNum, bool async);
void tus_add_and_get_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s64 inVariable, vm::ptr<SceNpTusVariable> outVariable, vm::ptr<SceNpTusAddAndGetVariableOptParam> option, bool vuser, bool async);
void tus_try_and_set_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s32 opeType, s64 variable, vm::ptr<SceNpTusVariable> resultVariable, vm::ptr<SceNpTusTryAndSetVariableOptParam> option, bool vuser, bool async);
void tus_delete_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async);
void tus_set_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, u32 totalSize, u32 sendSize, vm::cptr<void> data, vm::cptr<SceNpTusDataInfo> info, vm::ptr<SceNpTusSetDataOptParam> option, bool vuser, bool async);
void tus_get_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> dataStatus, vm::ptr<void> data, u32 recvSize, bool vuser, bool async);
void tus_get_multislot_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multiuser_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async);
void tus_get_friends_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool async);
void tus_delete_multislot_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async);
void tus_set_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::cptr<s64> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multiuser_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async);
void tus_get_friends_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool async);
void tus_add_and_get_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s64 inVariable, vm::ptr<SceNpTusVariable> outVariable, vm::ptr<SceNpTusAddAndGetVariableOptParam> option, bool vuser, bool async);
void tus_try_and_set_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s32 opeType, s64 variable, vm::ptr<SceNpTusVariable> resultVariable, vm::ptr<SceNpTusTryAndSetVariableOptParam> option, bool vuser, bool async);
void tus_delete_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async);
void tus_set_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, u32 totalSize, u32 sendSize, vm::cptr<void> data, vm::cptr<SceNpTusDataInfo> info, vm::ptr<SceNpTusSetDataOptParam> option, bool vuser, bool async);
void tus_get_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> dataStatus, vm::ptr<void> data, u32 recvSize, bool vuser, bool async);
void tus_get_multislot_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async);
void tus_get_multiuser_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async);
void tus_get_friends_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool async);
void tus_delete_multislot_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async);
// Local functions
std::pair<error_code, std::optional<SceNpId>> local_get_npid(u64 room_id, u16 member_id);
@ -494,7 +494,7 @@ namespace np
// Async transaction threads
shared_mutex mutex_async_transactions;
std::unordered_map<u32, std::shared_ptr<generic_async_transaction_context>> async_transactions; // (req_id, transaction_ctx)
std::unordered_map<u32, shared_ptr<generic_async_transaction_context>> async_transactions; // (req_id, transaction_ctx)
// RPCN
shared_mutex mutex_rpcn;
@ -529,7 +529,7 @@ namespace np
void add_gui_request(u32 req_id, u32 ctx_id);
void remove_gui_request(u32 req_id);
u32 take_gui_request(u32 req_id);
std::shared_ptr<matching_ctx> take_pending_gui_request(u32 req_id);
shared_ptr<matching_ctx> take_pending_gui_request(u32 req_id);
shared_mutex mutex_quickmatching;
std::map<SceNpRoomId, u32> pending_quickmatching;

View file

@ -348,7 +348,7 @@ namespace np
return generic_gui_notification_handler(data, "UserKickedGUI", SCE_NP_MATCHING_EVENT_ROOM_KICKED);
}
void gui_epilog(const std::shared_ptr<matching_ctx>& ctx);
void gui_epilog(const shared_ptr<matching_ctx>& ctx);
void np_handler::notif_quickmatch_complete_gui(std::vector<u8>& data)
{

View file

@ -802,7 +802,7 @@ namespace np
return true;
}
void np_handler::transaction_async_handler(std::unique_lock<shared_mutex> lock, const std::shared_ptr<generic_async_transaction_context>& trans_ctx, u32 req_id, bool async)
void np_handler::transaction_async_handler(std::unique_lock<shared_mutex> lock, const shared_ptr<generic_async_transaction_context>& trans_ctx, u32 req_id, bool async)
{
auto worker_function = [trans_ctx = trans_ctx, req_id, this](std::unique_lock<shared_mutex> lock)
{
@ -842,7 +842,7 @@ namespace np
cpu_thread.check_state();
}
void np_handler::get_board_infos(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, vm::ptr<SceNpScoreBoardInfo> boardInfo, bool async)
void np_handler::get_board_infos(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, vm::ptr<SceNpScoreBoardInfo> boardInfo, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
@@ -877,7 +877,7 @@ namespace np
return false;
}
auto score_trans = std::dynamic_pointer_cast<score_transaction_ctx>(::at32(async_transactions, req_id));
auto score_trans = idm::get_unlocked<score_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(score_trans);
std::lock_guard lock(score_trans->mutex);
@@ -892,7 +892,7 @@ namespace np
return true;
}
void np_handler::record_score(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, vm::cptr<SceNpScoreComment> scoreComment, const u8* data, u32 data_size, vm::ptr<SceNpScoreRankNumber> tmpRank, bool async)
void np_handler::record_score(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, vm::cptr<SceNpScoreComment> scoreComment, const u8* data, u32 data_size, vm::ptr<SceNpScoreRankNumber> tmpRank, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::SCORE);
@@ -920,7 +920,7 @@ namespace np
return false;
}
auto score_trans = std::dynamic_pointer_cast<score_transaction_ctx>(::at32(async_transactions, req_id));
auto score_trans = idm::get_unlocked<score_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(score_trans);
std::lock_guard lock(score_trans->mutex);
@@ -961,7 +961,7 @@ namespace np
return true;
}
void np_handler::record_score_data(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, u32 totalSize, u32 sendSize, const u8* score_data, bool async)
void np_handler::record_score_data(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreValue score, u32 totalSize, u32 sendSize, const u8* score_data, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
@@ -1021,7 +1021,7 @@ namespace np
return set_result_and_wake(CELL_OK);
}
void np_handler::get_score_data(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const SceNpId& npId, vm::ptr<u32> totalSize, u32 recvSize, vm::ptr<void> score_data, bool async)
void np_handler::get_score_data(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const SceNpId& npId, vm::ptr<u32> totalSize, u32 recvSize, vm::ptr<void> score_data, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
@@ -1062,7 +1062,7 @@ namespace np
return false;
}
auto score_trans = std::dynamic_pointer_cast<score_transaction_ctx>(::at32(async_transactions, req_id));
auto score_trans = idm::get_unlocked<score_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(score_trans);
std::lock_guard lock(score_trans->mutex);
@@ -1098,7 +1098,7 @@ namespace np
return score_trans->set_result_and_wake(not_an_error(to_copy));
}
void np_handler::get_score_range(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreRankNumber startSerialRank, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
void np_handler::get_score_range(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, SceNpScoreRankNumber startSerialRank, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::SCORE);
@@ -1152,7 +1152,7 @@ namespace np
return false;
}
auto score_trans = std::dynamic_pointer_cast<score_transaction_ctx>(::at32(async_transactions, req_id));
auto score_trans = idm::get_unlocked<score_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(score_trans);
std::lock_guard lock(score_trans->mutex);
@@ -1280,7 +1280,7 @@ namespace np
return handle_GetScoreResponse(req_id, reply_data);
}
void np_handler::get_score_friend(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, bool include_self, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
void np_handler::get_score_friend(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, bool include_self, vm::ptr<SceNpScoreRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::SCORE);
@@ -1309,7 +1309,7 @@ namespace np
return handle_GetScoreResponse(req_id, reply_data);
}
void np_handler::get_score_npid(std::shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const std::vector<std::pair<SceNpId, s32>>& npid_vec, vm::ptr<SceNpScorePlayerRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
void np_handler::get_score_npid(shared_ptr<score_transaction_ctx>& trans_ctx, SceNpScoreBoardId boardId, const std::vector<std::pair<SceNpId, s32>>& npid_vec, vm::ptr<SceNpScorePlayerRankData> rankArray, u32 rankArraySize, vm::ptr<SceNpScoreComment> commentArray, [[maybe_unused]] u32 commentArraySize, vm::ptr<void> infoArray, u32 infoArraySize, u32 arrayNum, vm::ptr<CellRtcTick> lastSortDate, vm::ptr<SceNpScoreRankNumber> totalRecord, bool async, bool deprecated)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::SCORE);
@@ -1380,7 +1380,7 @@ namespace np
return false;
}
auto tus_trans = std::dynamic_pointer_cast<tus_transaction_ctx>(::at32(async_transactions, req_id));
auto tus_trans = idm::get_unlocked<tus_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(tus_trans);
std::lock_guard lock(tus_trans->mutex);
@@ -1448,7 +1448,7 @@ namespace np
return false;
}
auto tus_trans = std::dynamic_pointer_cast<tus_transaction_ctx>(::at32(async_transactions, req_id));
auto tus_trans = idm::get_unlocked<tus_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(tus_trans);
std::lock_guard lock(tus_trans->mutex);
@@ -1506,7 +1506,7 @@ namespace np
return false;
}
auto tus_trans = std::dynamic_pointer_cast<tus_transaction_ctx>(::at32(async_transactions, req_id));
auto tus_trans = idm::get_unlocked<tus_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(tus_trans);
std::lock_guard lock(tus_trans->mutex);
@@ -1568,7 +1568,7 @@ namespace np
return true;
}
void np_handler::tus_set_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::cptr<s64> variableArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_set_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::cptr<s64> variableArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1582,7 +1582,7 @@ namespace np
return handle_tus_no_data(req_id, reply_data);
}
void np_handler::tus_get_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_get_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1601,7 +1601,7 @@ namespace np
return handle_TusVarResponse(req_id, reply_data);
}
void np_handler::tus_get_multiuser_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_get_multiuser_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusVariable> variableArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1620,7 +1620,7 @@ namespace np
return handle_TusVarResponse(req_id, reply_data);
}
void np_handler::tus_get_friends_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusVariable> variableArray,s32 arrayNum, bool async)
void np_handler::tus_get_friends_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusVariable> variableArray,s32 arrayNum, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1639,7 +1639,7 @@ namespace np
return handle_TusVarResponse(req_id, reply_data);
}
void np_handler::tus_add_and_get_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s64 inVariable, vm::ptr<SceNpTusVariable> outVariable, vm::ptr<SceNpTusAddAndGetVariableOptParam> option, bool vuser, bool async)
void np_handler::tus_add_and_get_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s64 inVariable, vm::ptr<SceNpTusVariable> outVariable, vm::ptr<SceNpTusAddAndGetVariableOptParam> option, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1657,7 +1657,7 @@ namespace np
return handle_TusVariable(req_id, reply_data);
}
void np_handler::tus_try_and_set_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s32 opeType, s64 variable, vm::ptr<SceNpTusVariable> resultVariable, vm::ptr<SceNpTusTryAndSetVariableOptParam> option, bool vuser, bool async)
void np_handler::tus_try_and_set_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, s32 opeType, s64 variable, vm::ptr<SceNpTusVariable> resultVariable, vm::ptr<SceNpTusTryAndSetVariableOptParam> option, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1675,7 +1675,7 @@ namespace np
return handle_TusVariable(req_id, reply_data);
}
void np_handler::tus_delete_multislot_variable(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_delete_multislot_variable(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1689,7 +1689,7 @@ namespace np
return handle_tus_no_data(req_id, reply_data);
}
void np_handler::tus_set_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, u32 totalSize, u32 sendSize, vm::cptr<void> data, vm::cptr<SceNpTusDataInfo> info, vm::ptr<SceNpTusSetDataOptParam> option, bool vuser, bool async)
void np_handler::tus_set_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, u32 totalSize, u32 sendSize, vm::cptr<void> data, vm::cptr<SceNpTusDataInfo> info, vm::ptr<SceNpTusSetDataOptParam> option, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
@@ -1723,7 +1723,7 @@ namespace np
return handle_tus_no_data(req_id, reply_data);
}
void np_handler::tus_get_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> dataStatus, vm::ptr<void> data, u32 recvSize, bool vuser, bool async)
void np_handler::tus_get_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> dataStatus, vm::ptr<void> data, u32 recvSize, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
@@ -1762,7 +1762,7 @@ namespace np
return false;
}
auto tus_trans = std::dynamic_pointer_cast<tus_transaction_ctx>(::at32(async_transactions, req_id));
auto tus_trans = idm::get_unlocked<tus_transaction_ctx>(::at32(async_transactions, req_id)->idm_id);
ensure(tus_trans);
std::lock_guard lock(tus_trans->mutex);
@@ -1835,7 +1835,7 @@ namespace np
return true;
}
void np_handler::tus_get_multislot_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_get_multislot_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1854,7 +1854,7 @@ namespace np
return handle_TusDataStatusResponse(req_id, reply_data);
}
void np_handler::tus_get_multiuser_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_get_multiuser_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, std::vector<SceNpOnlineId> targetNpIdArray, SceNpTusSlotId slotId, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1873,7 +1873,7 @@ namespace np
return handle_TusDataStatusResponse(req_id, reply_data);
}
void np_handler::tus_get_friends_data_status(std::shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool async)
void np_handler::tus_get_friends_data_status(shared_ptr<tus_transaction_ctx>& trans_ctx, SceNpTusSlotId slotId, s32 includeSelf, s32 sortType, vm::ptr<SceNpTusDataStatus> statusArray, s32 arrayNum, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);
@@ -1892,7 +1892,7 @@ namespace np
return handle_TusDataStatusResponse(req_id, reply_data);
}
void np_handler::tus_delete_multislot_data(std::shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async)
void np_handler::tus_delete_multislot_data(shared_ptr<tus_transaction_ctx>& trans_ctx, const SceNpOnlineId& targetNpId, vm::cptr<SceNpTusSlotId> slotIdArray, s32 arrayNum, bool vuser, bool async)
{
std::unique_lock lock(trans_ctx->mutex);
const u32 req_id = get_req_id(REQUEST_ID_HIGH::TUS);


@@ -14,7 +14,7 @@ LOG_CHANNEL(rpcn_log, "rpcn");
namespace np
{
std::pair<error_code, std::shared_ptr<matching_ctx>> gui_prelude(u32 ctx_id, vm::ptr<SceNpMatchingGUIHandler> handler, vm::ptr<void> arg)
std::pair<error_code, shared_ptr<matching_ctx>> gui_prelude(u32 ctx_id, vm::ptr<SceNpMatchingGUIHandler> handler, vm::ptr<void> arg)
{
auto ctx = get_matching_context(ctx_id);
@@ -33,7 +33,7 @@ namespace np
return {CELL_OK, ctx};
}
void gui_epilog(const std::shared_ptr<matching_ctx>& ctx)
void gui_epilog(const shared_ptr<matching_ctx>& ctx)
{
ensure(ctx->busy.compare_and_swap_test(1, 0), "Matching context wasn't busy in gui_epilog");
ctx->queue_gui_callback(SCE_NP_MATCHING_GUI_EVENT_COMMON_UNLOAD, 0);
@@ -662,7 +662,7 @@ namespace np
if (ctx->wakey == 0)
{
// Verify that the context is still valid
if (!idm::check<matching_ctx>(ctx->ctx_id))
if (!idm::check_unlocked<matching_ctx>(ctx->ctx_id))
return;
rpcn_log.notice("QuickMatch timeout");


@@ -2775,7 +2775,7 @@ namespace rpcn
{
std::lock_guard lock(mutex_messages);
const u64 msg_id = message_counter++;
auto id_and_msg = std::make_shared<std::pair<std::string, message_data>>(std::make_pair(std::move(sender), std::move(mdata)));
auto id_and_msg = stx::make_shared<std::pair<std::string, message_data>>(std::make_pair(std::move(sender), std::move(mdata)));
messages.emplace(msg_id, id_and_msg);
new_messages.push_back(msg_id);
active_messages.insert(msg_id);
@@ -2791,7 +2791,7 @@ namespace rpcn
}
}
std::optional<std::shared_ptr<std::pair<std::string, message_data>>> rpcn_client::get_message(u64 id)
std::optional<shared_ptr<std::pair<std::string, message_data>>> rpcn_client::get_message(u64 id)
{
{
std::lock_guard lock(mutex_messages);
@@ -2803,9 +2803,9 @@ namespace rpcn
}
}
std::vector<std::pair<u64, std::shared_ptr<std::pair<std::string, message_data>>>> rpcn_client::get_messages_and_register_cb(SceNpBasicMessageMainType type_filter, bool include_bootable, message_cb_func cb_func, void* cb_param)
std::vector<std::pair<u64, shared_ptr<std::pair<std::string, message_data>>>> rpcn_client::get_messages_and_register_cb(SceNpBasicMessageMainType type_filter, bool include_bootable, message_cb_func cb_func, void* cb_param)
{
std::vector<std::pair<u64, std::shared_ptr<std::pair<std::string, message_data>>>> vec_messages;
std::vector<std::pair<u64, shared_ptr<std::pair<std::string, message_data>>>> vec_messages;
{
std::lock_guard lock(mutex_messages);
for (auto id : active_messages)

Some files were not shown because too many files have changed in this diff.