Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-20 19:45:20 +00:00)

Merge branch 'master' into perf-fix

Commit d0e61fa4a4
207 changed files with 4132 additions and 1696 deletions
@@ -72,13 +72,13 @@ if [ ! -d "/tmp/Qt/$QT_VER" ]; then
 cd qt-downloader
 git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
 # nested Qt 6.8.1 URL workaround
-sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
-sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
+# sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
+# sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
 cd "/tmp/Qt"
 "$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml
 mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
-sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.1 workaround
-"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats -o "$QT_VER/clang_64"
+# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.1 workaround
+"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
 fi
 
 cd "$WORKDIR"

@@ -40,13 +40,13 @@ if [ ! -d "/tmp/Qt/$QT_VER" ]; then
 cd qt-downloader
 git checkout f52efee0f18668c6d6de2dec0234b8c4bc54c597
 # nested Qt 6.8.1 URL workaround
-sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
-sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
+# sed -i '' "s/'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/'qt{0}_{0}{1}{2}'.format(major, minor, patch), 'qt{0}_{0}{1}{2}'.format(major, minor, patch)]))/g" qt-downloader
+# sed -i '' "s/'{}\/{}\/qt{}_{}\/'/'{0}\/{1}\/qt{2}_{3}\/qt{2}_{3}\/'/g" qt-downloader
 cd "/tmp/Qt"
 "$BREW_X64_PATH/bin/pipenv" run pip3 install py7zr requests semantic_version lxml
 mkdir -p "$QT_VER/macos" ; ln -s "macos" "$QT_VER/clang_64"
-sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.1 workaround
-"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats -o "$QT_VER/clang_64"
+# sed -i '' 's/args\.version \/ derive_toolchain_dir(args) \/ //g' "$WORKDIR/qt-downloader/qt-downloader" # Qt 6.8.1 workaround
+"$BREW_X64_PATH/bin/pipenv" run "$WORKDIR/qt-downloader/qt-downloader" macos desktop "$QT_VER" clang_64 --opensource --addons qtmultimedia qtimageformats # -o "$QT_VER/clang_64"
 fi
 
 cd "$WORKDIR"

3rdparty/7zip/7zip (vendored):
@@ -1 +1 @@
-Subproject commit e008ce3976c087bfd21344af8f00a23cf69d4174
+Subproject commit e5431fa6f5505e385c6f9367260717e9c47dc2ee

3rdparty/FAudio (vendored):
@@ -1 +1 @@
-Subproject commit 74d45e615c2e7510c7e0f2ccb91dc6d7ccae4bec
+Subproject commit b7c2e109ea86b82109244c9c4569ce9ad0c884df

3rdparty/OpenAL/openal-soft (vendored):
@@ -1 +1 @@
-Subproject commit d3875f333fb6abe2f39d82caca329414871ae53b
+Subproject commit 90191edd20bb877c5cbddfdac7ec0fe49ad93727

3rdparty/curl/curl (vendored):
@@ -1 +1 @@
-Subproject commit b1ef0e1a01c0bb6ee5367bd9c186a603bde3615a
+Subproject commit 75a2079d5c28debb2eaa848ca9430f1fe0d7844c

3rdparty/libsdl-org/SDL (vendored):
@@ -1 +1 @@
-Subproject commit c98c4fbff6d8f3016a3ce6685bf8f43433c3efcc
+Subproject commit 9c821dc21ccbd69b2bda421fdb35cb4ae2da8f5e

@@ -566,9 +566,41 @@ void fmt_class_string<std::source_location>::format(std::string& out, u64 arg)
 		fmt::append(out, "\n(in file %s", loc.file_name());
 	}
 
-	if (auto func = loc.function_name(); func && func[0])
+	if (std::string_view full_func{loc.function_name() ? loc.function_name() : ""}; !full_func.empty())
 	{
-		fmt::append(out, ", in function %s)", func);
+		// Remove useless disambiguators
+		std::string func = fmt::replace_all(std::string(full_func), {
+			{"struct ", ""},
+			{"class ", ""},
+			{"enum ", ""},
+			{"typename ", ""},
+#ifdef _MSC_VER
+			{"__cdecl ", ""},
+#endif
+			{"unsigned long long", "ullong"},
+			//{"unsigned long", "ulong"}, // ullong
+			{"unsigned int", "uint"},
+			{"unsigned short", "ushort"},
+			{"unsigned char", "uchar"}});
+
+		// Remove function argument signature for long names
+		for (usz index = func.find_first_of('('); index != umax && func.size() >= 100u; index = func.find_first_of('(', index))
+		{
+			// Operator() function
+			if (func.compare(0, 3, "()("sv) == 0 || func.compare(0, 3, "() "sv))
+			{
+				if (usz not_space = func.find_first_not_of(' ', index + 2); not_space != umax && func[not_space] == '(')
+				{
+					index += 2;
+					continue;
+				}
+			}
+
+			func = func.substr(0, index) + "()";
+			break;
+		}
+
+		fmt::append(out, ", in function '%s')", func);
 	}
 	else
 	{
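
As an aside, the intent of the new code is easier to see in isolation. Below is a minimal, self-contained sketch of the same clean-up idea (not the RPCS3 implementation: a plain std::string helper stands in for fmt::replace_all, and the length threshold is lowered so the demo input triggers the trim):

#include <iostream>
#include <string>
#include <string_view>
#include <utility>

// Stand-in for fmt::replace_all: replace every occurrence of `from` with `to`.
static void replace_all(std::string& s, std::string_view from, std::string_view to)
{
	for (std::size_t pos = 0; (pos = s.find(from, pos)) != std::string::npos; pos += to.size())
		s.replace(pos, from.size(), to);
}

int main()
{
	std::string func = "class foo::bar::operator()(unsigned int, class baz&)";

	// Drop elaborated-type keywords and shorten built-in type names, as the patch does.
	const std::pair<std::string_view, std::string_view> rules[] = {
		{"class ", ""}, {"struct ", ""}, {"unsigned int", "uint"}};

	for (const auto& [from, to] : rules)
		replace_all(func, from, to);

	// Trim the argument list of long names, keeping "operator()" intact
	// (the real code only does this for names of 100 characters or more).
	if (func.size() >= 20)
	{
		std::size_t paren = func.find('(');

		if (paren != std::string::npos && func.compare(paren, 2, "()") == 0)
			paren = func.find('(', paren + 2); // skip the "()" of operator()

		if (paren != std::string::npos)
			func = func.substr(0, paren) + "()";
	}

	std::cout << func << '\n'; // prints "foo::bar::operator()()"
}
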
@@ -1360,7 +1360,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noexcept
 	// check if address is RawSPU MMIO register
 	do if (addr - RAW_SPU_BASE_ADDR < (6 * RAW_SPU_OFFSET) && (addr % RAW_SPU_OFFSET) >= RAW_SPU_PROB_OFFSET)
 	{
-		auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
+		auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((addr - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
 
 		if (!thread)
 		{

@@ -1548,7 +1548,7 @@ bool handle_access_violation(u32 addr, bool is_writing, ucontext_t* context) noexcept
 		}
 	}
 
-	if (auto pf_port = idm::get<lv2_obj, lv2_event_port>(pf_port_id); pf_port && pf_port->queue)
+	if (auto pf_port = idm::get_unlocked<lv2_obj, lv2_event_port>(pf_port_id); pf_port && pf_port->queue)
 	{
 		// We notify the game that a page fault occurred so it can rectify it.
 		// Note, for data3, were the memory readable AND we got a page fault, it must be due to a write violation since reads are allowed.

@@ -2555,13 +2555,13 @@ std::string thread_ctrl::get_name_cached()
 	return *name_cache;
 }
 
-thread_base::thread_base(native_entry entry, std::string name)
+thread_base::thread_base(native_entry entry, std::string name) noexcept
 	: entry_point(entry)
 	, m_tname(make_single_value(std::move(name)))
 {
 }
 
-thread_base::~thread_base()
+thread_base::~thread_base() noexcept
 {
 	// Cleanup abandoned tasks: initialize default results and signal
 	this->exec();

@@ -2602,7 +2602,7 @@ bool thread_base::join(bool dtor) const
 
 		if (i >= 16 && !(i & (i - 1)) && timeout != atomic_wait_timeout::inf)
 		{
-			sig_log.error(u8"Thread [%s] is too sleepy. Waiting for it %.3fµs already!", *m_tname.load(), (utils::get_tsc() - stamp0) / (utils::get_tsc_freq() / 1000000.));
+			sig_log.error("Thread [%s] is too sleepy. Waiting for it %.3fus already!", *m_tname.load(), (utils::get_tsc() - stamp0) / (utils::get_tsc_freq() / 1000000.));
 		}
 	}

@@ -172,9 +172,9 @@ private:
 	friend class named_thread;
 
 protected:
-	thread_base(native_entry, std::string name);
+	thread_base(native_entry, std::string name) noexcept;
 
-	~thread_base();
+	~thread_base() noexcept;
 
 public:
 	// Get CPU cycles since last time this function was called. First call returns 0.

@@ -351,7 +351,7 @@ public:
 	// Sets the native thread priority and returns it to zero at destructor
 	struct scoped_priority
 	{
-		explicit scoped_priority(int prio)
+		explicit scoped_priority(int prio) noexcept
 		{
 			set_native_priority(prio);
 		}

@@ -360,7 +360,7 @@ public:
 
 		scoped_priority& operator=(const scoped_priority&) = delete;
 
-		~scoped_priority()
+		~scoped_priority() noexcept
 		{
 			set_native_priority(0);
 		}

@@ -388,7 +388,7 @@ class thread_future_t : public thread_future, result_storage<Ctx, std::condition
 	using future = thread_future_t;
 
 public:
-	thread_future_t(Ctx&& func, Args&&... args)
+	thread_future_t(Ctx&& func, Args&&... args) noexcept
 		: m_args(std::forward<Args>(args)...)
 		, m_func(std::forward<Ctx>(func))
 	{

@@ -417,7 +417,7 @@ public:
 		};
 	}
 
-	~thread_future_t()
+	~thread_future_t() noexcept
 	{
 		if constexpr (!future::empty && !Discard)
 		{

@@ -570,7 +570,7 @@ public:
 	named_thread& operator=(const named_thread&) = delete;
 
 	// Wait for the completion and access result (if not void)
-	[[nodiscard]] decltype(auto) operator()()
+	[[nodiscard]] decltype(auto) operator()() noexcept
 	{
 		thread::join();
 

@@ -581,7 +581,7 @@ public:
 	}
 
 	// Wait for the completion and access result (if not void)
-	[[nodiscard]] decltype(auto) operator()() const
+	[[nodiscard]] decltype(auto) operator()() const noexcept
 	{
 		thread::join();
 

@@ -593,7 +593,7 @@ public:
 
 	// Send command to the thread to invoke directly (references should be passed via std::ref())
 	template <bool Discard = true, typename Arg, typename... Args>
-	auto operator()(Arg&& arg, Args&&... args)
+	auto operator()(Arg&& arg, Args&&... args) noexcept
 	{
 		// Overloaded operator() of the Context.
 		constexpr bool v1 = std::is_invocable_v<Context, Arg&&, Args&&...>;

@@ -667,12 +667,12 @@ public:
 	}
 
 	// Access thread state
-	operator thread_state() const
+	operator thread_state() const noexcept
 	{
 		return static_cast<thread_state>(thread::m_sync.load() & 3);
 	}
 
-	named_thread& operator=(thread_state s)
+	named_thread& operator=(thread_state s) noexcept
 	{
 		if (s == thread_state::created)
 		{

@@ -693,7 +693,7 @@ public:
 
 		if constexpr (std::is_assignable_v<Context&, thread_state>)
 		{
-			static_cast<Context&>(*this) = s;
+			static_cast<Context&>(*this) = thread_state::aborting;
 		}
 
 		if (notify_sync)

@@ -706,13 +706,18 @@ public:
 		{
 			// This participates in emulation stopping, use destruction-alike semantics
 			thread::join(true);
+
+			if constexpr (std::is_assignable_v<Context&, thread_state>)
+			{
+				static_cast<Context&>(*this) = thread_state::finished;
+			}
 		}
 
 		return *this;
 	}
 
 	// Context type doesn't need virtual destructor
-	~named_thread()
+	~named_thread() noexcept
 	{
 		// Assign aborting state forcefully and join thread
 		operator=(thread_state::finished);

@@ -121,7 +121,7 @@ public:
 
 	void unlock_hle()
 	{
-		const u32 value = atomic_storage<u32>::fetch_add_hle_rel(m_value.raw(), 0u - c_one);
+		const u32 value = atomic_storage<u32>::fetch_add_hle_rel(m_value.raw(), ~c_one + 1);
 
 		if (value != c_one) [[unlikely]]
 		{
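
Side note: for a u32 constant the two expressions produce the same bit pattern, so the replacement does not change the value passed to fetch_add_hle_rel. A quick self-contained check of the identity (c_one here is a placeholder value; the real c_one is a member constant of the mutex class):

#include <cstdint>

int main()
{
	// Placeholder value; the real c_one is defined inside the mutex class.
	constexpr std::uint32_t c_one = 0x10000;

	// Two's-complement identity: 0 - x == ~x + 1 (mod 2^32).
	static_assert(0u - c_one == ~c_one + 1);
}
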
@@ -132,7 +132,7 @@ jobs:
     UPLOAD_REPO_FULL_NAME: "RPCS3/rpcs3-binaries-mac"
     RELEASE_MESSAGE: "../GitHubReleaseMessage.txt"
     ARTDIR: $(Build.ArtifactStagingDirectory)
-    QT_VER: '6.8.1'
+    QT_VER: '6.7.3'
     QT_VER_MAIN: '6'
     LLVM_COMPILER_VER: '16'
 

@@ -193,7 +193,7 @@ jobs:
     UPLOAD_REPO_FULL_NAME: "RPCS3/rpcs3-binaries-mac-arm64"
     RELEASE_MESSAGE: "../GitHubReleaseMessage.txt"
     ARTDIR: $(Build.ArtifactStagingDirectory)
-    QT_VER: '6.8.1'
+    QT_VER: '6.7.3'
     QT_VER_MAIN: '6'
     LLVM_COMPILER_VER: '16'
 

@@ -1,5 +1,7 @@
 #pragma GCC diagnostic push
 #pragma GCC diagnostic ignored "-Wold-style-cast"
+#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
+#pragma GCC diagnostic ignored "-Wmissing-declarations"
 #import <Foundation/Foundation.h>
 #pragma GCC diagnostic pop
 

@@ -506,6 +506,7 @@ target_sources(rpcs3_emu PRIVATE
 	RSX/GL/OpenGL.cpp
 	RSX/GL/upscalers/fsr1/fsr_pass.cpp
 	RSX/GSRender.cpp
+	RSX/Host/MM.cpp
 	RSX/Host/RSXDMAWriter.cpp
 	RSX/Null/NullGSRender.cpp
 	RSX/NV47/FW/draw_call.cpp

@@ -1,6 +1,7 @@
 #pragma once
 
 #include <string>
+#include "Emu/CPU/CPUThread.h"
 #include "Utilities/StrFmt.h"
 
 enum class cpu_disasm_mode

@@ -22,7 +23,7 @@ protected:
 	const u8* m_offset{};
 	const u32 m_start_pc;
 	std::add_pointer_t<const cpu_thread> m_cpu{};
-	std::shared_ptr<cpu_thread> m_cpu_handle;
+	shared_ptr<cpu_thread> m_cpu_handle;
 	u32 m_op = 0;
 
 	void format_by_mode()

@@ -81,7 +82,7 @@ public:
 		return const_cast<cpu_thread*>(m_cpu);
 	}
 
-	void set_cpu_handle(std::shared_ptr<cpu_thread> cpu)
+	void set_cpu_handle(shared_ptr<cpu_thread> cpu)
 	{
 		m_cpu_handle = std::move(cpu);
 

@@ -87,7 +87,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& out, u64 arg)
 	const u32 must_have_cpu_id = static_cast<u32>(arg);
 
 	// Dump main_thread
-	const auto main_ppu = idm::get<named_thread<ppu_thread>>(ppu_thread::id_base);
+	const auto main_ppu = idm::get_unlocked<named_thread<ppu_thread>>(ppu_thread::id_base);
 
 	if (main_ppu)
 	{

@@ -99,7 +99,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& out, u64 arg)
 	{
 		if (must_have_cpu_id != ppu_thread::id_base)
 		{
-			const auto selected_ppu = idm::get<named_thread<ppu_thread>>(must_have_cpu_id);
+			const auto selected_ppu = idm::get_unlocked<named_thread<ppu_thread>>(must_have_cpu_id);
 
 			if (selected_ppu)
 			{

@@ -110,7 +110,7 @@ void fmt_class_string<cpu_threads_emulation_info_dump_t>::format(std::string& out, u64 arg)
 		}
 		else if (must_have_cpu_id >> 24 == spu_thread::id_base >> 24)
 		{
-			const auto selected_spu = idm::get<named_thread<spu_thread>>(must_have_cpu_id);
+			const auto selected_spu = idm::get_unlocked<named_thread<spu_thread>>(must_have_cpu_id);
 
 			if (selected_spu)
 			{

@@ -236,7 +236,7 @@ struct cpu_prof
 	}
 
 	// Print info
-	void print(const std::shared_ptr<cpu_thread>& ptr)
+	void print(const shared_ptr<cpu_thread>& ptr)
 	{
 		if (new_samples < min_print_samples || samples == idle)
 		{

@@ -263,7 +263,7 @@ struct cpu_prof
 		new_samples = 0;
 	}
 
-	static void print_all(std::unordered_map<std::shared_ptr<cpu_thread>, sample_info>& threads, sample_info& all_info)
+	static void print_all(std::unordered_map<shared_ptr<cpu_thread>, sample_info>& threads, sample_info& all_info)
 	{
 		u64 new_samples = 0;
 

@@ -319,7 +319,7 @@ struct cpu_prof
 
 	void operator()()
 	{
-		std::unordered_map<std::shared_ptr<cpu_thread>, sample_info> threads;
+		std::unordered_map<shared_ptr<cpu_thread>, sample_info> threads;
 
 		while (thread_ctrl::state() != thread_state::aborting)
 		{

@@ -335,15 +335,15 @@ struct cpu_prof
 				continue;
 			}
 
-			std::shared_ptr<cpu_thread> ptr;
+			shared_ptr<cpu_thread> ptr;
 
 			if (id >> 24 == 1)
 			{
-				ptr = idm::get<named_thread<ppu_thread>>(id);
+				ptr = idm::get_unlocked<named_thread<ppu_thread>>(id);
 			}
 			else if (id >> 24 == 2)
 			{
-				ptr = idm::get<named_thread<spu_thread>>(id);
+				ptr = idm::get_unlocked<named_thread<spu_thread>>(id);
 			}
 			else
 			{

@@ -437,7 +437,7 @@ struct cpu_prof
 				continue;
 			}
 
-			// Wait, roughly for 20µs
+			// Wait, roughly for 20us
 			thread_ctrl::wait_for(20, false);
 		}
 

@@ -1302,7 +1302,7 @@ cpu_thread* cpu_thread::get_next_cpu()
 	return nullptr;
 }
 
-std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, std::shared_ptr<cpu_thread> handle);
+std::shared_ptr<CPUDisAsm> make_disasm(const cpu_thread* cpu, shared_ptr<cpu_thread> handle);
 
 void cpu_thread::dump_all(std::string& ret) const
 {

@@ -1318,7 +1318,7 @@ void cpu_thread::dump_all(std::string& ret) const
 	if (u32 cur_pc = get_pc(); cur_pc != umax)
 	{
 		// Dump a snippet of currently executed code (may be unreliable with non-static-interpreter decoders)
-		auto disasm = make_disasm(this, nullptr);
+		auto disasm = make_disasm(this, null_ptr);
 
 		const auto rsx = try_get<rsx::thread>();
 

@@ -1558,14 +1558,14 @@ u32 CPUDisAsm::DisAsmBranchTarget(s32 /*imm*/)
 	return 0;
 }
 
-extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock, std::vector<std::pair<std::shared_ptr<named_thread<spu_thread>>, u32>>* out_list)
+extern bool try_lock_spu_threads_in_a_state_compatible_with_savestates(bool revert_lock, std::vector<std::pair<shared_ptr<named_thread<spu_thread>>, u32>>* out_list)
 {
 	if (out_list)
 	{
 		out_list->clear();
 	}
 
-	auto get_spus = [old_counter = u64{umax}, spu_list = std::vector<std::shared_ptr<named_thread<spu_thread>>>()](bool can_collect, bool force_collect) mutable
+	auto get_spus = [old_counter = u64{umax}, spu_list = std::vector<shared_ptr<named_thread<spu_thread>>>()](bool can_collect, bool force_collect) mutable
 	{
 		const u64 new_counter = cpu_thread::g_threads_created + cpu_thread::g_threads_deleted;
 

File diff suppressed because it is too large.

@@ -253,7 +253,7 @@ enum CellAdecSampleRate : s32
 	CELL_ADEC_FS_8kHz,
 };
 
-enum CellAdecBitLength : s32
+enum CellAdecBitLength : u32
 {
 	CELL_ADEC_BIT_LENGTH_RESERVED1,
 	CELL_ADEC_BIT_LENGTH_16,

@@ -352,8 +352,8 @@ enum AdecCorrectPtsValueType : s8
 	ADEC_CORRECT_PTS_VALUE_TYPE_UNSPECIFIED = -1,
 
 	// Adds a fixed amount
-	ADEC_CORRECT_PTS_VALUE_TYPE_LPCM = 0,
-	// 1
+	ADEC_CORRECT_PTS_VALUE_TYPE_LPCM_HDMV = 0,
+	ADEC_CORRECT_PTS_VALUE_TYPE_LPCM_DVD = 1, // Unused for some reason, the DVD player probably takes care of timestamps itself
 	ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_48000Hz = 2,
 	ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_44100Hz = 3,
 	ADEC_CORRECT_PTS_VALUE_TYPE_ATRACX_32000Hz = 4,
@ -562,6 +562,11 @@ public:
|
|||
{
|
||||
ensure(sys_mutex_lock(ppu, mutex, 0) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
||||
if (ppu.state & cpu_flag::again) // Savestate was created while waiting on the mutex
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
if (entries[front].state == 0xff)
|
||||
{
|
||||
ensure(sys_mutex_unlock(ppu, mutex) == CELL_OK); // Error code isn't checked on LLE
|
||||
|
@ -648,6 +653,20 @@ static_assert(std::is_standard_layout_v<AdecContext> && std::is_trivial_v<AdecCo
|
|||
CHECK_SIZE_ALIGN(AdecContext, 0x530, 8);
|
||||
|
||||
|
||||
enum : u32
|
||||
{
|
||||
CELL_ADEC_LPCM_DVD_CH_RESERVED1,
|
||||
CELL_ADEC_LPCM_DVD_CH_MONO,
|
||||
CELL_ADEC_LPCM_DVD_CH_RESERVED2,
|
||||
CELL_ADEC_LPCM_DVD_CH_STEREO,
|
||||
CELL_ADEC_LPCM_DVD_CH_UNK1, // Either 3 front or 2 front + 1 surround
|
||||
CELL_ADEC_LPCM_DVD_CH_UNK2, // Either 3 front + 1 surround or 2 front + 2 surround
|
||||
CELL_ADEC_LPCM_DVD_CH_3_2,
|
||||
CELL_ADEC_LPCM_DVD_CH_3_2_LFE,
|
||||
CELL_ADEC_LPCM_DVD_CH_3_4,
|
||||
CELL_ADEC_LPCM_DVD_CH_3_4_LFE,
|
||||
};
|
||||
|
||||
struct CellAdecParamLpcm
|
||||
{
|
||||
be_t<u32> channelNumber;
|
||||
|
@ -664,6 +683,216 @@ struct CellAdecLpcmInfo
|
|||
be_t<u32> outputDataSize;
|
||||
};
|
||||
|
||||
// HLE exclusive, for savestates
|
||||
enum class lpcm_dec_state : u8
|
||||
{
|
||||
waiting_for_cmd_mutex_lock,
|
||||
waiting_for_cmd_cond_wait,
|
||||
waiting_for_output_mutex_lock,
|
||||
waiting_for_output_cond_wait,
|
||||
queue_mutex_lock,
|
||||
executing_cmd
|
||||
};
|
||||
|
||||
class LpcmDecSemaphore
|
||||
{
|
||||
be_t<u32> value;
|
||||
be_t<u32> mutex; // sys_mutex_t
|
||||
be_t<u32> cond; // sys_cond_t
|
||||
|
||||
public:
|
||||
error_code init(ppu_thread& ppu, vm::ptr<LpcmDecSemaphore> _this, u32 initial_value)
|
||||
{
|
||||
value = initial_value;
|
||||
|
||||
const vm::var<sys_mutex_attribute_t> mutex_attr{{ SYS_SYNC_PRIORITY, SYS_SYNC_NOT_RECURSIVE, SYS_SYNC_NOT_PROCESS_SHARED, SYS_SYNC_NOT_ADAPTIVE, 0, 0, 0, { "_adem01"_u64 } }};
|
||||
const vm::var<sys_cond_attribute_t> cond_attr{{ SYS_SYNC_NOT_PROCESS_SHARED, 0, 0, { "_adec01"_u64 } }};
|
||||
|
||||
if (error_code ret = sys_mutex_create(ppu, _this.ptr(&LpcmDecSemaphore::mutex), mutex_attr); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
return sys_cond_create(ppu, _this.ptr(&LpcmDecSemaphore::cond), mutex, cond_attr);
|
||||
}
|
||||
|
||||
error_code finalize(ppu_thread& ppu) const
|
||||
{
|
||||
if (error_code ret = sys_cond_destroy(ppu, cond); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
return sys_mutex_destroy(ppu, mutex);
|
||||
}
|
||||
|
||||
error_code release(ppu_thread& ppu)
|
||||
{
|
||||
if (error_code ret = sys_mutex_lock(ppu, mutex, 0); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
value++;
|
||||
|
||||
if (error_code ret = sys_cond_signal(ppu, cond); ret != CELL_OK)
|
||||
{
|
||||
return ret; // LLE doesn't unlock the mutex
|
||||
}
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex);
|
||||
}
|
||||
|
||||
error_code acquire(ppu_thread& ppu, lpcm_dec_state& savestate)
|
||||
{
|
||||
if (savestate == lpcm_dec_state::waiting_for_cmd_cond_wait)
|
||||
{
|
||||
goto cond_wait;
|
||||
}
|
||||
|
||||
savestate = lpcm_dec_state::waiting_for_cmd_mutex_lock;
|
||||
|
||||
if (error_code ret = sys_mutex_lock(ppu, mutex, 0); ret != CELL_OK)
|
||||
{
|
||||
return ret;
|
||||
}
|
||||
|
||||
if (ppu.state & cpu_flag::again)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
|
||||
if (value == 0u)
|
||||
{
|
||||
savestate = lpcm_dec_state::waiting_for_cmd_cond_wait;
|
||||
cond_wait:
|
||||
|
||||
if (error_code ret = sys_cond_wait(ppu, cond, 0); ret != CELL_OK)
|
||||
{
|
||||
return ret; // LLE doesn't unlock the mutex
|
||||
}
|
||||
|
||||
if (ppu.state & cpu_flag::again)
|
||||
{
|
||||
return {};
|
||||
}
|
||||
}
|
||||
|
||||
value--;
|
||||
|
||||
return sys_mutex_unlock(ppu, mutex);
|
||||
}
|
||||
};
|
||||
|
||||
CHECK_SIZE(LpcmDecSemaphore, 0xc);
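
The acquire()/release() pair above is essentially a counting semaphore built from a guest mutex and condition variable, with extra early-return points so a savestate can interrupt and later resume the wait. For orientation only, a plain host-side sketch of the same idea using the C++ standard library (no savestate handling, and not the HLE code itself):

#include <condition_variable>
#include <mutex>

class counting_semaphore
{
	unsigned value;
	std::mutex mutex;
	std::condition_variable cond;

public:
	explicit counting_semaphore(unsigned initial_value) : value(initial_value) {}

	// Counterpart of LpcmDecSemaphore::release(): bump the counter and wake one waiter.
	void release()
	{
		{
			std::lock_guard lock(mutex);
			++value;
		}
		cond.notify_one();
	}

	// Counterpart of LpcmDecSemaphore::acquire(): wait until the counter is non-zero, then take one.
	void acquire()
	{
		std::unique_lock lock(mutex);
		cond.wait(lock, [&] { return value != 0; });
		--value;
	}
};
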
|
||||
|
||||
enum class LpcmDecCmdType : u32
|
||||
{
|
||||
start_seq,
|
||||
end_seq,
|
||||
decode_au,
|
||||
close
|
||||
};
|
||||
|
||||
struct LpcmDecCmd
|
||||
{
|
||||
be_t<s32> pcm_handle;
|
||||
vm::bcptr<void> au_start_addr;
|
||||
be_t<u32> au_size;
|
||||
u32 reserved1[2];
|
||||
CellAdecParamLpcm lpcm_param;
|
||||
be_t<LpcmDecCmdType> type;
|
||||
u32 reserved2;
|
||||
|
||||
LpcmDecCmd() = default; // cellAdecOpen()
|
||||
|
||||
LpcmDecCmd(LpcmDecCmdType&& type) // End sequence
|
||||
: type(type)
|
||||
{
|
||||
}
|
||||
|
||||
LpcmDecCmd(LpcmDecCmdType&& type, const CellAdecParamLpcm& lpcm_param) // Start sequence
|
||||
: lpcm_param(lpcm_param), type(type)
|
||||
{
|
||||
}
|
||||
|
||||
LpcmDecCmd(LpcmDecCmdType&& type, const s32& pcm_handle, const CellAdecAuInfo& au_info) // Decode au
|
||||
: pcm_handle(pcm_handle), au_start_addr(au_info.startAddr), au_size(au_info.size), type(type)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
||||
CHECK_SIZE(LpcmDecCmd, 0x2c);
|
||||
|
||||
struct LpcmDecContext
|
||||
{
|
||||
AdecCmdQueue<LpcmDecCmd> cmd_queue;
|
||||
|
||||
be_t<u64> thread_id; // sys_ppu_thread_t
|
||||
|
||||
be_t<u32> queue_size_mutex; // sys_mutex_t
|
||||
be_t<u32> queue_size_cond; // sys_cond_t, unused
|
||||
be_t<u32> unk_mutex; // sys_mutex_t, unused
|
||||
be_t<u32> unk_cond; // sys_cond_t, unused
|
||||
|
||||
be_t<u32> run_thread;
|
||||
|
||||
AdecCb<AdecNotifyAuDone> notify_au_done;
|
||||
AdecCb<AdecNotifyPcmOut> notify_pcm_out;
|
||||
AdecCb<AdecNotifyError> notify_error;
|
||||
AdecCb<AdecNotifySeqDone> notify_seq_done;
|
||||
|
||||
be_t<u32> output_locked;
|
||||
vm::bptr<f32> output;
|
||||
|
||||
vm::bptr<CellAdecParamLpcm> lpcm_param;
|
||||
|
||||
vm::bcptr<void> spurs_cmd_data;
|
||||
|
||||
// HLE exclusive
|
||||
lpcm_dec_state savestate;
|
||||
u64 cmd_counter; // For debugging
|
||||
|
||||
u8 reserved1[24]; // 36 bytes on LLE
|
||||
|
||||
be_t<u32> output_mutex; // sys_mutex_t
|
||||
be_t<u32> output_consumed; // sys_cond_t
|
||||
|
||||
LpcmDecSemaphore cmd_available;
|
||||
LpcmDecSemaphore reserved2; // Unused
|
||||
|
||||
be_t<u32> queue_mutex; // sys_mutex_t
|
||||
|
||||
be_t<u32> error_occurred;
|
||||
|
||||
u8 spurs_stuff[32];
|
||||
|
||||
be_t<u32> spurs_queue_pop_mutex;
|
||||
be_t<u32> spurs_queue_push_mutex;
|
||||
|
||||
be_t<u32> using_existing_spurs_instance;
|
||||
|
||||
be_t<u32> dvd_packing;
|
||||
|
||||
be_t<u32> output_size;
|
||||
|
||||
LpcmDecCmd cmd; // HLE exclusive, name of Spurs taskset (32 bytes) + CellSpursTaskLsPattern on LLE
|
||||
|
||||
u8 more_spurs_stuff[10]; // 52 bytes on LLE
|
||||
|
||||
void exec(ppu_thread& ppu);
|
||||
|
||||
template <LpcmDecCmdType type>
|
||||
error_code send_command(ppu_thread& ppu, auto&&... args);
|
||||
|
||||
inline error_code release_output(ppu_thread& ppu);
|
||||
};
|
||||
|
||||
static_assert(std::is_standard_layout_v<LpcmDecContext>);
|
||||
CHECK_SIZE_ALIGN(LpcmDecContext, 0x1c8, 8);
|
||||
|
||||
constexpr s32 LPCM_DEC_OUTPUT_BUFFER_SIZE = 0x40000;
|
||||
|
||||
// CELP Excitation Mode
|
||||
enum CELP_ExcitationMode : s32
|
||||
{
|
||||
|
|
|
@@ -556,7 +556,7 @@ void cell_audio_thread::advance(u64 timestamp)
 	m_dynamic_period = 0;
 
 	// send aftermix event (normal audio event)
-	std::array<std::shared_ptr<lv2_event_queue>, MAX_AUDIO_EVENT_QUEUES> queues;
+	std::array<shared_ptr<lv2_event_queue>, MAX_AUDIO_EVENT_QUEUES> queues;
 	u32 queue_count = 0;
 
 	event_period++;

@@ -400,7 +400,7 @@ public:
 		u32 flags = 0; // iFlags
 		u64 source = 0; // Event source
 		u64 ack_timestamp = 0; // timestamp of last call of cellAudioSendAck
-		std::shared_ptr<lv2_event_queue> port{}; // Underlying event port
+		shared_ptr<lv2_event_queue> port{}; // Underlying event port
 	};
 
 	std::vector<key_info> keys{};

@@ -784,26 +784,26 @@ s32 cellCameraIsAttached(s32 dev_num)
 
 	if (g_cfg.io.camera == camera_handler::null)
 	{
-		return false;
+		return 0;
 	}
 
 	auto& g_camera = g_fxo->get<camera_thread>();
 
 	if (!g_camera.init)
 	{
-		return false;
+		return 0;
 	}
 
 	if (!check_dev_num(dev_num))
 	{
-		return false;
+		return 0;
 	}
 
 	vm::var<s32> type;
 
 	if (cellCameraGetType(dev_num, type) != CELL_OK)
 	{
-		return false;
+		return 0;
 	}
 
 	std::lock_guard lock(g_camera.mutex);
 

@@ -821,12 +821,12 @@ s32 cellCameraIsAttached(s32 dev_num)
 		}
 	}
 
-	return is_attached;
+	return is_attached ? 1 : 0;
 }
 
 s32 cellCameraIsOpen(s32 dev_num)
 {
-	cellCamera.notice("cellCameraIsOpen(dev_num=%d)", dev_num);
+	cellCamera.trace("cellCameraIsOpen(dev_num=%d)", dev_num);
 
 	if (g_cfg.io.camera == camera_handler::null)
 	{

@@ -852,7 +852,7 @@ s32 cellCameraIsOpen(s32 dev_num)
 
 s32 cellCameraIsStarted(s32 dev_num)
 {
-	cellCamera.notice("cellCameraIsStarted(dev_num=%d)", dev_num);
+	cellCamera.trace("cellCameraIsStarted(dev_num=%d)", dev_num);
 
 	if (g_cfg.io.camera == camera_handler::null)
 	{

@ -1031,7 +1031,7 @@ error_code cellDmuxClose(u32 handle)
|
|||
{
|
||||
cellDmux.warning("cellDmuxClose(handle=0x%x)", handle);
|
||||
|
||||
const auto dmux = idm::get<Demuxer>(handle);
|
||||
const auto dmux = idm::get_unlocked<Demuxer>(handle);
|
||||
|
||||
if (!dmux)
|
||||
{
|
||||
|
@ -1060,7 +1060,7 @@ error_code cellDmuxSetStream(u32 handle, u32 streamAddress, u32 streamSize, b8 d
|
|||
{
|
||||
cellDmux.trace("cellDmuxSetStream(handle=0x%x, streamAddress=0x%x, streamSize=%d, discontinuity=%d, userData=0x%llx)", handle, streamAddress, streamSize, discontinuity, userData);
|
||||
|
||||
const auto dmux = idm::get<Demuxer>(handle);
|
||||
const auto dmux = idm::get_unlocked<Demuxer>(handle);
|
||||
|
||||
if (!dmux)
|
||||
{
|
||||
|
@ -1088,7 +1088,7 @@ error_code cellDmuxResetStream(u32 handle)
|
|||
{
|
||||
cellDmux.warning("cellDmuxResetStream(handle=0x%x)", handle);
|
||||
|
||||
const auto dmux = idm::get<Demuxer>(handle);
|
||||
const auto dmux = idm::get_unlocked<Demuxer>(handle);
|
||||
|
||||
if (!dmux)
|
||||
{
|
||||
|
@ -1103,7 +1103,7 @@ error_code cellDmuxResetStreamAndWaitDone(u32 handle)
|
|||
{
|
||||
cellDmux.warning("cellDmuxResetStreamAndWaitDone(handle=0x%x)", handle);
|
||||
|
||||
const auto dmux = idm::get<Demuxer>(handle);
|
||||
const auto dmux = idm::get_unlocked<Demuxer>(handle);
|
||||
|
||||
if (!dmux)
|
||||
{
|
||||
|
@ -1164,7 +1164,7 @@ error_code cellDmuxEnableEs(u32 handle, vm::cptr<CellCodecEsFilterId> esFilterId
|
|||
{
|
||||
cellDmux.warning("cellDmuxEnableEs(handle=0x%x, esFilterId=*0x%x, esResourceInfo=*0x%x, esCb=*0x%x, esSpecificInfo=*0x%x, esHandle=*0x%x)", handle, esFilterId, esResourceInfo, esCb, esSpecificInfo, esHandle);
|
||||
|
||||
const auto dmux = idm::get<Demuxer>(handle);
|
||||
const auto dmux = idm::get_unlocked<Demuxer>(handle);
|
||||
|
||||
if (!dmux)
|
||||
{
|
||||
|
@ -1194,7 +1194,7 @@ error_code cellDmuxDisableEs(u32 esHandle)
|
|||
{
|
||||
cellDmux.warning("cellDmuxDisableEs(esHandle=0x%x)", esHandle);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1213,7 +1213,7 @@ error_code cellDmuxResetEs(u32 esHandle)
|
|||
{
|
||||
cellDmux.trace("cellDmuxResetEs(esHandle=0x%x)", esHandle);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1232,7 +1232,7 @@ error_code cellDmuxGetAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpeci
|
|||
{
|
||||
cellDmux.trace("cellDmuxGetAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1255,7 +1255,7 @@ error_code cellDmuxPeekAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpec
|
|||
{
|
||||
cellDmux.trace("cellDmuxPeekAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1278,7 +1278,7 @@ error_code cellDmuxGetAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auS
|
|||
{
|
||||
cellDmux.trace("cellDmuxGetAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1301,7 +1301,7 @@ error_code cellDmuxPeekAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> au
|
|||
{
|
||||
cellDmux.trace("cellDmuxPeekAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1324,7 +1324,7 @@ error_code cellDmuxReleaseAu(u32 esHandle)
|
|||
{
|
||||
cellDmux.trace("cellDmuxReleaseAu(esHandle=0x%x)", esHandle);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
@ -1342,7 +1342,7 @@ error_code cellDmuxFlushEs(u32 esHandle)
|
|||
{
|
||||
cellDmux.warning("cellDmuxFlushEs(esHandle=0x%x)", esHandle);
|
||||
|
||||
const auto es = idm::get<ElementaryStream>(esHandle);
|
||||
const auto es = idm::get_unlocked<ElementaryStream>(esHandle);
|
||||
|
||||
if (!es)
|
||||
{
|
||||
|
|
|
@ -598,7 +598,7 @@ error_code cellFsSetIoBufferFromDefaultContainer(u32 fd, u32 buffer_size, u32 pa
|
|||
{
|
||||
cellFs.todo("cellFsSetIoBufferFromDefaultContainer(fd=%d, buffer_size=%d, page_type=%d)", fd, buffer_size, page_type);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -695,7 +695,7 @@ s32 cellFsStReadInit(u32 fd, vm::cptr<CellFsRingBuffer> ringbuf)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -716,7 +716,7 @@ s32 cellFsStReadFinish(u32 fd)
|
|||
{
|
||||
cellFs.todo("cellFsStReadFinish(fd=%d)", fd);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -732,7 +732,7 @@ s32 cellFsStReadGetRingBuf(u32 fd, vm::ptr<CellFsRingBuffer> ringbuf)
|
|||
{
|
||||
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, ringbuf=*0x%x)", fd, ringbuf);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -748,7 +748,7 @@ s32 cellFsStReadGetStatus(u32 fd, vm::ptr<u64> status)
|
|||
{
|
||||
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, status=*0x%x)", fd, status);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -764,7 +764,7 @@ s32 cellFsStReadGetRegid(u32 fd, vm::ptr<u64> regid)
|
|||
{
|
||||
cellFs.todo("cellFsStReadGetRingBuf(fd=%d, regid=*0x%x)", fd, regid);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -780,7 +780,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
|
|||
{
|
||||
cellFs.todo("cellFsStReadStart(fd=%d, offset=0x%llx, size=0x%llx)", fd, offset, size);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -796,7 +796,7 @@ s32 cellFsStReadStop(u32 fd)
|
|||
{
|
||||
cellFs.todo("cellFsStReadStop(fd=%d)", fd);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -812,7 +812,7 @@ s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
|
|||
{
|
||||
cellFs.todo("cellFsStRead(fd=%d, buf=*0x%x, size=0x%llx, rsize=*0x%x)", fd, buf, size, rsize);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -828,7 +828,7 @@ s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
|
|||
{
|
||||
cellFs.todo("cellFsStReadGetCurrentAddr(fd=%d, addr=*0x%x, size=*0x%x)", fd, addr, size);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -844,7 +844,7 @@ s32 cellFsStReadPutCurrentAddr(u32 fd, vm::ptr<u8> addr, u64 size)
|
|||
{
|
||||
cellFs.todo("cellFsStReadPutCurrentAddr(fd=%d, addr=*0x%x, size=0x%llx)", fd, addr, size);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -860,7 +860,7 @@ s32 cellFsStReadWait(u32 fd, u64 size)
|
|||
{
|
||||
cellFs.todo("cellFsStReadWait(fd=%d, size=0x%llx)", fd, size);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -876,7 +876,7 @@ s32 cellFsStReadWaitCallback(u32 fd, u64 size, vm::ptr<void(s32 xfd, u64 xsize)>
|
|||
{
|
||||
cellFs.todo("cellFsStReadWaitCallback(fd=%d, size=0x%llx, func=*0x%x)", fd, size, func);
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -908,7 +908,7 @@ struct fs_aio_thread : ppu_thread
|
|||
s32 error = CELL_EBADF;
|
||||
u64 result = 0;
|
||||
|
||||
const auto file = idm::get<lv2_fs_object, lv2_file>(aio->fd);
|
||||
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(aio->fd);
|
||||
|
||||
if (!file || (type == 1 && file->flags & CELL_FS_O_WRONLY) || (type == 2 && !(file->flags & CELL_FS_O_ACCMODE)))
|
||||
{
|
||||
|
|
|
@@ -961,7 +961,7 @@ error_code cellGameContentPermit(ppu_thread& ppu, vm::ptr<char[CELL_GAME_PATH_MAX]>
 
 	if (!perm.temp.empty())
 	{
-		std::vector<std::shared_ptr<lv2_file>> lv2_files;
+		std::vector<shared_ptr<lv2_file>> lv2_files;
 
 		const std::string real_dir = vfs::get(dir) + "/";
 

@@ -455,7 +455,7 @@ error_code _cellGcmInitBody(ppu_thread& ppu, vm::pptr<CellGcmContextData> context
 	vm::var<u64> _tid;
 	vm::var<char[]> _name = vm::make_str("_gcm_intr_thread");
 	ppu_execute<&sys_ppu_thread_create>(ppu, +_tid, 0x10000, 0, 1, 0x4000, SYS_PPU_THREAD_CREATE_INTERRUPT, +_name);
-	render->intr_thread = idm::get<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
+	render->intr_thread = idm::get_unlocked<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
 	render->intr_thread->state -= cpu_flag::stop;
 	thread_ctrl::notify(*render->intr_thread);
 

@ -189,10 +189,12 @@ public:
|
|||
|
||||
struct gem_color
|
||||
{
|
||||
float r, g, b;
|
||||
ENABLE_BITWISE_SERIALIZATION;
|
||||
|
||||
f32 r, g, b;
|
||||
|
||||
gem_color() : r(0.0f), g(0.0f), b(0.0f) {}
|
||||
gem_color(float r_, float g_, float b_)
|
||||
gem_color(f32 r_, f32 g_, f32 b_)
|
||||
{
|
||||
r = std::clamp(r_, 0.0f, 1.0f);
|
||||
g = std::clamp(g_, 0.0f, 1.0f);
|
||||
|
@ -223,7 +225,7 @@ public:
|
|||
u32 ext_status = CELL_GEM_NO_EXTERNAL_PORT_DEVICE; // External port connection status
|
||||
u32 ext_id = 0; // External device ID (type). For example SHARP_SHOOTER_DEVICE_ID
|
||||
u32 port = 0; // Assigned port
|
||||
bool enabled_magnetometer = false; // Whether the magnetometer is enabled (probably used for additional rotational precision)
|
||||
bool enabled_magnetometer = true; // Whether the magnetometer is enabled (probably used for additional rotational precision)
|
||||
bool calibrated_magnetometer = false; // Whether the magnetometer is calibrated
|
||||
bool enabled_filtering = false; // Whether filtering is enabled
|
||||
bool enabled_tracking = false; // Whether tracking is enabled
|
||||
|
@ -238,17 +240,16 @@ public:
|
|||
|
||||
bool is_calibrating{false}; // Whether or not we are currently calibrating
|
||||
u64 calibration_start_us{0}; // The start timestamp of the calibration in microseconds
|
||||
u64 calibration_status_flags = 0; // The calibration status flags
|
||||
|
||||
static constexpr u64 calibration_time_us = 500000; // The calibration supposedly takes 0.5 seconds (500000 microseconds)
|
||||
|
||||
ENABLE_BITWISE_SERIALIZATION;
|
||||
};
|
||||
|
||||
CellGemAttribute attribute = {};
|
||||
CellGemVideoConvertAttribute vc_attribute = {};
|
||||
s32 video_data_out_size = -1;
|
||||
std::vector<u8> video_data_in;
|
||||
u64 status_flags = 0;
|
||||
u64 runtime_status_flags = 0; // The runtime status flags
|
||||
bool enable_pitch_correction = false;
|
||||
u32 inertial_counter = 0;
|
||||
|
||||
|
@ -269,25 +270,60 @@ public:
|
|||
return controllers[gem_num].status == CELL_GEM_STATUS_READY;
|
||||
}
|
||||
|
||||
bool is_controller_calibrating(u32 gem_num)
|
||||
void update_calibration_status()
|
||||
{
|
||||
gem_controller& gem = controllers[gem_num];
|
||||
std::scoped_lock lock(mtx);
|
||||
|
||||
if (gem.is_calibrating)
|
||||
for (u32 gem_num = 0; gem_num < CELL_GEM_MAX_NUM; gem_num++)
|
||||
{
|
||||
if ((get_guest_system_time() - gem.calibration_start_us) >= gem_controller::calibration_time_us)
|
||||
{
|
||||
gem.is_calibrating = false;
|
||||
gem.calibration_start_us = 0;
|
||||
gem.calibrated_magnetometer = true;
|
||||
gem.enabled_tracking = true;
|
||||
gem.hue = 1;
|
||||
gem_controller& controller = controllers[gem_num];
|
||||
if (!controller.is_calibrating) continue;
|
||||
|
||||
status_flags = CELL_GEM_FLAG_CALIBRATION_SUCCEEDED | CELL_GEM_FLAG_CALIBRATION_OCCURRED;
|
||||
bool controller_calibrated = true;
|
||||
|
||||
// Request controller calibration
|
||||
if (g_cfg.io.move == move_handler::real)
|
||||
{
|
||||
std::lock_guard pad_lock(pad::g_pad_mutex);
|
||||
const auto handler = pad::get_current_handler();
|
||||
const auto& pad = ::at32(handler->GetPads(), pad_num(gem_num));
|
||||
if (pad && pad->m_pad_handler == pad_handler::move)
|
||||
{
|
||||
if (!pad->move_data.calibration_requested || !pad->move_data.calibration_succeeded)
|
||||
{
|
||||
pad->move_data.calibration_requested = true;
|
||||
controller_calibrated = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// The calibration takes ~0.5 seconds on real hardware
|
||||
if ((get_guest_system_time() - controller.calibration_start_us) < gem_controller::calibration_time_us) continue;
|
||||
|
||||
if (!controller_calibrated)
|
||||
{
|
||||
cellGem.warning("Reached calibration timeout but ps move controller %d is still calibrating", gem_num);
|
||||
}
|
||||
|
||||
controller.is_calibrating = false;
|
||||
controller.calibration_start_us = 0;
|
||||
controller.calibration_status_flags = CELL_GEM_FLAG_CALIBRATION_SUCCEEDED | CELL_GEM_FLAG_CALIBRATION_OCCURRED;
|
||||
controller.calibrated_magnetometer = true;
|
||||
controller.enabled_tracking = true;
|
||||
|
||||
// Reset controller calibration request
|
||||
if (g_cfg.io.move == move_handler::real)
|
||||
{
|
||||
std::lock_guard pad_lock(pad::g_pad_mutex);
|
||||
const auto handler = pad::get_current_handler();
|
||||
const auto& pad = ::at32(handler->GetPads(), pad_num(gem_num));
|
||||
if (pad && pad->m_pad_handler == pad_handler::move)
|
||||
{
|
||||
pad->move_data.calibration_requested = false;
|
||||
pad->move_data.calibration_succeeded = false;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return gem.is_calibrating;
|
||||
}
|
||||
|
||||
void reset_controller(u32 gem_num)
|
||||
|
@ -297,6 +333,10 @@ public:
|
|||
return;
|
||||
}
|
||||
|
||||
gem_controller& controller = ::at32(controllers, gem_num);
|
||||
controller = {};
|
||||
controller.sphere_rgb = gem_color::get_default_color(gem_num);
|
||||
|
||||
bool is_connected = false;
|
||||
|
||||
switch (g_cfg.io.move)
|
||||
|
@ -315,6 +355,7 @@ public:
|
|||
|
||||
if (gem_num == i)
|
||||
{
|
||||
pad->move_data.magnetometer_enabled = controller.enabled_magnetometer;
|
||||
is_connected = true;
|
||||
}
|
||||
}
|
||||
|
@ -377,10 +418,6 @@ public:
|
|||
break;
|
||||
}
|
||||
|
||||
gem_controller& controller = ::at32(controllers, gem_num);
|
||||
controller = {};
|
||||
controller.sphere_rgb = gem_color::get_default_color(gem_num);
|
||||
|
||||
// Assign status and port number
|
||||
if (is_connected)
|
||||
{
|
||||
|
@ -389,20 +426,11 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
void paint_spheres(CellGemVideoConvertFormatEnum output_format, u32 width, u32 height, u8* video_data_out, u32 video_data_out_size);
|
||||
|
||||
gem_config_data()
|
||||
{
|
||||
if (!g_cfg_gem_real.load())
|
||||
{
|
||||
cellGem.notice("Could not load real gem config. Using defaults.");
|
||||
}
|
||||
|
||||
if (!g_cfg_gem_fake.load())
|
||||
{
|
||||
cellGem.notice("Could not load fake gem config. Using defaults.");
|
||||
}
|
||||
|
||||
cellGem.notice("Real gem config=\n", g_cfg_gem_real.to_string());
|
||||
cellGem.notice("Fake gem config=\n", g_cfg_gem_fake.to_string());
|
||||
load_configs();
|
||||
};
|
||||
|
||||
SAVESTATE_INIT_POS(15);
|
||||
|
@ -416,19 +444,46 @@ public:
|
|||
return;
|
||||
}
|
||||
|
||||
[[maybe_unused]] const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), cellGem);
|
||||
const s32 version = GET_OR_USE_SERIALIZATION_VERSION(ar.is_writing(), cellGem);
|
||||
|
||||
ar(attribute, vc_attribute, status_flags, enable_pitch_correction, inertial_counter, controllers
|
||||
, connected_controllers, updating, camera_frame, memory_ptr, start_timestamp_us);
|
||||
ar(attribute, vc_attribute, runtime_status_flags, enable_pitch_correction, inertial_counter);
|
||||
|
||||
for (gem_controller& c : controllers)
|
||||
{
|
||||
ar(c.status, c.ext_status, c.ext_id, c.port, c.enabled_magnetometer, c.calibrated_magnetometer, c.enabled_filtering, c.enabled_tracking, c.enabled_LED, c.hue_set, c.rumble);
|
||||
|
||||
// We need to add padding because we used bitwise serialization in version 1
|
||||
if (version < 2)
|
||||
{
|
||||
ar.add_padding(&gem_controller::rumble, &gem_controller::sphere_rgb);
|
||||
}
|
||||
|
||||
ar(c.sphere_rgb, c.hue, c.distance_mm, c.radius, c.radius_valid, c.is_calibrating);
|
||||
|
||||
if (version < 2)
|
||||
{
|
||||
ar.add_padding(&gem_controller::is_calibrating, &gem_controller::calibration_start_us);
|
||||
}
|
||||
|
||||
ar(c.calibration_start_us);
|
||||
|
||||
if (ar.is_writing() || version >= 2)
|
||||
{
|
||||
ar(c.calibration_status_flags);
|
||||
}
|
||||
}
|
||||
|
||||
ar(connected_controllers, updating, camera_frame, memory_ptr, start_timestamp_us);
|
||||
}
|
||||
|
||||
gem_config_data(utils::serial& ar)
|
||||
{
|
||||
save(ar);
|
||||
load_configs();
|
||||
}
|
||||
|
||||
if (ar.is_writing())
|
||||
return;
|
||||
|
||||
static void load_configs()
|
||||
{
|
||||
if (!g_cfg_gem_real.load())
|
||||
{
|
||||
cellGem.notice("Could not load real gem config. Using defaults.");
|
||||
|
@ -447,7 +502,7 @@ public:
|
|||
extern std::pair<u32, u32> get_video_resolution(const CellCameraInfoEx& info);
|
||||
extern u32 get_buffer_size_by_format(s32 format, s32 width, s32 height);
|
||||
|
||||
static inline int32_t cellGemGetVideoConvertSize(s32 output_format)
|
||||
static inline s32 cellGemGetVideoConvertSize(s32 output_format)
|
||||
{
|
||||
switch (output_format)
|
||||
{
|
||||
|
@ -474,6 +529,29 @@ static inline int32_t cellGemGetVideoConvertSize(s32 output_format)
|
|||
|
||||
namespace gem
|
||||
{
|
||||
struct gem_position
|
||||
{
|
||||
public:
|
||||
void set_position(f32 x, f32 y)
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
m_x = x;
|
||||
m_y = y;
|
||||
}
|
||||
void get_position(f32& x, f32& y)
|
||||
{
|
||||
std::lock_guard lock(m_mutex);
|
||||
x = m_x;
|
||||
y = m_y;
|
||||
}
|
||||
private:
|
||||
std::mutex m_mutex;
|
||||
f32 m_x = 0.0f;
|
||||
f32 m_y = 0.0f;
|
||||
};
|
||||
|
||||
std::array<gem_position, CELL_GEM_MAX_NUM> positions {};
|
||||
|
||||
bool convert_image_format(CellCameraFormat input_format, CellGemVideoConvertFormatEnum output_format,
|
||||
const std::vector<u8>& video_data_in, u32 width, u32 height,
|
||||
u8* video_data_out, u32 video_data_out_size)
|
||||
|
@ -524,15 +602,15 @@ namespace gem
|
|||
u8* dst0 = dst_row;
|
||||
u8* dst1 = dst_row + out_pitch;
|
||||
|
||||
for (uint32_t x = 0; x < width - 1; x += 2, src0 += 2, src1 += 2, dst0 += 8, dst1 += 8)
|
||||
for (u32 x = 0; x < width - 1; x += 2, src0 += 2, src1 += 2, dst0 += 8, dst1 += 8)
|
||||
{
|
||||
const uint8_t b = src0[0];
|
||||
const uint8_t g0 = src0[1];
|
||||
const uint8_t g1 = src1[0];
|
||||
const uint8_t r = src1[1];
|
||||
const u8 b = src0[0];
|
||||
const u8 g0 = src0[1];
|
||||
const u8 g1 = src1[0];
|
||||
const u8 r = src1[1];
|
||||
|
||||
const uint8_t top[4] = { r, g0, b, 255 };
|
||||
const uint8_t bottom[4] = { r, g1, b, 255 };
|
||||
const u8 top[4] = { r, g0, b, 255 };
|
||||
const u8 bottom[4] = { r, g1, b, 255 };
|
||||
|
||||
// Top-Left
|
||||
std::memcpy(dst0, top, 4);
|
||||
|
@ -602,6 +680,101 @@ namespace gem
|
|||
}
|
||||
}
|
||||
|
||||
void gem_config_data::paint_spheres(CellGemVideoConvertFormatEnum output_format, u32 width, u32 height, u8* video_data_out, u32 video_data_out_size)
|
||||
{
|
||||
if (!width || !height || !video_data_out || !video_data_out_size)
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
||||
struct sphere_information
|
||||
{
|
||||
f32 radius = 0.0f;
|
||||
s16 x = 0;
|
||||
s16 y = 0;
|
||||
u8 r = 0;
|
||||
u8 g = 0;
|
||||
u8 b = 0;
|
||||
};
|
||||
|
||||
std::vector<sphere_information> sphere_info;
|
||||
{
|
||||
reader_lock lock(mtx);
|
||||
|
||||
for (u32 gem_num = 0; gem_num < CELL_GEM_MAX_NUM; gem_num++)
|
||||
{
|
||||
const gem_config_data::gem_controller& controller = controllers[gem_num];
|
||||
if (!controller.radius_valid || controller.radius <= 0.0f) continue;
|
||||
|
||||
f32 x, y;
|
||||
::at32(gem::positions, gem_num).get_position(x, y);
|
||||
|
||||
const u8 r = static_cast<u8>(std::clamp(controller.sphere_rgb.r * 255.0f, 0.0f, 255.0f));
|
||||
const u8 g = static_cast<u8>(std::clamp(controller.sphere_rgb.g * 255.0f, 0.0f, 255.0f));
|
||||
const u8 b = static_cast<u8>(std::clamp(controller.sphere_rgb.b * 255.0f, 0.0f, 255.0f));
|
||||
|
||||
sphere_info.push_back({ controller.radius, static_cast<s16>(x), static_cast<s16>(y), r, g, b });
|
||||
}
|
||||
}
|
||||
|
||||
switch (output_format)
|
||||
{
|
||||
case CELL_GEM_RGBA_640x480: // RGBA output; 640*480*4-byte output buffer required
|
||||
{
|
||||
cellGem.trace("Painting spheres for CELL_GEM_RGBA_640x480");
|
||||
|
||||
const u32 out_pitch = width * 4;
|
||||
|
||||
for (const sphere_information& info : sphere_info)
|
||||
{
|
||||
const s32 x_begin = std::max(0, static_cast<s32>(std::floor(info.x - info.radius)));
|
||||
const s32 x_end = std::min<s32>(width, static_cast<s32>(std::ceil(info.x + info.radius)));
|
||||
const s32 y_begin = std::max(0, static_cast<s32>(std::floor(info.y - info.radius)));
|
||||
const s32 y_end = std::min<s32>(height, static_cast<s32>(std::ceil(info.y + info.radius)));
|
||||
|
||||
for (s32 y = y_begin; y < y_end; y++)
|
||||
{
|
||||
u8* dst = video_data_out + y * out_pitch + x_begin * 4;
|
||||
|
||||
for (s32 x = x_begin; x < x_end; x++, dst += 4)
|
||||
{
|
||||
const f32 distance = static_cast<f32>(std::sqrt(std::pow(info.x - x, 2) + std::pow(info.y - y, 2)));
|
||||
if (distance > info.radius) continue;
|
||||
|
||||
dst[0] = info.r;
|
||||
dst[1] = info.g;
|
||||
dst[2] = info.b;
|
||||
dst[3] = 255;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
break;
|
||||
}
|
||||
case CELL_GEM_BAYER_RESTORED: // Bayer pattern output, 640x480, gamma and white balance applied, output buffer required
|
||||
case CELL_GEM_RGBA_320x240: // RGBA output; 320*240*4-byte output buffer required
|
||||
case CELL_GEM_YUV_640x480: // YUV output; 640*480+640*480+640*480-byte output buffer required (contiguous)
|
||||
case CELL_GEM_YUV422_640x480: // YUV output; 640*480+320*480+320*480-byte output buffer required (contiguous)
|
||||
case CELL_GEM_YUV411_640x480: // YUV411 output; 640*480+320*240+320*240-byte output buffer required (contiguous)
|
||||
case CELL_GEM_BAYER_RESTORED_RGGB: // Restored Bayer output, 2x2 pixels rearranged into 320x240 RG1G2B
|
||||
case CELL_GEM_BAYER_RESTORED_RASTERIZED: // Restored Bayer output, R,G1,G2,B rearranged into 4 contiguous 320x240 1-channel rasters
|
||||
{
|
||||
cellGem.trace("Unimplemented: painting spheres for %s", output_format);
|
||||
break;
|
||||
}
|
||||
case CELL_GEM_NO_VIDEO_OUTPUT: // Disable video output
|
||||
{
|
||||
cellGem.trace("Ignoring painting spheres for CELL_GEM_NO_VIDEO_OUTPUT");
|
||||
break;
|
||||
}
|
||||
default:
|
||||
{
|
||||
cellGem.trace("Ignoring painting spheres for %d", static_cast<u32>(output_format));
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
void gem_config_data::operator()()
|
||||
{
|
||||
cellGem.notice("Starting thread");
|
||||
|
@ -610,6 +783,11 @@ void gem_config_data::operator()()
|
|||
{
|
||||
while (!video_conversion_in_progress && thread_ctrl::state() != thread_state::aborting && !Emu.IsStopped())
|
||||
{
|
||||
if (state)
|
||||
{
|
||||
update_calibration_status();
|
||||
}
|
||||
|
||||
thread_ctrl::wait_for(1000);
|
||||
}
|
||||
|
||||
|
@ -635,6 +813,11 @@ void gem_config_data::operator()()
|
|||
if (gem::convert_image_format(shared_data.format, vc.output_format, video_data_in, shared_data.width, shared_data.height, vc_attribute.video_data_out ? vc_attribute.video_data_out.get_ptr() : nullptr, video_data_out_size))
|
||||
{
|
||||
cellGem.trace("Converted video frame of format %s to %s", shared_data.format.load(), vc.output_format.get());
|
||||
|
||||
if (g_cfg.io.paint_move_spheres)
|
||||
{
|
||||
paint_spheres(vc.output_format, shared_data.width, shared_data.height, vc_attribute.video_data_out ? vc_attribute.video_data_out.get_ptr() : nullptr, video_data_out_size);
|
||||
}
|
||||
}
|
||||
|
||||
video_conversion_in_progress = false;
|
||||
|
@ -710,7 +893,7 @@ public:
|
|||
{
|
||||
if (g_cfg.io.move != move_handler::real)
|
||||
{
|
||||
return 1; // potentially true if less than 20 pixels have the hue
|
||||
return true; // potentially true if less than 20 pixels have the hue
|
||||
}
|
||||
|
||||
return hue < m_hues.size() && m_hues[hue] < 20; // potentially true if less than 20 pixels have the hue
|
||||
|
@ -763,7 +946,7 @@ public:
|
|||
std::lock_guard lock(pad::g_pad_mutex);
|
||||
const auto handler = pad::get_current_handler();
|
||||
auto& handlers = handler->get_handlers();
|
||||
if (auto it = handlers.find(pad_handler::move); it != handlers.end())
|
||||
if (auto it = handlers.find(pad_handler::move); it != handlers.end() && it->second)
|
||||
{
|
||||
for (auto& binding : it->second->bindings())
|
||||
{
|
||||
|
@@ -774,12 +957,22 @@ public:

if (gem_num < 0 || gem_num >= CELL_GEM_MAX_NUM) continue;

const cfg_ps_move* config = ::at32(g_cfg_move.move, gem_num);

binding.device->color_override_active = true;
binding.device->color_override.r = config->r.get();
binding.device->color_override.g = config->g.get();
binding.device->color_override.b = config->b.get();

if (g_cfg.io.allow_move_hue_set_by_game)
{
const auto& controller = gem.controllers[gem_num];
binding.device->color_override.r = static_cast<u8>(std::clamp(controller.sphere_rgb.r * 255.0f, 0.0f, 255.0f));
binding.device->color_override.g = static_cast<u8>(std::clamp(controller.sphere_rgb.g * 255.0f, 0.0f, 255.0f));
binding.device->color_override.b = static_cast<u8>(std::clamp(controller.sphere_rgb.b * 255.0f, 0.0f, 255.0f));
}
else
{
const cfg_ps_move* config = ::at32(g_cfg_move.move, gem_num);
binding.device->color_override.r = config->r.get();
binding.device->color_override.g = config->g.get();
binding.device->color_override.b = config->b.get();
}
}
}
}

@@ -791,13 +984,13 @@ public:
const cfg_ps_move* config = g_cfg_move.move[gem_num];

m_tracker.set_active(gem_num, controller.enabled_tracking && controller.status == CELL_GEM_STATUS_READY);
m_tracker.set_hue(gem_num, config->hue);
m_tracker.set_hue(gem_num, g_cfg.io.allow_move_hue_set_by_game ? controller.hue : config->hue);
m_tracker.set_hue_threshold(gem_num, config->hue_threshold);
m_tracker.set_saturation_threshold(gem_num, config->saturation_threshold);
}

m_tracker.set_min_radius(static_cast<f32>(g_cfg_move.min_radius.get() / g_cfg_move.min_radius.max));
m_tracker.set_max_radius(static_cast<f32>(g_cfg_move.max_radius.get() / g_cfg_move.max_radius.max));
m_tracker.set_min_radius(static_cast<f32>(g_cfg_move.min_radius) / 100.0f);
m_tracker.set_max_radius(static_cast<f32>(g_cfg_move.max_radius) / 100.0f);

// Process camera image
m_tracker.process_image();

@@ -902,6 +1095,11 @@ static inline void pos_to_gem_image_state(u32 gem_num, const gem_config::gem_con
{
draw_overlay_cursor(gem_num, controller, x_pos, y_pos, x_max, y_max);
}

if (g_cfg.io.paint_move_spheres)
{
::at32(gem::positions, gem_num).set_position(image_x, image_y);
}
}

static inline void pos_to_gem_state(u32 gem_num, gem_config::gem_controller& controller, vm::ptr<CellGemState>& gem_state, s32 x_pos, s32 y_pos, s32 x_max, s32 y_max, const ps_move_data& move_data)

@@ -946,19 +1144,20 @@ static inline void pos_to_gem_state(u32 gem_num, gem_config::gem_controller& con
// Calculate orientation
if (g_cfg.io.move == move_handler::real)
{
gem_state->quat[0] = move_data.quaternion[1]; // x
gem_state->quat[1] = move_data.quaternion[2]; // y
gem_state->quat[2] = move_data.quaternion[3]; // z
gem_state->quat[3] = move_data.quaternion[0]; // w
gem_state->quat[0] = move_data.quaternion[0]; // x
gem_state->quat[1] = move_data.quaternion[1]; // y
gem_state->quat[2] = move_data.quaternion[2]; // z
gem_state->quat[3] = move_data.quaternion[3]; // w
}
else
{
static constexpr f32 PI = 3.14159265f;
const auto degree_to_rad = [](f32 degree) -> f32 { return degree * PI / 180.0f; };

static constexpr f32 CONE = 10.0f / 2.0f;
const f32 roll = -degree_to_rad((image_y - half_height) / half_height * CONE); // This is actually the pitch
const f32 pitch = -degree_to_rad((image_x - half_width) / half_width * CONE); // This is actually the yaw
const f32 max_angle_per_side_h = g_cfg.io.fake_move_rotation_cone_h / 2.0f;
const f32 max_angle_per_side_v = g_cfg.io.fake_move_rotation_cone_v / 2.0f;
const f32 roll = -degree_to_rad((image_y - half_height) / half_height * max_angle_per_side_v); // This is actually the pitch
const f32 pitch = -degree_to_rad((image_x - half_width) / half_width * max_angle_per_side_h); // This is actually the yaw
const f32 yaw = degree_to_rad(0.0f);
const f32 cr = std::cos(roll * 0.5f);
const f32 sr = std::sin(roll * 0.5f);
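
For reference, the fake-Move path above derives the orientation from the on-screen position: the horizontal and vertical offsets from the image centre are scaled by half of the configured rotation cone and treated as Euler angles. A minimal standalone sketch of composing such roll/pitch/yaw angles into a quaternion is given below; the struct, function name and (x, y, z, w) layout are illustrative assumptions, not code from this patch.

#include <cmath>

struct quat_f { float x, y, z, w; };

// Compose a quaternion from Euler angles in radians (roll about X, pitch about Y, yaw about Z).
static quat_f euler_to_quat(float roll, float pitch, float yaw)
{
	const float cr = std::cos(roll * 0.5f),  sr = std::sin(roll * 0.5f);
	const float cp = std::cos(pitch * 0.5f), sp = std::sin(pitch * 0.5f);
	const float cy = std::cos(yaw * 0.5f),   sy = std::sin(yaw * 0.5f);

	return {
		sr * cp * cy - cr * sp * sy, // x
		cr * sp * cy + sr * cp * sy, // y
		cr * cp * sy - sr * sp * cy, // z
		cr * cp * cy + sr * sp * sy  // w
	};
}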
@ -982,6 +1181,11 @@ static inline void pos_to_gem_state(u32 gem_num, gem_config::gem_controller& con
|
|||
{
|
||||
draw_overlay_cursor(gem_num, controller, x_pos, y_pos, x_max, y_max);
|
||||
}
|
||||
|
||||
if (g_cfg.io.paint_move_spheres)
|
||||
{
|
||||
::at32(gem::positions, gem_num).set_position(image_x, image_y);
|
||||
}
|
||||
}
|
||||
|
||||
extern bool is_input_allowed();
|
||||
|
@ -1430,13 +1634,15 @@ error_code cellGemCalibrate(u32 gem_num)
|
|||
return CELL_GEM_ERROR_INVALID_PARAMETER;
|
||||
}
|
||||
|
||||
if (gem.is_controller_calibrating(gem_num))
|
||||
auto& controller = gem.controllers[gem_num];
|
||||
|
||||
if (controller.is_calibrating)
|
||||
{
|
||||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
gem.controllers[gem_num].is_calibrating = true;
|
||||
gem.controllers[gem_num].calibration_start_us = get_guest_system_time();
|
||||
controller.is_calibrating = true;
|
||||
controller.calibration_start_us = get_guest_system_time();
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -1459,7 +1665,7 @@ error_code cellGemClearStatusFlags(u32 gem_num, u64 mask)
|
|||
return CELL_GEM_ERROR_INVALID_PARAMETER;
|
||||
}
|
||||
|
||||
gem.status_flags &= ~mask;
|
||||
gem.controllers[gem_num].calibration_status_flags &= ~mask;
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -1563,13 +1769,28 @@ error_code cellGemEnableMagnetometer(u32 gem_num, u32 enable)
|
|||
return CELL_GEM_NOT_CONNECTED;
|
||||
}
|
||||
|
||||
auto& controller = gem.controllers[gem_num];
|
||||
|
||||
// NOTE: RE doesn't show this check but it is mentioned in the docs, so I'll leave it here for now.
|
||||
//if (!gem.controllers[gem_num].calibrated_magnetometer)
|
||||
//if (!controller.calibrated_magnetometer)
|
||||
//{
|
||||
// return CELL_GEM_NOT_CALIBRATED;
|
||||
//}
|
||||
|
||||
gem.controllers[gem_num].enabled_magnetometer = !!enable;
|
||||
controller.enabled_magnetometer = !!enable;
|
||||
|
||||
if (g_cfg.io.move == move_handler::real)
|
||||
{
|
||||
std::lock_guard lock(pad::g_pad_mutex);
|
||||
|
||||
const auto handler = pad::get_current_handler();
|
||||
const auto& pad = ::at32(handler->GetPads(), pad_num(gem_num));
|
||||
|
||||
if (pad && pad->m_pad_handler == pad_handler::move)
|
||||
{
|
||||
pad->move_data.magnetometer_enabled = controller.enabled_magnetometer;
|
||||
}
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -1597,12 +1818,27 @@ error_code cellGemEnableMagnetometer2(u32 gem_num, u32 enable)
|
|||
return CELL_GEM_NOT_CONNECTED;
|
||||
}
|
||||
|
||||
if (!gem.controllers[gem_num].calibrated_magnetometer)
|
||||
auto& controller = gem.controllers[gem_num];
|
||||
|
||||
if (!controller.calibrated_magnetometer)
|
||||
{
|
||||
return CELL_GEM_NOT_CALIBRATED;
|
||||
}
|
||||
|
||||
gem.controllers[gem_num].enabled_magnetometer = !!enable;
|
||||
controller.enabled_magnetometer = !!enable;
|
||||
|
||||
if (g_cfg.io.move == move_handler::real)
|
||||
{
|
||||
std::lock_guard lock(pad::g_pad_mutex);
|
||||
|
||||
const auto handler = pad::get_current_handler();
|
||||
const auto& pad = ::at32(handler->GetPads(), pad_num(gem_num));
|
||||
|
||||
if (pad && pad->m_pad_handler == pad_handler::move)
|
||||
{
|
||||
pad->move_data.magnetometer_enabled = controller.enabled_magnetometer;
|
||||
}
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -1656,7 +1892,7 @@ error_code cellGemFilterState(u32 gem_num, u32 enable)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
error_code cellGemForceRGB(u32 gem_num, float r, float g, float b)
|
||||
error_code cellGemForceRGB(u32 gem_num, f32 r, f32 g, f32 b)
|
||||
{
|
||||
cellGem.todo("cellGemForceRGB(gem_num=%d, r=%f, g=%f, b=%f)", gem_num, r, g, b);
|
||||
|
||||
|
@@ -1680,8 +1916,13 @@ error_code cellGemForceRGB(float r, float g, float b)
// color = color * (2.f / sum)
//}

gem.controllers[gem_num].sphere_rgb = gem_config::gem_color(r, g, b);
gem.controllers[gem_num].enabled_tracking = false;
auto& controller = gem.controllers[gem_num];

controller.sphere_rgb = gem_config::gem_color(r, g, b);
controller.enabled_tracking = false;

const auto [h, s, v] = ps_move_tracker<false>::rgb_to_hsv(r, g, b);
controller.hue = h;

return CELL_OK;
}

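The forced sphere colour is now also converted to a hue via ps_move_tracker<false>::rgb_to_hsv and stored on the controller. As a point of reference, hue is conventionally derived from RGB as sketched below; this is a generic illustration of the usual HSV formula, not the project's tracker implementation.

#include <algorithm>
#include <cmath>

// Returns the hue in degrees [0, 360) for r, g, b given in [0, 1] (generic HSV definition).
static float rgb_to_hue(float r, float g, float b)
{
	const float max = std::max({r, g, b});
	const float min = std::min({r, g, b});
	const float delta = max - min;

	if (delta <= 0.0f)
		return 0.0f; // grey: hue is undefined, return 0 by convention

	float hue = 0.0f;
	if (max == r)      hue = std::fmod((g - b) / delta, 6.0f);
	else if (max == g) hue = (b - r) / delta + 2.0f;
	else               hue = (r - g) / delta + 4.0f;

	hue *= 60.0f;
	return hue < 0.0f ? hue + 360.0f : hue;
}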
@ -1752,9 +1993,9 @@ error_code cellGemGetCameraState(vm::ptr<CellGemCameraState> camera_state)
|
|||
// TODO: use correct camera settings
|
||||
camera_state->exposure = 0;
|
||||
camera_state->exposure_time = 1.0f / 60.0f;
|
||||
camera_state->gain = 1.0;
|
||||
camera_state->pitch_angle = 0.0;
|
||||
camera_state->pitch_angle_estimate = 0.0;
|
||||
camera_state->gain = 1.0f;
|
||||
camera_state->pitch_angle = 0.0f;
|
||||
camera_state->pitch_angle_estimate = 0.0f;
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -2059,7 +2300,7 @@ error_code cellGemGetMemorySize(s32 max_connect)
|
|||
return not_an_error(GemGetMemorySize(max_connect));
|
||||
}
|
||||
|
||||
error_code cellGemGetRGB(u32 gem_num, vm::ptr<float> r, vm::ptr<float> g, vm::ptr<float> b)
|
||||
error_code cellGemGetRGB(u32 gem_num, vm::ptr<f32> r, vm::ptr<f32> g, vm::ptr<f32> b)
|
||||
{
|
||||
cellGem.todo("cellGemGetRGB(gem_num=%d, r=*0x%x, g=*0x%x, b=*0x%x)", gem_num, r, g, b);
|
||||
|
||||
|
@ -2209,7 +2450,7 @@ error_code cellGemGetState(u32 gem_num, u32 flag, u64 time_parameter, vm::ptr<Ce
|
|||
return CELL_GEM_COMPUTING_AVAILABLE_COLORS;
|
||||
}
|
||||
|
||||
if (gem.is_controller_calibrating(gem_num))
|
||||
if (controller.is_calibrating)
|
||||
{
|
||||
return CELL_GEM_SPHERE_CALIBRATING;
|
||||
}
|
||||
|
@ -2245,7 +2486,7 @@ error_code cellGemGetStatusFlags(u32 gem_num, vm::ptr<u64> flags)
|
|||
return CELL_GEM_ERROR_INVALID_PARAMETER;
|
||||
}
|
||||
|
||||
*flags = gem.status_flags;
|
||||
*flags = gem.runtime_status_flags | gem.controllers[gem_num].calibration_status_flags;
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -2374,7 +2615,7 @@ error_code cellGemInit(ppu_thread& ppu, vm::cptr<CellGemAttribute> attribute)
|
|||
|
||||
gem.updating = false;
|
||||
gem.camera_frame = 0;
|
||||
gem.status_flags = 0;
|
||||
gem.runtime_status_flags = 0;
|
||||
gem.attribute = *attribute;
|
||||
|
||||
for (int gem_num = 0; gem_num < CELL_GEM_MAX_NUM; gem_num++)
|
||||
|
@ -2406,13 +2647,15 @@ error_code cellGemInvalidateCalibration(s32 gem_num)
|
|||
return CELL_GEM_ERROR_INVALID_PARAMETER;
|
||||
}
|
||||
|
||||
gem.controllers[gem_num].calibrated_magnetometer = false;
|
||||
auto& controller = gem.controllers[gem_num];
|
||||
|
||||
// TODO: does this really stop an ongoing calibration ?
|
||||
gem.controllers[gem_num].is_calibrating = false;
|
||||
gem.controllers[gem_num].calibration_start_us = 0;
|
||||
|
||||
// TODO: gem.status_flags (probably not changed)
|
||||
controller.calibrated_magnetometer = false;
|
||||
controller.is_calibrating = false;
|
||||
controller.calibration_start_us = 0;
|
||||
controller.calibration_status_flags = 0;
|
||||
controller.hue_set = false;
|
||||
controller.enabled_tracking = false;
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -2640,6 +2883,25 @@ error_code cellGemSetRumble(u32 gem_num, u8 rumble)
|
|||
|
||||
gem.controllers[gem_num].rumble = rumble;
|
||||
|
||||
// Set actual device rumble
|
||||
if (g_cfg.io.move == move_handler::real)
|
||||
{
|
||||
std::lock_guard pad_lock(pad::g_pad_mutex);
|
||||
const auto handler = pad::get_current_handler();
|
||||
auto& handlers = handler->get_handlers();
|
||||
if (auto it = handlers.find(pad_handler::move); it != handlers.end() && it->second)
|
||||
{
|
||||
const u32 pad_index = pad_num(gem_num);
|
||||
for (const auto& binding : it->second->bindings())
|
||||
{
|
||||
if (!binding.device || binding.device->player_id != pad_index) continue;
|
||||
|
||||
handler->SetRumble(pad_index, rumble, rumble > 0);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -2668,7 +2930,7 @@ error_code cellGemSetYaw(u32 gem_num, vm::ptr<f32> z_direction)
|
|||
|
||||
error_code cellGemTrackHues(vm::cptr<u32> req_hues, vm::ptr<u32> res_hues)
|
||||
{
|
||||
cellGem.todo("cellGemTrackHues(req_hues=*0x%x, res_hues=*0x%x)", req_hues, res_hues);
|
||||
cellGem.todo("cellGemTrackHues(req_hues=%s, res_hues=*0x%x)", req_hues ? fmt::format("*0x%x [%d, %d, %d, %d]", req_hues, req_hues[0], req_hues[1], req_hues[2], req_hues[3]) : "*0x0", res_hues);
|
||||
|
||||
auto& gem = g_fxo->get<gem_config>();
|
||||
|
||||
|
@ -2692,8 +2954,6 @@ error_code cellGemTrackHues(vm::cptr<u32> req_hues, vm::ptr<u32> res_hues)
|
|||
gem.controllers[i].enabled_LED = true;
|
||||
gem.controllers[i].hue_set = true;
|
||||
|
||||
// TODO: set hue based on tracker data
|
||||
|
||||
switch (i)
|
||||
{
|
||||
default:
|
||||
|
@ -2711,6 +2971,9 @@ error_code cellGemTrackHues(vm::cptr<u32> req_hues, vm::ptr<u32> res_hues)
|
|||
break;
|
||||
}
|
||||
|
||||
const auto [r, g, b] = ps_move_tracker<false>::hsv_to_rgb(gem.controllers[i].hue, 1.0f, 1.0f);
|
||||
gem.controllers[i].sphere_rgb = gem_config::gem_color(r / 255.0f, g / 255.0f, b / 255.0f);
|
||||
|
||||
if (res_hues)
|
||||
{
|
||||
res_hues[i] = gem.controllers[i].hue;
|
||||
|
@ -2739,7 +3002,8 @@ error_code cellGemTrackHues(vm::cptr<u32> req_hues, vm::ptr<u32> res_hues)
|
|||
gem.controllers[i].hue_set = true;
|
||||
gem.controllers[i].hue = req_hues[i];
|
||||
|
||||
// TODO: set hue of tracker
|
||||
const auto [r, g, b] = ps_move_tracker<false>::hsv_to_rgb(gem.controllers[i].hue, 1.0f, 1.0f);
|
||||
gem.controllers[i].sphere_rgb = gem_config::gem_color(r / 255.0f, g / 255.0f, b / 255.0f);
|
||||
|
||||
if (res_hues)
|
||||
{
|
||||
|
|
|
@ -273,7 +273,7 @@ error_code cellGifDecReadHeader(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStrea
|
|||
return CELL_GIFDEC_ERROR_ARG;
|
||||
}
|
||||
|
||||
const u32& fd = subHandle->fd;
|
||||
const u32 fd = subHandle->fd;
|
||||
CellGifDecInfo& current_info = subHandle->info;
|
||||
|
||||
// Write the header to buffer
|
||||
|
@ -288,7 +288,7 @@ error_code cellGifDecReadHeader(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStrea
|
|||
}
|
||||
case CELL_GIFDEC_FILE:
|
||||
{
|
||||
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
file->file.seek(0);
|
||||
file->file.read(buffer, sizeof(buffer));
|
||||
break;
|
||||
|
@ -302,7 +302,7 @@ error_code cellGifDecReadHeader(vm::ptr<GifDecoder> mainHandle, vm::ptr<GifStrea
|
|||
return CELL_GIFDEC_ERROR_STREAM_FORMAT; // Surprisingly there is no error code related to headers
|
||||
}
|
||||
|
||||
u8 packedField = buffer[10];
|
||||
const u8 packedField = buffer[10];
|
||||
current_info.SWidth = buffer[6] + buffer[7] * 0x100;
|
||||
current_info.SHeight = buffer[8] + buffer[9] * 0x100;
|
||||
current_info.SGlobalColorTableFlag = packedField >> 7;
|
||||
|
@ -500,7 +500,7 @@ error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStre
|
|||
|
||||
case CELL_GIFDEC_FILE:
|
||||
{
|
||||
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
file->file.seek(0);
|
||||
file->file.read(gif.get(), fileSize);
|
||||
break;
|
||||
|
@ -520,8 +520,8 @@ error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStre
|
|||
return CELL_GIFDEC_ERROR_STREAM_FORMAT;
|
||||
|
||||
const int bytesPerLine = static_cast<int>(dataCtrlParam->outputBytesPerLine);
|
||||
const char nComponents = 4;
|
||||
uint image_size = width * height * nComponents;
|
||||
constexpr char nComponents = 4;
|
||||
const u32 image_size = width * height * nComponents;
|
||||
|
||||
switch(current_outParam.outputColorSpace)
|
||||
{
|
||||
|
@ -541,9 +541,8 @@ error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStre
|
|||
{
|
||||
memcpy(data.get_ptr(), image.get(), image_size);
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CELL_GIFDEC_ARGB:
|
||||
{
|
||||
if (bytesPerLine > width * nComponents) // Check if we need padding
|
||||
|
@ -579,9 +578,8 @@ error_code cellGifDecDecodeData(vm::ptr<GifDecoder> mainHandle, vm::cptr<GifStre
|
|||
}
|
||||
std::memcpy(data.get_ptr(), img.get(), image_size);
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
default:
|
||||
return CELL_GIFDEC_ERROR_ARG;
|
||||
}
|
||||
|
|
|
@ -516,7 +516,7 @@ error_code cellHttpUtilEscapeUri(vm::ptr<char> out, u32 outSize, vm::cptr<u8> in
|
|||
|
||||
for (u32 pos = 0; rindex >= 0; rindex--, pos++)
|
||||
{
|
||||
char c1 = in[pos];
|
||||
const char c1 = in[pos];
|
||||
|
||||
if (false) // DAT[c1] == '\x03') // TODO
|
||||
{
|
||||
|
@ -529,7 +529,7 @@ error_code cellHttpUtilEscapeUri(vm::ptr<char> out, u32 outSize, vm::cptr<u8> in
|
|||
return CELL_HTTP_UTIL_ERROR_NO_MEMORY;
|
||||
}
|
||||
|
||||
const char* chars = "0123456789ABCDEF";
|
||||
constexpr const char* chars = "0123456789ABCDEF";
|
||||
out[out_pos++] = '%'; // 0x25
|
||||
out[out_pos++] = chars[c1 >> 4];
|
||||
out[out_pos++] = chars[c1 & 0xf];
|
||||
|
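Both escaping routines in this file emit a percent-escape by indexing the high and low nibble of the byte into a hexadecimal digit table. A self-contained sketch of that pattern, with illustrative names that are not taken from this patch:

#include <string>

// Percent-encode a single byte as "%XY" using a hex digit table.
static void append_percent_escape(std::string& out, unsigned char c)
{
	static constexpr char hex[] = "0123456789ABCDEF";
	out += '%';
	out += hex[c >> 4];  // high nibble
	out += hex[c & 0xF]; // low nibble
}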
@ -618,7 +618,7 @@ error_code cellHttpUtilFormUrlEncode(vm::ptr<char> out, u32 outSize, vm::cptr<u8
|
|||
|
||||
for (u32 pos = 0; rindex >= 0; rindex--, pos++)
|
||||
{
|
||||
char c1 = in[pos];
|
||||
const char c1 = in[pos];
|
||||
|
||||
if (c1 == ' ')
|
||||
{
|
||||
|
@ -645,7 +645,7 @@ error_code cellHttpUtilFormUrlEncode(vm::ptr<char> out, u32 outSize, vm::cptr<u8
|
|||
return CELL_HTTP_UTIL_ERROR_NO_MEMORY;
|
||||
}
|
||||
|
||||
const char* chars = "0123456789ABCDEF";
|
||||
constexpr const char* chars = "0123456789ABCDEF";
|
||||
out[out_pos++] = '%'; // 0x25
|
||||
out[out_pos++] = chars[c1 >> 4];
|
||||
out[out_pos++] = chars[c1 & 0xf];
|
||||
|
@ -707,7 +707,7 @@ error_code cellHttpUtilFormUrlDecode(vm::ptr<u8> out, u32 size, vm::cptr<char> i
|
|||
for (s32 index = 0, pos = 0;; index++)
|
||||
{
|
||||
size_needed = index + 1;
|
||||
char c1 = in[pos++];
|
||||
const char c1 = in[pos++];
|
||||
|
||||
if (!c1)
|
||||
{
|
||||
|
@ -731,7 +731,7 @@ error_code cellHttpUtilFormUrlDecode(vm::ptr<u8> out, u32 size, vm::cptr<char> i
|
|||
|
||||
const auto check_char = [](b8 c)
|
||||
{
|
||||
u32 utmp = static_cast<u32>(c);
|
||||
const u32 utmp = static_cast<u32>(c);
|
||||
s32 stmp = utmp - 48;
|
||||
if (static_cast<u8>(c - 48) > 9)
|
||||
{
|
||||
|
|
|
@ -76,7 +76,7 @@ error_code cellJpgDecOpen(u32 mainHandle, vm::ptr<u32> subHandle, vm::ptr<CellJp
|
|||
case CELL_JPGDEC_FILE:
|
||||
{
|
||||
// Get file descriptor and size
|
||||
const auto real_path = vfs::get(src->fileName.get_ptr());
|
||||
const std::string real_path = vfs::get(src->fileName.get_ptr());
|
||||
fs::file file_s(real_path);
|
||||
if (!file_s) return CELL_JPGDEC_ERROR_OPEN_FILE;
|
||||
|
||||
|
@ -103,7 +103,7 @@ error_code cellJpgDecClose(u32 mainHandle, u32 subHandle)
|
|||
{
|
||||
cellJpgDec.warning("cellJpgDecOpen(mainHandle=0x%x, subHandle=0x%x)", mainHandle, subHandle);
|
||||
|
||||
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
|
||||
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
|
||||
|
||||
if (!subHandle_data)
|
||||
{
|
||||
|
@ -120,15 +120,15 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
|
|||
{
|
||||
cellJpgDec.trace("cellJpgDecReadHeader(mainHandle=0x%x, subHandle=0x%x, info=*0x%x)", mainHandle, subHandle, info);
|
||||
|
||||
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
|
||||
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
|
||||
|
||||
if (!subHandle_data)
|
||||
{
|
||||
return CELL_JPGDEC_ERROR_FATAL;
|
||||
}
|
||||
|
||||
const u32& fd = subHandle_data->fd;
|
||||
const u64& fileSize = subHandle_data->fileSize;
|
||||
const u32 fd = subHandle_data->fd;
|
||||
const u64 fileSize = subHandle_data->fileSize;
|
||||
CellJpgDecInfo& current_info = subHandle_data->info;
|
||||
|
||||
// Write the header to buffer
|
||||
|
@ -142,7 +142,7 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
|
|||
|
||||
case CELL_JPGDEC_FILE:
|
||||
{
|
||||
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
file->file.seek(0);
|
||||
file->file.read(buffer.get(), fileSize);
|
||||
break;
|
||||
|
@ -158,12 +158,12 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
|
|||
|
||||
u32 i = 4;
|
||||
|
||||
if(i >= fileSize)
|
||||
if (i >= fileSize)
|
||||
return CELL_JPGDEC_ERROR_HEADER;
|
||||
|
||||
u16 block_length = buffer[i] * 0xFF + buffer[i+1];
|
||||
u16 block_length = buffer[i] * 0xFF + buffer[i + 1];
|
||||
|
||||
while(true)
|
||||
while (true)
|
||||
{
|
||||
i += block_length; // Increase the file index to get to the next block
|
||||
if (i >= fileSize || // Check to protect against segmentation faults
|
||||
|
@ -172,15 +172,15 @@ error_code cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDe
|
|||
return CELL_JPGDEC_ERROR_HEADER;
|
||||
}
|
||||
|
||||
if(buffer[i+1] == 0xC0)
|
||||
if (buffer[i + 1] == 0xC0)
|
||||
break; // 0xFFC0 is the "Start of frame" marker which contains the file size
|
||||
|
||||
i += 2; // Skip the block marker
|
||||
block_length = buffer[i] * 0xFF + buffer[i+1]; // Go to the next block
|
||||
block_length = buffer[i] * 0xFF + buffer[i + 1]; // Go to the next block
|
||||
}
|
||||
|
||||
current_info.imageWidth = buffer[i+7]*0x100 + buffer[i+8];
|
||||
current_info.imageHeight = buffer[i+5]*0x100 + buffer[i+6];
|
||||
current_info.imageWidth = buffer[i + 7] * 0x100 + buffer[i + 8];
|
||||
current_info.imageHeight = buffer[i + 5] * 0x100 + buffer[i + 6];
|
||||
current_info.numComponents = 3; // Unimplemented
|
||||
current_info.colorSpace = CELL_JPG_RGB;
|
||||
|
||||
|
@ -201,7 +201,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
|
||||
dataOutInfo->status = CELL_JPGDEC_DEC_STATUS_STOP;
|
||||
|
||||
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
|
||||
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
|
||||
|
||||
if (!subHandle_data)
|
||||
{
|
||||
|
@ -223,7 +223,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
|
||||
case CELL_JPGDEC_FILE:
|
||||
{
|
||||
auto file = idm::get<lv2_fs_object, lv2_file>(fd);
|
||||
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
|
||||
file->file.seek(0);
|
||||
file->file.read(jpg.get(), fileSize);
|
||||
break;
|
||||
|
@ -267,12 +267,11 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
{
|
||||
memcpy(data.get_ptr(), image.get(), image_size);
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CELL_JPG_ARGB:
|
||||
{
|
||||
const int nComponents = 4;
|
||||
constexpr int nComponents = 4;
|
||||
image_size *= nComponents;
|
||||
if (bytesPerLine > width * nComponents || flip) //check if we need padding
|
||||
{
|
||||
|
@ -307,16 +306,15 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
}
|
||||
std::memcpy(data.get_ptr(), img.get(), image_size);
|
||||
}
|
||||
break;
|
||||
}
|
||||
break;
|
||||
|
||||
case CELL_JPG_GRAYSCALE:
|
||||
case CELL_JPG_YCbCr:
|
||||
case CELL_JPG_UPSAMPLE_ONLY:
|
||||
case CELL_JPG_GRAYSCALE_TO_ALPHA_RGBA:
|
||||
case CELL_JPG_GRAYSCALE_TO_ALPHA_ARGB:
|
||||
cellJpgDec.error("cellJpgDecDecodeData: Unsupported color space (%d)", current_outParam.outputColorSpace);
|
||||
break;
|
||||
break;
|
||||
|
||||
default:
|
||||
return CELL_JPGDEC_ERROR_ARG;
|
||||
|
@ -324,7 +322,7 @@ error_code cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data,
|
|||
|
||||
dataOutInfo->status = CELL_JPGDEC_DEC_STATUS_FINISH;
|
||||
|
||||
if(dataCtrlParam->outputBytesPerLine)
|
||||
if (dataCtrlParam->outputBytesPerLine)
|
||||
dataOutInfo->outputLines = static_cast<u32>(image_size / dataCtrlParam->outputBytesPerLine);
|
||||
|
||||
return CELL_OK;
|
||||
|
@ -340,7 +338,7 @@ error_code cellJpgDecSetParameter(u32 mainHandle, u32 subHandle, vm::cptr<CellJp
|
|||
{
|
||||
cellJpgDec.trace("cellJpgDecSetParameter(mainHandle=0x%x, subHandle=0x%x, inParam=*0x%x, outParam=*0x%x)", mainHandle, subHandle, inParam, outParam);
|
||||
|
||||
const auto subHandle_data = idm::get<CellJpgDecSubHandle>(subHandle);
|
||||
const auto subHandle_data = idm::get_unlocked<CellJpgDecSubHandle>(subHandle);
|
||||
|
||||
if (!subHandle_data)
|
||||
{
|
||||
|
|
|
@ -144,9 +144,9 @@ error_code cellPhotoDecodeFromFile(vm::cptr<char> srcHddDir, vm::cptr<char> srcH
|
|||
const std::string vpath = fmt::format("%s/%s", srcHddDir.get_ptr(), srcHddFile.get_ptr());
|
||||
const std::string path = vfs::get(vpath);
|
||||
|
||||
if (!vpath.starts_with("/dev_hdd0") && !vpath.starts_with("/dev_hdd1"))
|
||||
if (!vpath.starts_with("/dev_hdd0") && !vpath.starts_with("/dev_hdd1") && !vpath.starts_with("/dev_bdvd"))
|
||||
{
|
||||
cellPhotoDecode.error("Source '%s' is not inside dev_hdd0 or dev_hdd1", vpath);
|
||||
cellPhotoDecode.error("Source '%s' is not inside dev_hdd0 or dev_hdd1 or dev_bdvd", vpath);
|
||||
return CELL_PHOTO_DECODE_ERROR_ACCESS_ERROR; // TODO: is this correct?
|
||||
}
|
||||
|
||||
|
|
|
@ -93,7 +93,7 @@ void pngDecReadBuffer(png_structp png_ptr, png_bytep out, png_size_t length)
|
|||
if (buffer.file)
|
||||
{
|
||||
// Get the file
|
||||
auto file = idm::get<lv2_fs_object, lv2_file>(buffer.fd);
|
||||
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(buffer.fd);
|
||||
|
||||
// Read the data
|
||||
file->file.read(out, length);
|
||||
|
|
|
@ -94,11 +94,11 @@ struct search_content_t
|
|||
ENABLE_BITWISE_SERIALIZATION;
|
||||
};
|
||||
|
||||
using content_id_type = std::pair<u64, std::shared_ptr<search_content_t>>;
|
||||
using content_id_type = std::pair<u64, shared_ptr<search_content_t>>;
|
||||
|
||||
struct content_id_map
|
||||
{
|
||||
std::unordered_map<u64, std::shared_ptr<search_content_t>> map;
|
||||
std::unordered_map<u64, shared_ptr<search_content_t>> map;
|
||||
|
||||
shared_mutex mutex;
|
||||
|
||||
|
@ -539,7 +539,7 @@ error_code cellSearchStartListSearch(CellSearchListSearchType type, CellSearchSo
|
|||
|
||||
sysutil_register_cb([=, &content_map = g_fxo->get<content_id_map>(), &search](ppu_thread& ppu) -> s32
|
||||
{
|
||||
auto curr_search = idm::get<search_object_t>(id);
|
||||
auto curr_search = idm::get_unlocked<search_object_t>(id);
|
||||
vm::var<CellSearchResultParam> resultParam;
|
||||
resultParam->searchId = id;
|
||||
resultParam->resultNum = 0; // Set again later
|
||||
|
@ -613,7 +613,7 @@ error_code cellSearchStartListSearch(CellSearchListSearchType type, CellSearchSo
|
|||
auto found = content_map.map.find(hash);
|
||||
if (found == content_map.map.end()) // content isn't yet being tracked
|
||||
{
|
||||
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
|
||||
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
|
||||
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
|
||||
{
|
||||
// TODO: Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
|
||||
|
@ -800,7 +800,7 @@ error_code cellSearchStartContentSearchInList(vm::cptr<CellSearchContentId> list
|
|||
|
||||
sysutil_register_cb([=, list_path = std::string(content_info->infoPath.contentPath), &search, &content_map](ppu_thread& ppu) -> s32
|
||||
{
|
||||
auto curr_search = idm::get<search_object_t>(id);
|
||||
auto curr_search = idm::get_unlocked<search_object_t>(id);
|
||||
vm::var<CellSearchResultParam> resultParam;
|
||||
resultParam->searchId = id;
|
||||
resultParam->resultNum = 0; // Set again later
|
||||
|
@ -855,7 +855,7 @@ error_code cellSearchStartContentSearchInList(vm::cptr<CellSearchContentId> list
|
|||
auto found = content_map.map.find(hash);
|
||||
if (found == content_map.map.end()) // content isn't yet being tracked
|
||||
{
|
||||
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
|
||||
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
|
||||
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
|
||||
{
|
||||
// Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
|
||||
|
@ -1060,7 +1060,7 @@ error_code cellSearchStartContentSearch(CellSearchContentSearchType type, CellSe
|
|||
|
||||
sysutil_register_cb([=, &content_map = g_fxo->get<content_id_map>(), &search](ppu_thread& ppu) -> s32
|
||||
{
|
||||
auto curr_search = idm::get<search_object_t>(id);
|
||||
auto curr_search = idm::get_unlocked<search_object_t>(id);
|
||||
vm::var<CellSearchResultParam> resultParam;
|
||||
resultParam->searchId = id;
|
||||
resultParam->resultNum = 0; // Set again later
|
||||
|
@ -1096,7 +1096,7 @@ error_code cellSearchStartContentSearch(CellSearchContentSearchType type, CellSe
|
|||
auto found = content_map.map.find(hash);
|
||||
if (found == content_map.map.end()) // content isn't yet being tracked
|
||||
{
|
||||
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
|
||||
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
|
||||
if (item_path.length() > CELL_SEARCH_PATH_LEN_MAX)
|
||||
{
|
||||
// Create mapping which will be resolved to an actual hard link in VFS by cellSearchPrepareFile
|
||||
|
@ -1372,7 +1372,7 @@ error_code cellSearchGetContentInfoByOffset(CellSearchId searchId, s32 offset, v
|
|||
std::memset(outContentId->data + 4, -1, CELL_SEARCH_CONTENT_ID_SIZE - 4);
|
||||
}
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -1518,7 +1518,7 @@ error_code cellSearchGetOffsetByContentId(CellSearchId searchId, vm::cptr<CellSe
|
|||
return error;
|
||||
}
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -1568,7 +1568,7 @@ error_code cellSearchGetContentIdByOffset(CellSearchId searchId, s32 offset, vm:
|
|||
std::memset(outContentId->data + 4, -1, CELL_SEARCH_CONTENT_ID_SIZE - 4);
|
||||
}
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -1663,7 +1663,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
|
|||
// Reset values first
|
||||
std::memset(outContext->data, 0, 4);
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -1690,17 +1690,17 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
|
|||
const auto& first_content = first_content_id.second;
|
||||
ensure(first_content);
|
||||
|
||||
const auto get_random_content = [&searchObject, &first_content]() -> std::shared_ptr<search_content_t>
|
||||
const auto get_random_content = [&searchObject, &first_content]() -> shared_ptr<search_content_t>
|
||||
{
|
||||
if (searchObject->content_ids.size() == 1)
|
||||
{
|
||||
return first_content;
|
||||
}
|
||||
|
||||
std::vector<content_id_type> result;
|
||||
std::sample(searchObject->content_ids.begin(), searchObject->content_ids.end(), std::back_inserter(result), 1, std::mt19937{std::random_device{}()});
|
||||
ensure(result.size() == 1);
|
||||
std::shared_ptr<search_content_t> content = result[0].second;
|
||||
ensure(!!content);
|
||||
shared_ptr<search_content_t> content = ensure(result[0].second);
|
||||
return content;
|
||||
};
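
The helper above draws one random entry from the tracked search results with std::sample, which avoids copying and shuffling the whole container. A minimal standalone illustration of the same pattern, with placeholder container and element types:

#include <algorithm>
#include <iterator>
#include <random>
#include <string>
#include <unordered_map>
#include <utility>
#include <vector>

// Pick one random key/value pair out of a map without shuffling all of it.
static std::pair<int, std::string> pick_random(const std::unordered_map<int, std::string>& items)
{
	std::vector<std::pair<int, std::string>> out;
	std::sample(items.begin(), items.end(), std::back_inserter(out), 1, std::mt19937{std::random_device{}()});
	return out.at(0); // throws if the map was empty
}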
|
||||
|
||||
|
@ -1736,7 +1736,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
|
|||
{
|
||||
// Select random track
|
||||
// TODO: whole playlist
|
||||
std::shared_ptr<search_content_t> content = get_random_content();
|
||||
shared_ptr<search_content_t> content = get_random_content();
|
||||
context.playlist.push_back(content->infoPath.contentPath);
|
||||
cellSearch.notice("cellSearchGetMusicSelectionContext(): Hash=%08X, Assigning random track: Type=0x%x, Path=%s", content_hash, +content->type, context.playlist.back());
|
||||
}
|
||||
|
@ -1757,7 +1757,7 @@ error_code cellSearchGetMusicSelectionContext(CellSearchId searchId, vm::cptr<Ce
|
|||
{
|
||||
// Select random track
|
||||
// TODO: whole playlist
|
||||
std::shared_ptr<search_content_t> content = get_random_content();
|
||||
shared_ptr<search_content_t> content = get_random_content();
|
||||
context.playlist.push_back(content->infoPath.contentPath);
|
||||
cellSearch.notice("cellSearchGetMusicSelectionContext(): Assigning random track: Type=0x%x, Path=%s", +content->type, context.playlist.back());
|
||||
}
|
||||
|
@ -2044,7 +2044,7 @@ error_code cellSearchCancel(CellSearchId searchId)
|
|||
{
|
||||
cellSearch.todo("cellSearchCancel(searchId=0x%x)", searchId);
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -2075,7 +2075,7 @@ error_code cellSearchEnd(CellSearchId searchId)
|
|||
return error;
|
||||
}
|
||||
|
||||
const auto searchObject = idm::get<search_object_t>(searchId);
|
||||
const auto searchObject = idm::get_unlocked<search_object_t>(searchId);
|
||||
|
||||
if (!searchObject)
|
||||
{
|
||||
|
@ -2120,7 +2120,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
|
|||
|
||||
// Search for the content that matches our current selection
|
||||
auto& content_map = g_fxo->get<content_id_map>();
|
||||
std::shared_ptr<search_content_t> found_content;
|
||||
shared_ptr<search_content_t> found_content;
|
||||
u64 hash = 0;
|
||||
|
||||
for (const std::string& track : playlist)
|
||||
|
@ -2187,7 +2187,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
|
|||
}
|
||||
|
||||
// TODO: check for actual content inside the directory
|
||||
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
|
||||
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
|
||||
curr_find->type = CELL_SEARCH_CONTENTTYPE_MUSICLIST;
|
||||
curr_find->repeat_mode = repeat_mode;
|
||||
curr_find->context_option = context_option;
|
||||
|
@ -2243,7 +2243,7 @@ error_code music_selection_context::find_content_id(vm::ptr<CellSearchContentId>
|
|||
continue;
|
||||
}
|
||||
|
||||
std::shared_ptr<search_content_t> curr_find = std::make_shared<search_content_t>();
|
||||
shared_ptr<search_content_t> curr_find = make_shared<search_content_t>();
|
||||
curr_find->type = CELL_SEARCH_CONTENTTYPE_MUSIC;
|
||||
curr_find->repeat_mode = repeat_mode;
|
||||
curr_find->context_option = context_option;
|
||||
|
|
|
@ -1265,7 +1265,7 @@ s32 _spurs::initialize(ppu_thread& ppu, vm::ptr<CellSpurs> spurs, u32 revision,
|
|||
}
|
||||
|
||||
// entry point cannot be initialized immediately because SPU LS will be rewritten by sys_spu_thread_group_start()
|
||||
//idm::get<named_thread<spu_thread>>(spurs->spus[num])->custom_task = [entry = spurs->spuImg.entry_point](spu_thread& spu)
|
||||
//idm::get_unlocked<named_thread<spu_thread>>(spurs->spus[num])->custom_task = [entry = spurs->spuImg.entry_point](spu_thread& spu)
|
||||
{
|
||||
// Disabled
|
||||
//spu.RegisterHleFunction(entry, spursKernelEntry);
|
||||
|
|
|
@ -622,7 +622,7 @@ bool spursKernel2SelectWorkload(spu_thread& spu)
|
|||
void spursKernelDispatchWorkload(spu_thread& spu, u64 widAndPollStatus)
|
||||
{
|
||||
const auto ctxt = spu._ptr<SpursKernelContext>(0x100);
|
||||
auto isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
const bool isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
|
||||
auto pollStatus = static_cast<u32>(widAndPollStatus);
|
||||
auto wid = static_cast<u32>(widAndPollStatus >> 32);
|
||||
|
@ -674,7 +674,7 @@ void spursKernelDispatchWorkload(spu_thread& spu, u64 widAndPollStatus)
|
|||
bool spursKernelWorkloadExit(spu_thread& spu)
|
||||
{
|
||||
const auto ctxt = spu._ptr<SpursKernelContext>(0x100);
|
||||
auto isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
const bool isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
|
||||
// Select next workload to run
|
||||
spu.gpr[3].clear();
|
||||
|
@ -701,7 +701,7 @@ bool spursKernelEntry(spu_thread& spu)
|
|||
ctxt->spuNum = spu.gpr[3]._u32[3];
|
||||
ctxt->spurs.set(spu.gpr[4]._u64[1]);
|
||||
|
||||
auto isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
const bool isKernel2 = ctxt->spurs->flags1 & SF1_32_WORKLOADS ? true : false;
|
||||
|
||||
// Initialise the SPURS context to its initial values
|
||||
ctxt->dmaTagId = CELL_SPURS_KERNEL_DMA_TAG_ID;
|
||||
|
@ -785,8 +785,8 @@ void spursSysServiceIdleHandler(spu_thread& spu, SpursKernelContext* ctxt)
|
|||
}
|
||||
}
|
||||
|
||||
bool allSpusIdle = nIdlingSpus == spurs->nSpus ? true : false;
|
||||
bool exitIfNoWork = spurs->flags1 & SF1_EXIT_IF_NO_WORK ? true : false;
|
||||
const bool allSpusIdle = nIdlingSpus == spurs->nSpus;
|
||||
const bool exitIfNoWork = spurs->flags1 & SF1_EXIT_IF_NO_WORK ? true : false;
|
||||
shouldExit = allSpusIdle && exitIfNoWork;
|
||||
|
||||
// Check if any workloads can be scheduled
|
||||
|
@ -843,7 +843,7 @@ void spursSysServiceIdleHandler(spu_thread& spu, SpursKernelContext* ctxt)
|
|||
}
|
||||
}
|
||||
|
||||
bool spuIdling = spurs->spuIdling & (1 << ctxt->spuNum) ? true : false;
|
||||
const bool spuIdling = spurs->spuIdling & (1 << ctxt->spuNum) ? true : false;
|
||||
if (foundReadyWorkload && shouldExit == false)
|
||||
{
|
||||
spurs->spuIdling &= ~(1 << ctxt->spuNum);
|
||||
|
|
|
@ -659,7 +659,7 @@ extern bool check_if_vdec_contexts_exist()
|
|||
|
||||
extern void vdecEntry(ppu_thread& ppu, u32 vid)
|
||||
{
|
||||
idm::get<vdec_context>(vid)->exec(ppu, vid);
|
||||
idm::get_unlocked<vdec_context>(vid)->exec(ppu, vid);
|
||||
|
||||
ppu.state += cpu_flag::exit;
|
||||
}
|
||||
|
@ -886,7 +886,7 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
|
|||
}
|
||||
|
||||
// Create decoder context
|
||||
std::shared_ptr<vdec_context> vdec;
|
||||
shared_ptr<vdec_context> vdec;
|
||||
|
||||
if (std::unique_lock lock{g_fxo->get<hle_locks_t>(), std::try_to_lock})
|
||||
{
|
||||
|
@ -909,7 +909,7 @@ static error_code vdecOpen(ppu_thread& ppu, T type, U res, vm::cptr<CellVdecCb>
|
|||
ppu_execute<&sys_ppu_thread_create>(ppu, +_tid, 0x10000, vid, +res->ppuThreadPriority, +res->ppuThreadStackSize, SYS_PPU_THREAD_CREATE_INTERRUPT, +_name);
|
||||
*handle = vid;
|
||||
|
||||
const auto thrd = idm::get<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
|
||||
const auto thrd = idm::get_unlocked<named_thread<ppu_thread>>(static_cast<u32>(*_tid));
|
||||
|
||||
thrd->cmd_list
|
||||
({
|
||||
|
@ -949,7 +949,7 @@ error_code cellVdecClose(ppu_thread& ppu, u32 handle)
|
|||
return {};
|
||||
}
|
||||
|
||||
auto vdec = idm::get<vdec_context>(handle);
|
||||
auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec)
|
||||
{
|
||||
|
@ -1003,7 +1003,7 @@ error_code cellVdecStartSeq(ppu_thread& ppu, u32 handle)
|
|||
|
||||
cellVdec.warning("cellVdecStartSeq(handle=0x%x)", handle);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec)
|
||||
{
|
||||
|
@ -1055,7 +1055,7 @@ error_code cellVdecEndSeq(ppu_thread& ppu, u32 handle)
|
|||
|
||||
cellVdec.warning("cellVdecEndSeq(handle=0x%x)", handle);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec)
|
||||
{
|
||||
|
@ -1088,7 +1088,7 @@ error_code cellVdecDecodeAu(ppu_thread& ppu, u32 handle, CellVdecDecodeMode mode
|
|||
|
||||
cellVdec.trace("cellVdecDecodeAu(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
|
||||
{
|
||||
|
@ -1136,7 +1136,7 @@ error_code cellVdecDecodeAuEx2(ppu_thread& ppu, u32 handle, CellVdecDecodeMode m
|
|||
|
||||
cellVdec.todo("cellVdecDecodeAuEx2(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, +mode, auInfo);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec || !auInfo || !auInfo->size || !auInfo->startAddr)
|
||||
{
|
||||
|
@ -1192,7 +1192,7 @@ error_code cellVdecGetPictureExt(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecP
|
|||
|
||||
cellVdec.trace("cellVdecGetPictureExt(handle=0x%x, format=*0x%x, outBuff=*0x%x, arg4=*0x%x)", handle, format, outBuff, arg4);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec || !format)
|
||||
{
|
||||
|
@ -1245,7 +1245,7 @@ error_code cellVdecGetPictureExt(ppu_thread& ppu, u32 handle, vm::cptr<CellVdecP
|
|||
|
||||
if (notify)
|
||||
{
|
||||
auto vdec_ppu = idm::get<named_thread<ppu_thread>>(vdec->ppu_tid);
|
||||
auto vdec_ppu = idm::get_unlocked<named_thread<ppu_thread>>(vdec->ppu_tid);
|
||||
if (vdec_ppu) thread_ctrl::notify(*vdec_ppu);
|
||||
}
|
||||
|
||||
|
@ -1354,7 +1354,7 @@ error_code cellVdecGetPicItem(ppu_thread& ppu, u32 handle, vm::pptr<CellVdecPicI
|
|||
|
||||
cellVdec.trace("cellVdecGetPicItem(handle=0x%x, picItem=**0x%x)", handle, picItem);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec || !picItem)
|
||||
{
|
||||
|
@ -1596,7 +1596,7 @@ error_code cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frameRateCode)
|
|||
{
|
||||
cellVdec.trace("cellVdecSetFrameRate(handle=0x%x, frameRateCode=0x%x)", handle, +frameRateCode);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
// 0x80 seems like a common prefix
|
||||
if (!vdec || (frameRateCode & 0xf8) != 0x80)
|
||||
|
@ -1659,7 +1659,7 @@ error_code cellVdecSetPts(u32 handle, vm::ptr<void> unk)
|
|||
{
|
||||
cellVdec.error("cellVdecSetPts(handle=0x%x, unk=*0x%x)", handle, unk);
|
||||
|
||||
const auto vdec = idm::get<vdec_context>(handle);
|
||||
const auto vdec = idm::get_unlocked<vdec_context>(handle);
|
||||
|
||||
if (!vdec || !unk)
|
||||
{
|
||||
|
|
|
@ -205,7 +205,7 @@ error_code cellVpostClose(u32 handle)
|
|||
{
|
||||
cellVpost.warning("cellVpostClose(handle=0x%x)", handle);
|
||||
|
||||
const auto vpost = idm::get<VpostInstance>(handle);
|
||||
const auto vpost = idm::get_unlocked<VpostInstance>(handle);
|
||||
|
||||
if (!vpost)
|
||||
{
|
||||
|
@ -220,7 +220,7 @@ error_code cellVpostExec(u32 handle, vm::cptr<u8> inPicBuff, vm::cptr<CellVpostC
|
|||
{
|
||||
cellVpost.trace("cellVpostExec(handle=0x%x, inPicBuff=*0x%x, ctrlParam=*0x%x, outPicBuff=*0x%x, picInfo=*0x%x)", handle, inPicBuff, ctrlParam, outPicBuff, picInfo);
|
||||
|
||||
const auto vpost = idm::get<VpostInstance>(handle);
|
||||
const auto vpost = idm::get_unlocked<VpostInstance>(handle);
|
||||
|
||||
if (!vpost)
|
||||
{
|
||||
|
|
|
@ -510,7 +510,7 @@ s32 cellSurMixerCreate(vm::cptr<CellSurMixerConfig> config)
|
|||
|
||||
libmixer.warning("*** surMixer created (ch1=%d, ch2=%d, ch6=%d, ch8=%d)", config->chStrips1, config->chStrips2, config->chStrips6, config->chStrips8);
|
||||
|
||||
//auto thread = idm::make_ptr<ppu_thread>("Surmixer Thread");
|
||||
//auto thread = idm::make_ptr<named_thread<ppu_thread>>("Surmixer Thread");
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
|
|
@ -652,7 +652,7 @@ error_code npDrmIsAvailable(vm::cptr<u8> k_licensee_addr, vm::cptr<char> drm_pat
|
|||
std::string enc_drm_path;
|
||||
ensure(vm::read_string(drm_path.addr(), 0x100, enc_drm_path, true), "Secret access violation");
|
||||
|
||||
sceNp.warning(u8"npDrmIsAvailable(): drm_path=“%s”", enc_drm_path);
|
||||
sceNp.warning("npDrmIsAvailable(): drm_path=\"%s\"", enc_drm_path);
|
||||
|
||||
auto& npdrmkeys = g_fxo->get<loaded_npdrm_keys>();
|
||||
|
||||
|
@ -5347,7 +5347,7 @@ error_code sceNpScoreCreateTransactionCtx(s32 titleCtxId)
|
|||
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
|
||||
}
|
||||
|
||||
auto score = idm::get<score_ctx>(titleCtxId);
|
||||
auto score = idm::get_unlocked<score_ctx>(titleCtxId);
|
||||
|
||||
if (!score)
|
||||
{
|
||||
|
@ -5399,24 +5399,12 @@ error_code sceNpScoreSetTimeout(s32 ctxId, usecond_t timeout)
|
|||
return SCE_NP_COMMUNITY_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const u32 idm_id = static_cast<u32>(ctxId);
|
||||
|
||||
if (idm_id >= score_transaction_ctx::id_base && idm_id < (score_transaction_ctx::id_base + score_transaction_ctx::id_count))
|
||||
if (auto trans = idm::get_unlocked<score_transaction_ctx>(ctxId))
|
||||
{
|
||||
auto trans = idm::get<score_transaction_ctx>(ctxId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
trans->timeout = timeout;
|
||||
}
|
||||
else if (idm_id >= score_ctx::id_base && idm_id < (score_ctx::id_base + score_ctx::id_count))
|
||||
else if (auto score = idm::get_unlocked<score_ctx>(ctxId))
|
||||
{
|
||||
auto score = idm::get<score_ctx>(ctxId);
|
||||
if (!ctxId)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
score->timeout = timeout;
|
||||
}
|
||||
else
|
||||
|
@ -5443,23 +5431,17 @@ error_code sceNpScoreSetPlayerCharacterId(s32 ctxId, SceNpScorePcId pcId)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
if (static_cast<u32>(ctxId) >= score_transaction_ctx::id_base)
|
||||
if (auto trans = idm::get_unlocked<score_transaction_ctx>(ctxId))
|
||||
{
|
||||
auto trans = idm::get<score_transaction_ctx>(ctxId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
trans->pcId = pcId;
|
||||
}
|
||||
else if (auto score = idm::get_unlocked<score_ctx>(ctxId))
|
||||
{
|
||||
score->pcId = pcId;
|
||||
}
|
||||
else
|
||||
{
|
||||
auto score = idm::get<score_ctx>(ctxId);
|
||||
if (!ctxId)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
score->pcId = pcId;
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
|
@ -5476,7 +5458,7 @@ error_code sceNpScoreWaitAsync(s32 transId, vm::ptr<s32> result)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<score_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
@ -5498,7 +5480,7 @@ error_code sceNpScorePollAsync(s32 transId, vm::ptr<s32> result)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<score_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
@ -5515,9 +5497,9 @@ error_code sceNpScorePollAsync(s32 transId, vm::ptr<s32> result)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
std::pair<std::optional<error_code>, std::shared_ptr<score_transaction_ctx>> get_score_transaction_context(s32 transId, bool reset_transaction = true)
|
||||
std::pair<std::optional<error_code>, shared_ptr<score_transaction_ctx>> get_score_transaction_context(s32 transId, bool reset_transaction = true)
|
||||
{
|
||||
auto trans_ctx = idm::get<score_transaction_ctx>(transId);
|
||||
auto trans_ctx = idm::get_unlocked<score_transaction_ctx>(transId);
|
||||
|
||||
if (!trans_ctx)
|
||||
{
|
||||
|
@ -6217,7 +6199,7 @@ error_code sceNpScoreAbortTransaction(s32 transId)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<score_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<score_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
|
|
@ -1837,7 +1837,7 @@ public:
|
|||
virtual ~RecvMessageDialogBase() = default;
|
||||
|
||||
virtual error_code Exec(SceNpBasicMessageMainType type, SceNpBasicMessageRecvOptions options, SceNpBasicMessageRecvAction& recv_result, u64& chosen_msg_id) = 0;
|
||||
virtual void callback_handler(const std::shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) = 0;
|
||||
virtual void callback_handler(const shared_ptr<std::pair<std::string, message_data>> new_msg, u64 msg_id) = 0;
|
||||
|
||||
protected:
|
||||
std::shared_ptr<rpcn::rpcn_client> m_rpcn;
|
||||
|
|
|
@ -139,7 +139,7 @@ error_code sceNpSnsFbAbortHandle(u32 handle)
|
|||
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto sfh = idm::get<sns_fb_handle_t>(handle);
|
||||
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
|
||||
|
||||
if (!sfh)
|
||||
{
|
||||
|
@ -172,7 +172,7 @@ error_code sceNpSnsFbGetAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTokenPa
|
|||
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto sfh = idm::get<sns_fb_handle_t>(handle);
|
||||
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
|
||||
|
||||
if (!sfh)
|
||||
{
|
||||
|
@ -200,7 +200,7 @@ s32 sceNpSnsFbStreamPublish(u32 handle) // add more arguments
|
|||
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto sfh = idm::get<sns_fb_handle_t>(handle);
|
||||
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
|
||||
|
||||
if (!sfh)
|
||||
{
|
||||
|
@ -258,7 +258,7 @@ s32 sceNpSnsFbLoadThrottle(u32 handle)
|
|||
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto sfh = idm::get<sns_fb_handle_t>(handle);
|
||||
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
|
||||
|
||||
if (!sfh)
|
||||
{
|
||||
|
@ -299,7 +299,7 @@ error_code sceNpSnsFbGetLongAccessToken(u32 handle, vm::cptr<SceNpSnsFbAccessTok
|
|||
return SCE_NP_SNS_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto sfh = idm::get<sns_fb_handle_t>(handle);
|
||||
const auto sfh = idm::get_unlocked<sns_fb_handle_t>(handle);
|
||||
|
||||
if (!sfh)
|
||||
{
|
||||
|
|
|
@ -123,7 +123,7 @@ struct sce_np_trophy_manager
|
|||
return res;
|
||||
}
|
||||
|
||||
ctxt = idm::check<trophy_context_t>(context);
|
||||
ctxt = idm::check_unlocked<trophy_context_t>(context);
|
||||
|
||||
if (!ctxt)
|
||||
{
|
||||
|
@ -144,7 +144,7 @@ struct sce_np_trophy_manager
|
|||
return res;
|
||||
}
|
||||
|
||||
const auto hndl = idm::check<trophy_handle_t>(handle);
|
||||
const auto hndl = idm::check_unlocked<trophy_handle_t>(handle);
|
||||
|
||||
if (!hndl)
|
||||
{
|
||||
|
@ -409,7 +409,7 @@ error_code sceNpTrophyAbortHandle(u32 handle)
|
|||
return SCE_NP_TROPHY_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const auto hndl = idm::check<trophy_handle_t>(handle);
|
||||
const auto hndl = idm::check_unlocked<trophy_handle_t>(handle);
|
||||
|
||||
if (!hndl)
|
||||
{
|
||||
|
@ -552,7 +552,7 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
|
|||
}
|
||||
|
||||
const auto [ctxt, error] = trophy_manager.get_context_ex(context, handle, true);
|
||||
const auto handle_ptr = idm::get<trophy_handle_t>(handle);
|
||||
const auto handle_ptr = idm::get_unlocked<trophy_handle_t>(handle);
|
||||
|
||||
if (error)
|
||||
{
|
||||
|
@ -641,7 +641,7 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
|
|||
return SCE_NP_TROPHY_ERROR_UNKNOWN_CONTEXT;
|
||||
}
|
||||
|
||||
if (handle_ptr.get() != idm::check<trophy_handle_t>(handle))
|
||||
if (handle_ptr.get() != idm::check_unlocked<trophy_handle_t>(handle))
|
||||
{
|
||||
on_error();
|
||||
return SCE_NP_TROPHY_ERROR_UNKNOWN_HANDLE;
|
||||
|
@ -716,7 +716,6 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
|
|||
|
||||
// Create a counter which is destroyed after the function ends
|
||||
const auto queued = std::make_shared<atomic_t<u32>>(0);
|
||||
std::weak_ptr<atomic_t<u32>> wkptr = queued;
|
||||
|
||||
for (auto status : statuses)
|
||||
{
|
||||
|
@ -724,12 +723,11 @@ error_code sceNpTrophyRegisterContext(ppu_thread& ppu, u32 context, u32 handle,
|
|||
*queued += status.second;
|
||||
for (s32 completed = 0; completed <= status.second; completed++)
|
||||
{
|
||||
sysutil_register_cb([statusCb, status, context, completed, arg, wkptr](ppu_thread& cb_ppu) -> s32
|
||||
sysutil_register_cb([statusCb, status, context, completed, arg, queued](ppu_thread& cb_ppu) -> s32
|
||||
{
|
||||
// TODO: it is possible that we need to check the return value here as well.
|
||||
statusCb(cb_ppu, context, status.first, completed, status.second, arg);
|
||||
|
||||
const auto queued = wkptr.lock();
|
||||
if (queued && (*queued)-- == 1)
|
||||
{
|
||||
queued->notify_one();
|
||||
|
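This hunk switches the queued callback from capturing a std::weak_ptr (and locking it on every invocation) to capturing the std::shared_ptr counter directly, so the counter is guaranteed to outlive every queued callback. A small standalone illustration of the difference; the names and types here are illustrative, not RPCS3 code:

#include <atomic>
#include <functional>
#include <memory>

// With a weak_ptr capture the counter may already be destroyed when the callback runs;
// capturing the shared_ptr keeps it alive until the last queued task has finished.
std::function<void()> make_decrement_task(std::shared_ptr<std::atomic<unsigned>> counter)
{
	return [counter]() // shared ownership: extends the counter's lifetime
	{
		if (counter->fetch_sub(1) == 1)
		{
			// last task finished; a waiter on the counter could be notified here
		}
	};
}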
|
|
@ -133,7 +133,7 @@ error_code sceNpTusCreateTransactionCtx(s32 titleCtxId)
|
|||
return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
|
||||
}
|
||||
|
||||
auto tus = idm::get<tus_ctx>(titleCtxId);
|
||||
auto tus = idm::get_unlocked<tus_ctx>(titleCtxId);
|
||||
|
||||
if (!tus)
|
||||
{
|
||||
|
@ -185,24 +185,12 @@ error_code sceNpTusSetTimeout(s32 ctxId, u32 timeout)
|
|||
return SCE_NP_COMMUNITY_ERROR_INVALID_ARGUMENT;
|
||||
}
|
||||
|
||||
const u32 idm_id = static_cast<u32>(ctxId);
|
||||
|
||||
if (idm_id >= tus_transaction_ctx::id_base && idm_id < (tus_transaction_ctx::id_base + tus_transaction_ctx::id_count))
|
||||
if (auto trans = idm::get_unlocked<tus_transaction_ctx>(ctxId))
|
||||
{
|
||||
auto trans = idm::get<tus_transaction_ctx>(ctxId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
trans->timeout = timeout;
|
||||
}
|
||||
else if (idm_id >= tus_ctx::id_base && idm_id < (tus_ctx::id_base + tus_ctx::id_count))
|
||||
else if (auto tus = idm::get_unlocked<tus_ctx>(ctxId))
|
||||
{
|
||||
auto tus = idm::get<tus_ctx>(ctxId);
|
||||
if (!ctxId)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
}
|
||||
tus->timeout = timeout;
|
||||
}
|
||||
else
|
||||
|
@ -224,7 +212,7 @@ error_code sceNpTusAbortTransaction(s32 transId)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<tus_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
@ -246,7 +234,7 @@ error_code sceNpTusWaitAsync(s32 transId, vm::ptr<s32> result)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<tus_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
@ -268,7 +256,7 @@ error_code sceNpTusPollAsync(s32 transId, vm::ptr<s32> result)
|
|||
return SCE_NP_COMMUNITY_ERROR_NOT_INITIALIZED;
|
||||
}
|
||||
|
||||
auto trans = idm::get<tus_transaction_ctx>(transId);
|
||||
auto trans = idm::get_unlocked<tus_transaction_ctx>(transId);
|
||||
if (!trans)
|
||||
{
|
||||
return SCE_NP_COMMUNITY_ERROR_INVALID_ID;
|
||||
|
@@ -326,7 +314,7 @@ error_code scenp_tus_set_multislot_variable(s32 transId, T targetNpId, vm::cptr<
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -413,7 +401,7 @@ error_code scenp_tus_get_multislot_variable(s32 transId, T targetNpId, vm::cptr<
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -500,7 +488,7 @@ error_code scenp_tus_get_multiuser_variable(s32 transId, T targetNpIdArray, SceN
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -599,7 +587,7 @@ error_code scenp_tus_get_friends_variable(s32 transId, SceNpTusSlotId slotId, s3
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -659,7 +647,7 @@ error_code scenp_tus_add_and_get_variable(s32 transId, T targetNpId, SceNpTusSlo
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -736,7 +724,7 @@ error_code scenp_tus_try_and_set_variable(s32 transId, T targetNpId, SceNpTusSlo
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -813,7 +801,7 @@ error_code scenp_tus_delete_multislot_variable(s32 transId, T targetNpId, vm::cp
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -885,7 +873,7 @@ error_code scenp_tus_set_data(s32 transId, T targetNpId, SceNpTusSlotId slotId,
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -957,7 +945,7 @@ error_code scenp_tus_get_data(s32 transId, T targetNpId, SceNpTusSlotId slotId,
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -1044,7 +1032,7 @@ error_code scenp_tus_get_multislot_data_status(s32 transId, T targetNpId, vm::cp
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -1131,7 +1119,7 @@ error_code scenp_tus_get_multiuser_data_status(s32 transId, T targetNpIdArray, S
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -1230,7 +1218,7 @@ error_code scenp_tus_get_friends_data_status(s32 transId, SceNpTusSlotId slotId,
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -1295,7 +1283,7 @@ error_code scenp_tus_delete_multislot_data(s32 transId, T targetNpId, vm::cptr<S
 		return SCE_NP_COMMUNITY_ERROR_INVALID_ONLINE_ID;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{
@@ -1337,7 +1325,7 @@ error_code sceNpTusDeleteMultiSlotDataVUserAsync(s32 transId, vm::cptr<SceNpTusV
 	return scenp_tus_delete_multislot_data(transId, targetVirtualUserId, slotIdArray, arrayNum, option, true, true);
 }
 
-void scenp_tss_no_file(const std::shared_ptr<tus_transaction_ctx>& trans, vm::ptr<SceNpTssDataStatus> dataStatus)
+void scenp_tss_no_file(const shared_ptr<tus_transaction_ctx>& trans, vm::ptr<SceNpTssDataStatus> dataStatus)
 {
 	// TSS are files stored on PSN by developers, no dumps available atm
 	std::memset(dataStatus.get_ptr(), 0, sizeof(SceNpTssDataStatus));

@@ -1365,7 +1353,7 @@ error_code sceNpTssGetData(s32 transId, SceNpTssSlotId slotId, vm::ptr<SceNpTssD
 		return SCE_NP_COMMUNITY_ERROR_INSUFFICIENT_ARGUMENT;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{

@@ -1398,7 +1386,7 @@ error_code sceNpTssGetDataAsync(s32 transId, SceNpTssSlotId slotId, vm::ptr<SceN
 		return SCE_NP_COMMUNITY_ERROR_INSUFFICIENT_ARGUMENT;
 	}
 
-	auto trans_ctx = idm::get<tus_transaction_ctx>(transId);
+	auto trans_ctx = idm::get_unlocked<tus_transaction_ctx>(transId);
 
 	if (!trans_ctx)
 	{
@@ -51,7 +51,7 @@ void config_event_entry(ppu_thread& ppu)
 	}
 
 	const u32 queue_id = cfg.queue_id;
-	auto queue = idm::get<lv2_obj, lv2_event_queue>(queue_id);
+	auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(queue_id);
 
 	while (queue && sys_event_queue_receive(ppu, queue_id, vm::null, 0) == CELL_OK)
 	{

@@ -81,7 +81,7 @@ void config_event_entry(ppu_thread& ppu)
 		if (!queue->exists)
 		{
 			// Exit condition
-			queue = nullptr;
+			queue = null_ptr;
 			break;
 		}
 

@@ -134,7 +134,7 @@ extern void send_sys_io_connect_event(usz index, u32 state)
 
 	if (cfg.init_ctr)
 	{
-		if (auto port = idm::get<lv2_obj, lv2_event_queue>(cfg.queue_id))
+		if (auto port = idm::get_unlocked<lv2_obj, lv2_event_queue>(cfg.queue_id))
 		{
 			port->send(0, 1, index, state);
 		}
@@ -60,7 +60,7 @@ error_code sys_mempool_create(ppu_thread& ppu, vm::ptr<sys_mempool_t> mempool, v
 	auto id = idm::make<memory_pool_t>();
 	*mempool = id;
 
-	auto memory_pool = idm::get<memory_pool_t>(id);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(id);
 
 	memory_pool->chunk = chunk;
 	memory_pool->chunk_size = chunk_size;

@@ -114,7 +114,7 @@ void sys_mempool_destroy(ppu_thread& ppu, sys_mempool_t mempool)
 {
 	sysPrxForUser.warning("sys_mempool_destroy(mempool=%d)", mempool);
 
-	auto memory_pool = idm::get<memory_pool_t>(mempool);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 	if (memory_pool)
 	{
 		u32 condid = memory_pool->condid;

@@ -136,7 +136,7 @@ error_code sys_mempool_free_block(ppu_thread& ppu, sys_mempool_t mempool, vm::pt
 {
 	sysPrxForUser.warning("sys_mempool_free_block(mempool=%d, block=*0x%x)", mempool, block);
 
-	auto memory_pool = idm::get<memory_pool_t>(mempool);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 	if (!memory_pool)
 	{
 		return CELL_EINVAL;

@@ -160,7 +160,7 @@ u64 sys_mempool_get_count(ppu_thread& ppu, sys_mempool_t mempool)
 {
 	sysPrxForUser.warning("sys_mempool_get_count(mempool=%d)", mempool);
 
-	auto memory_pool = idm::get<memory_pool_t>(mempool);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 	if (!memory_pool)
 	{
 		return CELL_EINVAL;

@@ -175,7 +175,7 @@ vm::ptr<void> sys_mempool_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
 {
 	sysPrxForUser.warning("sys_mempool_allocate_block(mempool=%d)", mempool);
 
-	auto memory_pool = idm::get<memory_pool_t>(mempool);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 	if (!memory_pool)
 	{ // if the memory pool gets deleted-- is null, clearly it's impossible to allocate memory.
 		return vm::null;

@@ -185,7 +185,7 @@ vm::ptr<void> sys_mempool_allocate_block(ppu_thread& ppu, sys_mempool_t mempool)
 	while (memory_pool->free_blocks.empty()) // while is to guard against spurious wakeups
 	{
 		sys_cond_wait(ppu, memory_pool->condid, 0);
-		memory_pool = idm::get<memory_pool_t>(mempool);
+		memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 		if (!memory_pool) // in case spurious wake up was from delete, don't die by accessing a freed pool.
 		{ // No need to unlock as if the pool is freed, the lock was freed as well.
 			return vm::null;

@@ -202,7 +202,7 @@ vm::ptr<void> sys_mempool_try_allocate_block(ppu_thread& ppu, sys_mempool_t memp
 {
 	sysPrxForUser.warning("sys_mempool_try_allocate_block(mempool=%d)", mempool);
 
-	auto memory_pool = idm::get<memory_pool_t>(mempool);
+	auto memory_pool = idm::get_unlocked<memory_pool_t>(mempool);
 
 	if (!memory_pool || memory_pool->free_blocks.empty())
 	{
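One detail worth calling out in sys_mempool_allocate_block above: after every sys_cond_wait the pool is looked up again, because it can be destroyed while the thread sleeps and the old pointer must not be trusted. A condensed sketch of that guard with standard primitives (the names are illustrative, not the emulator's):

    #include <condition_variable>
    #include <map>
    #include <memory>
    #include <mutex>
    #include <vector>

    // Stand-in for the emulator's pool table.
    struct pool
    {
        std::vector<void*> free_blocks;
    };

    std::mutex g_mutex;
    std::condition_variable g_cond;
    std::map<int, std::shared_ptr<pool>> g_pools;

    void* allocate_block(int id)
    {
        std::unique_lock lock(g_mutex);

        auto it = g_pools.find(id);
        if (it == g_pools.end())
            return nullptr; // pool already destroyed

        auto mp = it->second;

        // 'while' guards against spurious wakeups, exactly like the loop in the diff.
        while (mp->free_blocks.empty())
        {
            g_cond.wait(lock);

            // Re-validate: the pool may have been destroyed while we slept.
            it = g_pools.find(id);
            if (it == g_pools.end())
                return nullptr;

            mp = it->second;
        }

        void* block = mp->free_blocks.back();
        mp->free_blocks.pop_back();
        return block;
    }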
@@ -1,6 +1,8 @@
 #include "stdafx.h"
 #include "PPUAnalyser.h"
 
+#include "lv2/sys_sync.h"
+
 #include "PPUOpcodes.h"
 #include "PPUThread.h"
 

@@ -37,7 +39,8 @@ void fmt_class_string<bs_t<ppu_attr>>::format(std::string& out, u64 arg)
 	format_bitset(out, arg, "[", ",", "]", &fmt_class_string<ppu_attr>::format);
 }
 
-void ppu_module::validate(u32 reloc)
+template <>
+void ppu_module<lv2_obj>::validate(u32 reloc)
 {
 	// Load custom PRX configuration if available
 	if (fs::file yml{path + ".yml"})

@@ -529,7 +532,8 @@ namespace ppu_patterns
 	};
 }
 
-bool ppu_module::analyse(u32 lib_toc, u32 entry, const u32 sec_end, const std::vector<u32>& applied, const std::vector<u32>& exported_funcs, std::function<bool()> check_aborted)
+template <>
+bool ppu_module<lv2_obj>::analyse(u32 lib_toc, u32 entry, const u32 sec_end, const std::vector<u32>& applied, const std::vector<u32>& exported_funcs, std::function<bool()> check_aborted)
 {
 	if (segs.empty())
 	{
@@ -72,8 +72,11 @@ struct ppu_segment
 };
 
 // PPU Module Information
-struct ppu_module
+template <typename Type>
+struct ppu_module : public Type
 {
+	using Type::Type;
+
 	ppu_module() noexcept = default;
 
 	ppu_module(const ppu_module&) = delete;

@@ -177,11 +180,16 @@ struct ppu_module
 	}
 };
 
-struct main_ppu_module : public ppu_module
+template <typename T>
+struct main_ppu_module : public ppu_module<T>
 {
 	u32 elf_entry{};
 	u32 seg0_code_end{};
 	std::vector<u32> applied_patches;
+
+	// Disable inherited savestate ordering
+	void save(utils::serial&) = delete;
+	static constexpr double savestate_init_pos = double{};
 };
 
 // Aux
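The header change above turns ppu_module into a template over its base class (and main_ppu_module into a template over the same parameter), while the .cpp hunks define the heavy member functions only for the ppu_module<lv2_obj> instantiation. A minimal sketch of that shape, with stand-in names rather than the emulator's:

    #include <string>
    #include <vector>

    struct kernel_object // stands in for lv2_obj
    {
        virtual ~kernel_object() = default;
    };

    template <typename Base>
    struct module_info : public Base
    {
        using Base::Base;

        std::string path;
        std::vector<unsigned> funcs;

        void validate(unsigned reloc); // defined only for module_info<kernel_object>
    };

    // In a .cpp: explicit specialization of the member for the one Base in use,
    // mirroring "template <> void ppu_module<lv2_obj>::validate(u32 reloc)".
    template <>
    void module_info<kernel_object>::validate(unsigned reloc)
    {
        // Real checks would go here; kept empty in this sketch.
        (void)reloc;
    }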
@ -576,7 +576,7 @@ extern const std::unordered_map<u32, std::string_view>& get_exported_function_na
|
|||
}
|
||||
|
||||
// Resolve relocations for variable/function linkage.
|
||||
static void ppu_patch_refs(const ppu_module& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr)
|
||||
static void ppu_patch_refs(const ppu_module<lv2_obj>& _module, std::vector<ppu_reloc>* out_relocs, u32 fref, u32 faddr)
|
||||
{
|
||||
struct ref_t
|
||||
{
|
||||
|
@ -704,7 +704,7 @@ extern bool ppu_register_library_lock(std::string_view libname, bool lock_lib)
|
|||
}
|
||||
|
||||
// Load and register exports; return special exports found (nameless module)
|
||||
static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::vector<u32>* funcs = nullptr, std::basic_string<char>* loaded_flags = nullptr)
|
||||
static auto ppu_load_exports(const ppu_module<lv2_obj>& _module, ppu_linkage_info* link, u32 exports_start, u32 exports_end, bool for_observing_callbacks = false, std::vector<u32>* funcs = nullptr, std::basic_string<char>* loaded_flags = nullptr)
|
||||
{
|
||||
std::unordered_map<u32, u32> result;
|
||||
|
||||
|
@ -803,7 +803,7 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
|
|||
const auto fnids = +lib.nids;
|
||||
const auto faddrs = +lib.addrs;
|
||||
|
||||
u32 previous_rtoc = umax;
|
||||
u64 previous_rtoc = umax;
|
||||
|
||||
// Get functions
|
||||
for (u32 i = 0, end = lib.num_func; i < end; i++)
|
||||
|
@ -816,21 +816,22 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
|
|||
{
|
||||
if (previous_rtoc == fdata.rtoc)
|
||||
{
|
||||
ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x] rtoc=same", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr);
|
||||
// Shortened printing, replacement string is 10 characters as 0x%08x
|
||||
ppu_loader.notice("**** %s export: (0x%08x) at 0x%07x [at:0x%07x, rtoc:same-above]: %s", module_name, fnid, faddr, fdata.addr, ppu_get_function_name(module_name, fnid));
|
||||
}
|
||||
else
|
||||
{
|
||||
previous_rtoc = fdata.rtoc;
|
||||
ppu_loader.notice("**** %s export: [%s] (0x%08x) at 0x%x [at:0x%x] rtoc=0x%x", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr, fdata.rtoc);
|
||||
ppu_loader.notice("**** %s export: (0x%08x) at 0x%07x [at:0x%07x, rtoc:0x%08x]: %s", module_name, fnid, faddr, fdata.addr, fdata.rtoc, ppu_get_function_name(module_name, fnid));
|
||||
}
|
||||
}
|
||||
else if (fptr)
|
||||
{
|
||||
ppu_loader.error("**** %s export: [%s] (0x%08x) at 0x%x [Invalid Function Address: 0x%x!]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr, fdata.addr);
|
||||
ppu_loader.error("**** %s export: (0x%08x) at 0x%07x [Invalid Function Address: 0x%07x!]: '%s'", module_name, fnid, faddr, fdata.addr, ppu_get_function_name(module_name, fnid));
|
||||
}
|
||||
else
|
||||
{
|
||||
ppu_loader.warning("**** %s export: [%s] (0x%08x) at 0x%x [Illegal Descriptor Address!]", module_name, ppu_get_function_name(module_name, fnid), fnid, faddr);
|
||||
ppu_loader.warning("**** %s export: (0x%08x) at 0x%07x [Illegal Descriptor Address!]: '%s'", module_name, fnid, faddr, ppu_get_function_name(module_name, fnid));
|
||||
}
|
||||
|
||||
if (funcs)
|
||||
|
@ -938,7 +939,7 @@ static auto ppu_load_exports(const ppu_module& _module, ppu_linkage_info* link,
|
|||
return result;
|
||||
}
|
||||
|
||||
static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end)
|
||||
static auto ppu_load_imports(const ppu_module<lv2_obj>& _module, std::vector<ppu_reloc>& relocs, ppu_linkage_info* link, u32 imports_start, u32 imports_end)
|
||||
{
|
||||
std::unordered_map<u32, void*> result;
|
||||
|
||||
|
@ -1030,10 +1031,10 @@ static auto ppu_load_imports(const ppu_module& _module, std::vector<ppu_reloc>&
|
|||
// For _sys_prx_register_module
|
||||
void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags)
|
||||
{
|
||||
auto& _main = g_fxo->get<main_ppu_module>();
|
||||
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
|
||||
auto& link = g_fxo->get<ppu_linkage_info>();
|
||||
|
||||
ppu_module vm_all_fake_module{};
|
||||
ppu_module<lv2_obj> vm_all_fake_module{};
|
||||
vm_all_fake_module.segs.emplace_back(ppu_segment{0x10000, 0 - 0x10000u, 1 /*LOAD*/, 0, 0 - 0x1000u, vm::base(0x10000)});
|
||||
vm_all_fake_module.addr_to_seg_index.emplace(0x10000, 0);
|
||||
|
||||
|
@ -1130,7 +1131,7 @@ void init_ppu_functions(utils::serial* ar, bool full = false)
|
|||
}
|
||||
}
|
||||
|
||||
static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment& seg)
|
||||
static void ppu_check_patch_spu_images(const ppu_module<lv2_obj>& mod, const ppu_segment& seg)
|
||||
{
|
||||
if (!seg.size)
|
||||
{
|
||||
|
@ -1139,7 +1140,7 @@ static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment&
|
|||
|
||||
const bool is_firmware = mod.path.starts_with(vfs::get("/dev_flash/"));
|
||||
|
||||
const auto _main = g_fxo->try_get<main_ppu_module>();
|
||||
const auto _main = g_fxo->try_get<main_ppu_module<lv2_obj>>();
|
||||
|
||||
const std::string_view seg_view{ensure(mod.get_ptr<char>(seg.addr)), seg.size};
|
||||
|
||||
|
@ -1430,10 +1431,10 @@ static void ppu_check_patch_spu_images(const ppu_module& mod, const ppu_segment&
|
|||
}
|
||||
}
|
||||
|
||||
void try_spawn_ppu_if_exclusive_program(const ppu_module& m)
|
||||
void try_spawn_ppu_if_exclusive_program(const ppu_module<lv2_obj>& m)
|
||||
{
|
||||
// If only PRX/OVL has been loaded at Emu.BootGame(), launch a single PPU thread so its memory can be viewed
|
||||
if (Emu.IsReady() && g_fxo->get<main_ppu_module>().segs.empty() && !Emu.DeserialManager())
|
||||
if (Emu.IsReady() && g_fxo->get<main_ppu_module<lv2_obj>>().segs.empty() && !Emu.DeserialManager())
|
||||
{
|
||||
ppu_thread_params p
|
||||
{
|
||||
|
@ -1521,15 +1522,15 @@ const char* get_prx_name_by_cia(u32 addr)
|
|||
return nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
|
||||
shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
|
||||
{
|
||||
if (elf != elf_error::ok)
|
||||
{
|
||||
return nullptr;
|
||||
return null_ptr;
|
||||
}
|
||||
|
||||
// Create new PRX object
|
||||
const auto prx = !ar && !virtual_load ? idm::make_ptr<lv2_obj, lv2_prx>() : std::make_shared<lv2_prx>();
|
||||
const auto prx = !ar && !virtual_load ? idm::make_ptr<lv2_obj, lv2_prx>() : make_shared<lv2_prx>();
|
||||
|
||||
// Access linkage information object
|
||||
auto& link = g_fxo->get<ppu_linkage_info>();
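The loader hunks in this file replace std::shared_ptr, std::make_shared and nullptr returns with the project's shared_ptr, make_shared and null_ptr. The sketch below only illustrates how a drop-in alias plus a convert-to-anything null sentinel keeps such call sites unchanged; it is not RPCS3's actual smart-pointer implementation from util/shared_ptr.hpp:

    #include <memory>
    #include <utility>

    // Illustrative only: project-local alias plus a "null_ptr" sentinel that converts
    // to any smart-pointer instantiation, so "return null_ptr;" works regardless of
    // the function's exact return type.
    template <typename T>
    using shared_ptr = std::shared_ptr<T>;

    inline constexpr struct null_ptr_t
    {
        template <typename T>
        operator std::shared_ptr<T>() const noexcept { return {}; }
    } null_ptr;

    template <typename T, typename... Args>
    shared_ptr<T> make_shared(Args&&... args)
    {
        return std::make_shared<T>(std::forward<Args>(args)...);
    }

    struct lv2_prx_like {};

    shared_ptr<lv2_prx_like> load(bool ok)
    {
        if (!ok)
        {
            return null_ptr; // reads the same with either pointer type
        }

        return make_shared<lv2_prx_like>();
    }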
|
||||
|
@ -2054,7 +2055,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
|
|||
init_ppu_functions(ar, false);
|
||||
|
||||
// Set for delayed initialization in ppu_initialize()
|
||||
auto& _main = g_fxo->get<main_ppu_module>();
|
||||
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
|
||||
|
||||
// Access linkage information object
|
||||
auto& link = g_fxo->get<ppu_linkage_info>();
|
||||
|
@ -2080,7 +2081,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
|
|||
|
||||
struct on_fatal_error
|
||||
{
|
||||
ppu_module& _main;
|
||||
ppu_module<lv2_obj>& _main;
|
||||
bool errored = true;
|
||||
|
||||
~on_fatal_error()
|
||||
|
@ -2498,7 +2499,7 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
|
|||
}
|
||||
|
||||
// Initialize process
|
||||
std::vector<std::shared_ptr<lv2_prx>> loaded_modules;
|
||||
std::vector<shared_ptr<lv2_prx>> loaded_modules;
|
||||
|
||||
// Module list to load at startup
|
||||
std::set<std::string> load_libs;
|
||||
|
@ -2778,11 +2779,11 @@ bool ppu_load_exec(const ppu_exec_object& elf, bool virtual_load, const std::str
|
|||
return true;
|
||||
}
|
||||
|
||||
std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
|
||||
std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object& elf, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar)
|
||||
{
|
||||
if (elf != elf_error::ok)
|
||||
{
|
||||
return {nullptr, CELL_ENOENT};
|
||||
return {null_ptr, CELL_ENOENT};
|
||||
}
|
||||
|
||||
// Access linkage information object
|
||||
|
@ -2804,12 +2805,12 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
|
|||
if (!r.valid() || !r.inside(addr_range::start_length(0x30000000, 0x10000000)))
|
||||
{
|
||||
// TODO: Check error and if there's a better way to error check
|
||||
return {nullptr, CELL_ENOEXEC};
|
||||
return {null_ptr, CELL_ENOEXEC};
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
std::shared_ptr<lv2_overlay> ovlm = std::make_shared<lv2_overlay>();
|
||||
shared_ptr<lv2_overlay> ovlm = make_shared<lv2_overlay>();
|
||||
|
||||
// Set path (TODO)
|
||||
ovlm->name = path.substr(path.find_last_of('/') + 1);
|
||||
|
@ -2859,7 +2860,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
|
|||
if (!vm::check_addr(addr, vm::page_readable, size))
|
||||
{
|
||||
ppu_loader.error("ppu_load_overlay(): Archived PPU overlay memory has not been found! (addr=0x%x, memsz=0x%x)", addr, size);
|
||||
return {nullptr, CELL_EABORT};
|
||||
return {null_ptr, CELL_EABORT};
|
||||
}
|
||||
}
|
||||
else if (!vm::get(vm::any, 0x30000000)->falloc(addr, size))
|
||||
|
@ -2873,7 +2874,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
|
|||
}
|
||||
|
||||
// TODO: Check error code, maybe disallow more than one overlay instance completely
|
||||
return {nullptr, CELL_EBUSY};
|
||||
return {null_ptr, CELL_EBUSY};
|
||||
}
|
||||
|
||||
// Store only LOAD segments (TODO)
|
||||
|
@ -3088,7 +3089,7 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
|
|||
return !!(cpu->state & cpu_flag::exit);
|
||||
}))
|
||||
{
|
||||
return {nullptr, CellError{CELL_CANCEL + 0u}};
|
||||
return {null_ptr, CellError{CELL_CANCEL + 0u}};
|
||||
}
|
||||
|
||||
// Validate analyser results (not required)
|
||||
|
@ -3105,11 +3106,11 @@ std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_ex
|
|||
|
||||
bool ppu_load_rel_exec(const ppu_rel_object& elf)
|
||||
{
|
||||
ppu_module relm{};
|
||||
ppu_module<lv2_obj> relm{};
|
||||
|
||||
struct on_fatal_error
|
||||
{
|
||||
ppu_module& relm;
|
||||
ppu_module<lv2_obj>& relm;
|
||||
bool errored = true;
|
||||
|
||||
~on_fatal_error()
|
||||
|
|
|
@ -174,13 +174,13 @@ bool serialize<ppu_thread::cr_bits>(utils::serial& ar, typename ppu_thread::cr_b
|
|||
}
|
||||
|
||||
extern void ppu_initialize();
|
||||
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
|
||||
extern bool ppu_initialize(const ppu_module& info, bool check_only = false, u64 file_size = 0);
|
||||
static void ppu_initialize2(class jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module);
|
||||
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);
|
||||
extern bool ppu_initialize(const ppu_module<lv2_obj>& info, bool check_only = false, u64 file_size = 0);
|
||||
static void ppu_initialize2(class jit_compiler& jit, const ppu_module<lv2_obj>& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module<lv2_obj>& whole_module);
|
||||
extern bool ppu_load_exec(const ppu_exec_object&, bool virtual_load, const std::string&, utils::serial* = nullptr);
|
||||
extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* = nullptr);
|
||||
extern std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* = nullptr);
|
||||
extern void ppu_unload_prx(const lv2_prx&);
|
||||
extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 file_offset, utils::serial* = nullptr);
|
||||
extern shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64 file_offset, utils::serial* = nullptr);
|
||||
extern void ppu_execute_syscall(ppu_thread& ppu, u64 code);
|
||||
static void ppu_break(ppu_thread&, ppu_opcode_t, be_t<u32>*, ppu_intrp_func*);
|
||||
|
||||
|
@ -550,7 +550,7 @@ u32 ppu_read_mmio_aware_u32(u8* vm_base, u32 eal)
|
|||
if (eal >= RAW_SPU_BASE_ADDR)
|
||||
{
|
||||
// RawSPU MMIO
|
||||
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
|
||||
if (!thread)
|
||||
{
|
||||
|
@ -578,7 +578,7 @@ void ppu_write_mmio_aware_u32(u8* vm_base, u32 eal, u32 value)
|
|||
if (eal >= RAW_SPU_BASE_ADDR)
|
||||
{
|
||||
// RawSPU MMIO
|
||||
auto thread = idm::get<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
auto thread = idm::get_unlocked<named_thread<spu_thread>>(spu_thread::find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
|
||||
if (!thread)
|
||||
{
|
||||
|
@ -3450,7 +3450,7 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
{
|
||||
if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
|
||||
{
|
||||
perf_log.warning(u8"STCX: took too long: %.3fµs (%u c)", count / (utils::get_tsc_freq() / 1000'000.), count);
|
||||
perf_log.warning("STCX: took too long: %.3fus (%u c)", count / (utils::get_tsc_freq() / 1000'000.), count);
|
||||
}
|
||||
|
||||
break;
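The perf-report hunks in this file (STCX here, PUTLLC and STORE128 in the SPU code further down) keep the same arithmetic and only drop the µ from the format string so the log line stays plain ASCII. For reference, the conversion is ticks divided by ticks-per-microsecond; a small stand-alone sketch, where tsc_freq stands in for the emulator's calibration helper:

    #include <cstdint>
    #include <cstdio>

    // Same arithmetic as the diff: a raw TSC tick delta becomes microseconds via
    // ticks / (ticks_per_second / 1e6). The 20000-tick threshold matches the hunks above.
    void report_if_slow(const char* what, std::uint64_t tick_delta, std::uint64_t tsc_freq)
    {
        if (tick_delta > 20000)
        {
            const double microseconds = tick_delta / (tsc_freq / 1000'000.);
            std::printf("%s: took too long: %.3fus (%llu c)\n",
                what, microseconds, static_cast<unsigned long long>(tick_delta));
        }
    }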
|
||||
|
@ -3837,7 +3837,7 @@ extern fs::file make_file_view(fs::file&& _file, u64 offset, u64 max_size = umax
|
|||
return file;
|
||||
}
|
||||
|
||||
extern void ppu_finalize(const ppu_module& info, bool force_mem_release)
|
||||
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release)
|
||||
{
|
||||
if (info.segs.empty())
|
||||
{
|
||||
|
@ -3885,7 +3885,7 @@ extern void ppu_finalize(const ppu_module& info, bool force_mem_release)
|
|||
#endif
|
||||
}
|
||||
|
||||
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module*>* loaded_modules)
|
||||
extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_module<lv2_obj>*>* loaded_modules)
|
||||
{
|
||||
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
|
||||
{
|
||||
|
@ -3978,7 +3978,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
|
|||
|
||||
if (loaded_modules)
|
||||
{
|
||||
if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module* obj)
|
||||
if (std::any_of(loaded_modules->begin(), loaded_modules->end(), [&](ppu_module<lv2_obj>* obj)
|
||||
{
|
||||
return obj->name == entry.name;
|
||||
}))
|
||||
|
@ -4311,7 +4311,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
|
|||
|
||||
auto slice = possible_exec_file_paths.pop_all();
|
||||
|
||||
auto main_module = std::move(g_fxo->get<main_ppu_module>());
|
||||
auto main_module = std::move(g_fxo->get<main_ppu_module<lv2_obj>>());
|
||||
|
||||
for (; slice; slice.pop_front(), g_progr_fdone++)
|
||||
{
|
||||
|
@ -4348,7 +4348,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
|
|||
{
|
||||
while (exec_err == elf_error::ok)
|
||||
{
|
||||
main_ppu_module& _main = g_fxo->get<main_ppu_module>();
|
||||
main_ppu_module<lv2_obj>& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
|
||||
_main = {};
|
||||
|
||||
auto current_cache = std::move(g_fxo->get<spu_cache>());
|
||||
|
@ -4393,7 +4393,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
|
|||
ppu_log.notice("Failed to precompile '%s' as executable (%s)", path, exec_err);
|
||||
}
|
||||
|
||||
g_fxo->get<main_ppu_module>() = std::move(main_module);
|
||||
g_fxo->get<main_ppu_module<lv2_obj>>() = std::move(main_module);
|
||||
g_fxo->get<spu_cache>().collect_funcs_to_precompile = true;
|
||||
Emu.ConfigurePPUCache();
|
||||
});
|
||||
|
@ -4403,7 +4403,7 @@ extern void ppu_precompile(std::vector<std::string>& dir_queue, std::vector<ppu_
|
|||
|
||||
extern void ppu_initialize()
|
||||
{
|
||||
if (!g_fxo->is_init<main_ppu_module>())
|
||||
if (!g_fxo->is_init<main_ppu_module<lv2_obj>>())
|
||||
{
|
||||
return;
|
||||
}
|
||||
|
@ -4413,7 +4413,7 @@ extern void ppu_initialize()
|
|||
return;
|
||||
}
|
||||
|
||||
auto& _main = g_fxo->get<main_ppu_module>();
|
||||
auto& _main = g_fxo->get<main_ppu_module<lv2_obj>>();
|
||||
|
||||
std::optional<scoped_progress_dialog> progress_dialog(std::in_place, get_localized_string(localized_string_id::PROGRESS_DIALOG_ANALYZING_PPU_EXECUTABLE));
|
||||
|
||||
|
@ -4436,7 +4436,7 @@ extern void ppu_initialize()
|
|||
compile_main = ppu_initialize(_main, true);
|
||||
}
|
||||
|
||||
std::vector<ppu_module*> module_list;
|
||||
std::vector<ppu_module<lv2_obj>*> module_list;
|
||||
|
||||
const std::string firmware_sprx_path = vfs::get("/dev_flash/sys/external/");
|
||||
|
||||
|
@ -4541,7 +4541,7 @@ extern void ppu_initialize()
|
|||
}
|
||||
}
|
||||
|
||||
bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
|
||||
bool ppu_initialize(const ppu_module<lv2_obj>& info, bool check_only, u64 file_size)
|
||||
{
|
||||
if (g_cfg.core.ppu_decoder != ppu_decoder_type::llvm)
|
||||
{
|
||||
|
@ -4668,7 +4668,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
|
|||
const u32 reloc = info.relocs.empty() ? 0 : ::at32(info.segs, 0).addr;
|
||||
|
||||
// Info sent to threads
|
||||
std::vector<std::pair<std::string, ppu_module>> workload;
|
||||
std::vector<std::pair<std::string, ppu_module<lv2_obj>>> workload;
|
||||
|
||||
// Info to load to main JIT instance (true - compiled)
|
||||
std::vector<std::pair<std::string, bool>> link_workload;
|
||||
|
@ -4733,7 +4733,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
|
|||
}
|
||||
|
||||
// Copy module information (TODO: optimize)
|
||||
ppu_module part;
|
||||
ppu_module<lv2_obj> part;
|
||||
part.copy_part(info);
|
||||
part.funcs.reserve(16000);
|
||||
|
||||
|
@ -5035,15 +5035,15 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
|
|||
struct thread_op
|
||||
{
|
||||
atomic_t<u32>& work_cv;
|
||||
std::vector<std::pair<std::string, ppu_module>>& workload;
|
||||
const ppu_module& main_module;
|
||||
std::vector<std::pair<std::string, ppu_module<lv2_obj>>>& workload;
|
||||
const ppu_module<lv2_obj>& main_module;
|
||||
const std::string& cache_path;
|
||||
const cpu_thread* cpu;
|
||||
|
||||
std::unique_lock<decltype(jit_core_allocator::sem)> core_lock;
|
||||
|
||||
thread_op(atomic_t<u32>& work_cv, std::vector<std::pair<std::string, ppu_module>>& workload
|
||||
, const cpu_thread* cpu, const ppu_module& main_module, const std::string& cache_path, decltype(jit_core_allocator::sem)& sem) noexcept
|
||||
thread_op(atomic_t<u32>& work_cv, std::vector<std::pair<std::string, ppu_module<lv2_obj>>>& workload
|
||||
, const cpu_thread* cpu, const ppu_module<lv2_obj>& main_module, const std::string& cache_path, decltype(jit_core_allocator::sem)& sem) noexcept
|
||||
|
||||
: work_cv(work_cv)
|
||||
, workload(workload)
|
||||
|
@ -5257,7 +5257,7 @@ bool ppu_initialize(const ppu_module& info, bool check_only, u64 file_size)
|
|||
#endif
|
||||
}
|
||||
|
||||
static void ppu_initialize2(jit_compiler& jit, const ppu_module& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module& whole_module)
|
||||
static void ppu_initialize2(jit_compiler& jit, const ppu_module<lv2_obj>& module_part, const std::string& cache_path, const std::string& obj_name, const ppu_module<lv2_obj>& whole_module)
|
||||
{
|
||||
#ifdef LLVM_AVAILABLE
|
||||
using namespace llvm;
|
||||
|
|
|
@@ -3,6 +3,7 @@
 
 #include "Emu/system_config.h"
 #include "Emu/Cell/Common.h"
+#include "Emu/Cell/lv2/sys_sync.h"
 #include "PPUTranslator.h"
 #include "PPUThread.h"
 #include "SPUThread.h"

@@ -28,7 +29,7 @@ const ppu_decoder<PPUTranslator> s_ppu_decoder;
 extern const ppu_decoder<ppu_itype> g_ppu_itype;
 extern const ppu_decoder<ppu_iname> g_ppu_iname;
 
-PPUTranslator::PPUTranslator(LLVMContext& context, Module* _module, const ppu_module& info, ExecutionEngine& engine)
+PPUTranslator::PPUTranslator(LLVMContext& context, Module* _module, const ppu_module<lv2_obj>& info, ExecutionEngine& engine)
 	: cpu_translator(_module, false)
 	, m_info(info)
 	, m_pure_attr()

@@ -322,7 +323,7 @@ Function* PPUTranslator::Translate(const ppu_function& info)
 	return m_function;
 }
 
-Function* PPUTranslator::GetSymbolResolver(const ppu_module& info)
+Function* PPUTranslator::GetSymbolResolver(const ppu_module<lv2_obj>& info)
 {
 	m_function = cast<Function>(m_module->getOrInsertFunction("__resolve_symbols", FunctionType::get(get_type<void>(), { get_type<u8*>(), get_type<u64>() }, false)).getCallee());
|
||||
|
||||
|
|
|
@@ -8,10 +8,15 @@
 
 #include "util/types.hpp"
 
+template <typename T>
+struct ppu_module;
+
+struct lv2_obj;
+
 class PPUTranslator final : public cpu_translator
 {
 	// PPU Module
-	const ppu_module& m_info;
+	const ppu_module<lv2_obj>& m_info;
 
 	// Relevant relocations
 	std::map<u64, const ppu_reloc*> m_relocs;

@@ -331,7 +336,7 @@ public:
 	// Handle compilation errors
 	void CompilationError(const std::string& error);
 
-	PPUTranslator(llvm::LLVMContext& context, llvm::Module* _module, const ppu_module& info, llvm::ExecutionEngine& engine);
+	PPUTranslator(llvm::LLVMContext& context, llvm::Module* _module, const ppu_module<lv2_obj>& info, llvm::ExecutionEngine& engine);
 	~PPUTranslator();
 
 	// Get thread context struct type

@@ -339,7 +344,7 @@ public:
 
 	// Parses PPU opcodes and translate them into LLVM IR
 	llvm::Function* Translate(const ppu_function& info);
-	llvm::Function* GetSymbolResolver(const ppu_module& info);
+	llvm::Function* GetSymbolResolver(const ppu_module<lv2_obj>& info);
 
 	void MFVSCR(ppu_opcode_t op);
 	void MTVSCR(ppu_opcode_t op);
||||
|
|
|
@@ -4666,35 +4666,44 @@ public:
 		return zshuffle(std::forward<TA>(a), 15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
 	}
 
+	template <typename T, typename U>
+	static llvm_calli<u8[16], T, U> rotqbybi(T&& a, U&& b)
+	{
+		return {"spu_rotqbybi", {std::forward<T>(a), std::forward<U>(b)}};
+	}
+
 	void ROTQBYBI(spu_opcode_t op)
 	{
-		const auto a = get_vr<u8[16]>(op.ra);
-
-		// Data with swapped endian from a load instruction
-		if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
-		{
-			const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
-			const auto sh = sc + (splat_scalar(get_vr<u8[16]>(op.rb)) >> 3);
-
-			if (m_use_avx512_icl)
-			{
-				set_vr(op.rt, vpermb(as, sh));
-				return;
-			}
-
-			set_vr(op.rt, pshufb(as, (sh & 0xf)));
-			return;
-		}
-
-		const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
-		const auto sh = sc - (splat_scalar(get_vr<u8[16]>(op.rb)) >> 3);
-
-		if (m_use_avx512_icl)
-		{
-			set_vr(op.rt, vpermb(a, sh));
-			return;
-		}
-
-		set_vr(op.rt, pshufb(a, (sh & 0xf)));
+		register_intrinsic("spu_rotqbybi", [&](llvm::CallInst* ci)
+		{
+			const auto a = value<u8[16]>(ci->getOperand(0));
+			const auto b = value<u8[16]>(ci->getOperand(1));
+
+			// Data with swapped endian from a load instruction
+			if (auto [ok, as] = match_expr(a, byteswap(match<u8[16]>())); ok)
+			{
+				const auto sc = build<u8[16]>(15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0);
+				const auto sh = sc + (splat_scalar(b) >> 3);
+
+				if (m_use_avx512_icl)
+				{
+					return eval(vpermb(as, sh));
+				}
+
+				return eval(pshufb(as, (sh & 0xf)));
+			}
+
+			const auto sc = build<u8[16]>(0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15);
+			const auto sh = sc - (splat_scalar(b) >> 3);
+
+			if (m_use_avx512_icl)
+			{
+				return eval(vpermb(a, sh));
+			}
+
+			return eval(pshufb(a, (sh & 0xf)));
+		});
+
+		set_vr(op.rt, rotqbybi(get_vr<u8[16]>(op.ra), get_vr<u8[16]>(op.rb)));
 	}
 
 	void ROTQMBYBI(spu_opcode_t op)
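The ROTQBYBI rewrite above stops emitting the byte-rotate lowering inline: the handler now emits a named pseudo-call ("spu_rotqbybi") and registers a callback that expands it later, which is what lets ROTQBI in the next hunk pattern-match on rotqbybi(...) and fuse the byte and bit rotates. A generic register-then-expand sketch, independent of LLVM and of the emulator's cpu_translator helpers:

    #include <functional>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Expression nodes carry a name and operands; lowering callbacks are looked up
    // by name when the code is finalized, so earlier passes can still recognize the
    // node symbolically before it turns into real instructions.
    struct node
    {
        std::string name;          // e.g. "spu_rotqbybi"
        std::vector<int> operands; // stand-in for IR values
    };

    class builder
    {
        std::unordered_map<std::string, std::function<int(const node&)>> m_lowerings;

    public:
        void register_intrinsic(const std::string& name, std::function<int(const node&)> fn)
        {
            m_lowerings[name] = std::move(fn);
        }

        int lower(const node& n) const
        {
            return m_lowerings.at(n.name)(n);
        }
    };

    int main()
    {
        builder b;

        // The callback owns the "how is this actually encoded" knowledge.
        b.register_intrinsic("spu_rotqbybi", [](const node& n)
        {
            return n.operands.at(0) + n.operands.at(1); // placeholder lowering
        });

        const node call{"spu_rotqbybi", {3, 4}};
        return b.lower(call) == 7 ? 0 : 1;
    }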
|
||||
|
@@ -4813,6 +4822,39 @@ public:
 	void ROTQBI(spu_opcode_t op)
 	{
 		const auto a = get_vr(op.ra);
+		const auto ax = get_vr<u8[16]>(op.ra);
+		const auto bx = get_vr<u8[16]>(op.rb);
+
+		// Combined bit and bytes shift
+		if (auto [ok, v0, v1] = match_expr(ax, rotqbybi(match<u8[16]>(), match<u8[16]>())); ok && v1.eq(bx))
+		{
+			const auto b32 = get_vr<s32[4]>(op.rb);
+
+			// Is the rotate less than 31 bits?
+			if (auto k = get_known_bits(b32); (k.Zero & 0x60) == 0x60u)
+			{
+				const auto b = splat_scalar(get_vr(op.rb));
+				set_vr(op.rt, fshl(bitcast<u32[4]>(v0), zshuffle(bitcast<u32[4]>(v0), 3, 0, 1, 2), b));
+				return;
+			}
+
+			// Inverted shift count
+			if (auto [ok1, v10, v11] = match_expr(b32, match<s32[4]>() - match<s32[4]>()); ok1)
+			{
+				if (auto [ok2, data] = get_const_vector(v10.value, m_pos); ok2)
+				{
+					if ((data & v128::from32p(0x7f)) == v128{})
+					{
+						if (auto k = get_known_bits(v11); (k.Zero & 0x60) == 0x60u)
+						{
+							set_vr(op.rt, fshr(zshuffle(bitcast<u32[4]>(v0), 1, 2, 3, 0), bitcast<u32[4]>(v0), splat_scalar(bitcast<u32[4]>(v11))));
+							return;
+						}
+					}
+				}
+			}
+		}
+
 		const auto b = splat_scalar(get_vr(op.rb) & 0x7);
 		set_vr(op.rt, fshl(a, zshuffle(a, 3, 0, 1, 2), b));
 	}
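The fused ROTQBI path added above leans on LLVM funnel shifts: fshl(x, x, n) is a rotate-left by n, and when the shift count arrives as 0 - n the same rotate can be written with fshr instead. The identity on plain 32-bit values, as a self-checking sketch:

    #include <cassert>
    #include <cstdint>

    // Funnel shift left: top bits of 'hi' with bits of 'lo' shifted in from the right.
    // fshl(x, x, n) is exactly a rotate-left by n.
    static std::uint32_t fshl32(std::uint32_t hi, std::uint32_t lo, unsigned n)
    {
        n &= 31;
        return n ? (hi << n) | (lo >> (32 - n)) : hi;
    }

    static std::uint32_t rotl32(std::uint32_t x, unsigned n)
    {
        return fshl32(x, x, n);
    }

    int main()
    {
        assert(rotl32(0x80000001u, 1) == 0x00000003u);

        // Inverted count: rotating left by (32 - n) equals rotating right by n,
        // which is why the diff can switch to fshr when the count is "0 - n".
        const unsigned n = 5;
        const std::uint32_t x = 0xDEADBEEFu;
        assert(rotl32(x, 32 - n) == ((x >> n) | (x << (32 - n))));
        return 0;
    }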
|
||||
|
|
|
@ -2415,7 +2415,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
|
|||
if (eal < SYS_SPU_THREAD_BASE_LOW)
|
||||
{
|
||||
// RawSPU MMIO
|
||||
auto thread = idm::get<named_thread<spu_thread>>(find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
auto thread = idm::get_unlocked<named_thread<spu_thread>>(find_raw_spu((eal - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET));
|
||||
|
||||
if (!thread)
|
||||
{
|
||||
|
@ -3837,7 +3837,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
|
|||
|
||||
if (count2 > 20000 && g_cfg.core.perf_report) [[unlikely]]
|
||||
{
|
||||
perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr=0x%x) (S)", count2 / (utils::get_tsc_freq() / 1000'000.), count2, addr);
|
||||
perf_log.warning("PUTLLC: took too long: %.3fus (%u c) (addr=0x%x) (S)", count2 / (utils::get_tsc_freq() / 1000'000.), count2, addr);
|
||||
}
|
||||
|
||||
if (ok)
|
||||
|
@ -3872,7 +3872,7 @@ bool spu_thread::do_putllc(const spu_mfc_cmd& args)
|
|||
{
|
||||
if (count > 20000 && g_cfg.core.perf_report) [[unlikely]]
|
||||
{
|
||||
perf_log.warning(u8"PUTLLC: took too long: %.3fµs (%u c) (addr = 0x%x)", count / (utils::get_tsc_freq() / 1000'000.), count, addr);
|
||||
perf_log.warning("PUTLLC: took too long: %.3fus (%u c) (addr = 0x%x)", count / (utils::get_tsc_freq() / 1000'000.), count, addr);
|
||||
}
|
||||
|
||||
break;
|
||||
|
@ -4087,7 +4087,7 @@ void do_cell_atomic_128_store(u32 addr, const void* to_write)
|
|||
|
||||
if (result > 20000 && g_cfg.core.perf_report) [[unlikely]]
|
||||
{
|
||||
perf_log.warning(u8"STORE128: took too long: %.3fµs (%u c) (addr=0x%x)", result / (utils::get_tsc_freq() / 1000'000.), result, addr);
|
||||
perf_log.warning("STORE128: took too long: %.3fus (%u c) (addr=0x%x)", result / (utils::get_tsc_freq() / 1000'000.), result, addr);
|
||||
}
|
||||
|
||||
static_cast<void>(cpu->test_stopped());
|
||||
|
@ -6007,7 +6007,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
|
|||
|
||||
spu_function_logger logger(*this, "sys_spu_thread_send_event");
|
||||
|
||||
std::shared_ptr<lv2_event_queue> queue;
|
||||
shared_ptr<lv2_event_queue> queue;
|
||||
{
|
||||
std::lock_guard lock(group->mutex);
|
||||
|
||||
|
@ -6059,7 +6059,7 @@ bool spu_thread::set_ch_value(u32 ch, u32 value)
|
|||
|
||||
spu_function_logger logger(*this, "sys_spu_thread_throw_event");
|
||||
|
||||
std::shared_ptr<lv2_event_queue> queue;
|
||||
shared_ptr<lv2_event_queue> queue;
|
||||
{
|
||||
std::lock_guard lock{group->mutex};
|
||||
queue = this->spup[spup];
|
||||
|
@ -6447,7 +6447,7 @@ bool spu_thread::stop_and_signal(u32 code)
|
|||
return true;
|
||||
}
|
||||
|
||||
auto get_queue = [this](u32 spuq) -> const std::shared_ptr<lv2_event_queue>&
|
||||
auto get_queue = [this](u32 spuq) -> const shared_ptr<lv2_event_queue>&
|
||||
{
|
||||
for (auto& v : this->spuq)
|
||||
{
|
||||
|
@ -6460,7 +6460,7 @@ bool spu_thread::stop_and_signal(u32 code)
|
|||
}
|
||||
}
|
||||
|
||||
static const std::shared_ptr<lv2_event_queue> empty;
|
||||
static const shared_ptr<lv2_event_queue> empty;
|
||||
return empty;
|
||||
};
|
||||
|
||||
|
@ -6523,7 +6523,7 @@ bool spu_thread::stop_and_signal(u32 code)
|
|||
|
||||
spu_function_logger logger(*this, "sys_spu_thread_receive_event");
|
||||
|
||||
std::shared_ptr<lv2_event_queue> queue;
|
||||
shared_ptr<lv2_event_queue> queue;
|
||||
|
||||
while (true)
|
||||
{
|
||||
|
@ -6665,7 +6665,7 @@ bool spu_thread::stop_and_signal(u32 code)
|
|||
|
||||
spu_log.trace("sys_spu_thread_tryreceive_event(spuq=0x%x)", spuq);
|
||||
|
||||
std::shared_ptr<lv2_event_queue> queue;
|
||||
shared_ptr<lv2_event_queue> queue;
|
||||
|
||||
reader_lock{group->mutex}, queue = get_queue(spuq);
|
||||
|
||||
|
|
|
@@ -453,7 +453,7 @@ struct spu_int_ctrl_t
 	atomic_t<u64> mask;
 	atomic_t<u64> stat;
 
-	std::shared_ptr<struct lv2_int_tag> tag;
+	shared_ptr<struct lv2_int_tag> tag;
 
 	void set(u64 ints);
 

@@ -755,8 +755,8 @@ public:
 	atomic_t<status_npc_sync_var> status_npc{};
 	std::array<spu_int_ctrl_t, 3> int_ctrl{}; // SPU Class 0, 1, 2 Interrupt Management
 
-	std::array<std::pair<u32, std::shared_ptr<lv2_event_queue>>, 32> spuq{}; // Event Queue Keys for SPU Thread
-	std::shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
+	std::array<std::pair<u32, shared_ptr<lv2_event_queue>>, 32> spuq{}; // Event Queue Keys for SPU Thread
+	shared_ptr<lv2_event_queue> spup[64]; // SPU Ports
 	spu_channel exit_status{}; // Threaded SPU exit status (not a channel, but the interface fits)
 	atomic_t<u32> last_exit_status; // Value to be written in exit_status after checking group termination
 	lv2_spu_group* const group; // SPU Thread Group (access by the spu threads in the group only! From other threads obtain a shared pointer to group using group ID)
|
||||
|
|
|
@@ -14,11 +14,21 @@
 
 LOG_CHANNEL(sys_cond);
 
-lv2_cond::lv2_cond(utils::serial& ar)
+lv2_cond::lv2_cond(utils::serial& ar) noexcept
 	: key(ar)
 	, name(ar)
 	, mtx_id(ar)
-	, mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
+	, mutex(idm::check_unlocked<lv2_obj, lv2_mutex>(mtx_id))
+	, _mutex(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)) // May be nullptr
 {
 }
 
+lv2_cond::lv2_cond(u64 key, u64 name, u32 mtx_id, shared_ptr<lv2_obj> mutex0) noexcept
+	: key(key)
+	, name(name)
+	, mtx_id(mtx_id)
+	, mutex(static_cast<lv2_mutex*>(mutex0.get()))
+	, _mutex(mutex0)
+{
+}
+

@@ -49,7 +59,7 @@ CellError lv2_cond::on_id_create()
 {
 	if (!mutex)
 	{
-		mutex = ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id));
+		_mutex = static_cast<shared_ptr<lv2_obj>>(ensure(idm::get_unlocked<lv2_obj, lv2_mutex>(mtx_id)));
 	}
 
 	// Defer function

@@ -59,10 +69,9 @@ CellError lv2_cond::on_id_create()
 	return {};
 }
 
-std::shared_ptr<void> lv2_cond::load(utils::serial& ar)
+std::function<void(void*)> lv2_cond::load(utils::serial& ar)
 {
-	auto cond = std::make_shared<lv2_cond>(ar);
-	return lv2_obj::load(cond->key, cond);
+	return load_func(make_shared<lv2_cond>(ar));
 }
 
 void lv2_cond::save(utils::serial& ar)

@@ -76,7 +85,7 @@ error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id,
 
 	sys_cond.trace("sys_cond_create(cond_id=*0x%x, mutex_id=0x%x, attr=*0x%x)", cond_id, mutex_id, attr);
 
-	auto mutex = idm::get<lv2_obj, lv2_mutex>(mutex_id);
+	auto mutex = idm::get_unlocked<lv2_obj, lv2_mutex>(mutex_id);
 
 	if (!mutex)
 	{

@@ -94,7 +103,7 @@ error_code sys_cond_create(ppu_thread& ppu, vm::ptr<u32> cond_id, u32 mutex_id,
 
 	if (const auto error = lv2_obj::create<lv2_cond>(_attr.pshared, ipc_key, _attr.flags, [&]
 	{
-		return std::make_shared<lv2_cond>(
+		return make_single<lv2_cond>(
 			ipc_key,
 			_attr.name_u64,
 			mutex_id,
|
||||
|
|
|
@@ -26,19 +26,14 @@ struct lv2_cond final : lv2_obj
 	const u64 name;
 	const u32 mtx_id;
 
-	std::shared_ptr<lv2_mutex> mutex; // Associated Mutex
+	lv2_mutex* mutex; // Associated Mutex
+	shared_ptr<lv2_obj> _mutex;
 	ppu_thread* sq{};
 
-	lv2_cond(u64 key, u64 name, u32 mtx_id, std::shared_ptr<lv2_mutex> mutex)
-		: key(key)
-		, name(name)
-		, mtx_id(mtx_id)
-		, mutex(std::move(mutex))
-	{
-	}
+	lv2_cond(u64 key, u64 name, u32 mtx_id, shared_ptr<lv2_obj> mutex0) noexcept;
 
-	lv2_cond(utils::serial& ar);
-	static std::shared_ptr<void> load(utils::serial& ar);
+	lv2_cond(utils::serial& ar) noexcept;
+	static std::function<void(void*)> load(utils::serial& ar);
 	void save(utils::serial& ar);
 
 	CellError on_id_create();
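The lv2_cond change above splits the old std::shared_ptr<lv2_mutex> member in two: a raw lv2_mutex* for cheap typed access and a type-erased shared_ptr<lv2_obj> (_mutex) whose only job is keeping the mutex alive. A compact illustration of that ownership split; the types are stand-ins, and the downcast assumes the owner really holds a mutex object:

    #include <memory>

    struct base_obj { virtual ~base_obj() = default; };
    struct mutex_obj : base_obj { int lock_count = 0; };

    struct cond_obj
    {
        mutex_obj* mutex;                // non-owning, typed view
        std::shared_ptr<base_obj> owner; // owning, type-erased

        // 'mutex' is declared (and therefore initialized) before 'owner',
        // so taking .get() here happens before the move.
        explicit cond_obj(std::shared_ptr<base_obj> m) noexcept
            : mutex(static_cast<mutex_obj*>(m.get()))
            , owner(std::move(m))
        {
        }
    };

    int main()
    {
        cond_obj cond(std::make_shared<mutex_obj>());
        cond.mutex->lock_count++; // typed access through the raw pointer
        return cond.owner.use_count() == 1 ? 0 : 1;
    }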
|
||||
|
|
|
@@ -101,18 +101,37 @@ void lv2_config::initialize()
 	lv2_config_service::create(SYS_CONFIG_SERVICE_PADMANAGER2, 0, 1, 0, hid_info, 0x1a)->notify();
 }
 
-void lv2_config::add_service_event(const std::shared_ptr<lv2_config_service_event>& event)
+void lv2_config::add_service_event(shared_ptr<lv2_config_service_event> event)
 {
 	std::lock_guard lock(m_mutex);
-	events.emplace(event->id, event);
+	events.emplace(event->id, std::move(event));
 }
 
 void lv2_config::remove_service_event(u32 id)
 {
+	shared_ptr<lv2_config_service_event> ptr;
+
 	std::lock_guard lock(m_mutex);
-	events.erase(id);
+
+	if (auto it = events.find(id); it != events.end())
+	{
+		ptr = std::move(it->second);
+		events.erase(it);
+	}
 }
 
+lv2_config_service_event& lv2_config_service_event::operator=(thread_state s) noexcept
+{
+	if (s == thread_state::finished)
+	{
+		if (auto global = g_fxo->try_get<lv2_config>())
+		{
+			global->remove_service_event(id);
+		}
+	}
+
+	return *this;
+}
+
 // LV2 Config Service Listener
 bool lv2_config_service_listener::check_service(const lv2_config_service& service) const
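remove_service_event above now moves the map entry into a local declared before the lock guard, so the mutex is released first and the event's destructor runs outside the lock. A compact illustration of that destroy-outside-the-lock pattern:

    #include <memory>
    #include <mutex>
    #include <unordered_map>

    struct event_t
    {
        ~event_t()
        {
            // Imagine non-trivial work here (callbacks, further locking, ...).
            // Running it while m_mutex is held would risk re-entrancy or deadlock.
        }
    };

    class config_like
    {
        std::mutex m_mutex;
        std::unordered_map<unsigned, std::shared_ptr<event_t>> events;

    public:
        void remove(unsigned id)
        {
            std::shared_ptr<event_t> ptr; // declared first => destroyed last

            std::lock_guard lock(m_mutex);

            if (auto it = events.find(id); it != events.end())
            {
                ptr = std::move(it->second); // keep the object alive past erase()
                events.erase(it);
            }

            // Locals die in reverse order: 'lock' is released before 'ptr',
            // so if this was the last reference, ~event_t runs unlocked.
        }
    };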
|
||||
|
@ -140,13 +159,13 @@ bool lv2_config_service_listener::check_service(const lv2_config_service& servic
|
|||
return true;
|
||||
}
|
||||
|
||||
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service_event>& event)
|
||||
bool lv2_config_service_listener::notify(const shared_ptr<lv2_config_service_event>& event)
|
||||
{
|
||||
service_events.emplace_back(event);
|
||||
return event->notify();
|
||||
}
|
||||
|
||||
bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_service>& service)
|
||||
bool lv2_config_service_listener::notify(const shared_ptr<lv2_config_service>& service)
|
||||
{
|
||||
if (!check_service(*service))
|
||||
return false;
|
||||
|
@ -158,7 +177,7 @@ bool lv2_config_service_listener::notify(const std::shared_ptr<lv2_config_servic
|
|||
|
||||
void lv2_config_service_listener::notify_all()
|
||||
{
|
||||
std::vector<std::shared_ptr<lv2_config_service>> services;
|
||||
std::vector<shared_ptr<lv2_config_service>> services;
|
||||
|
||||
// Grab all events
|
||||
idm::select<lv2_config_service>([&](u32 /*id*/, lv2_config_service& service)
|
||||
|
@ -170,7 +189,7 @@ void lv2_config_service_listener::notify_all()
|
|||
});
|
||||
|
||||
// Sort services by timestamp
|
||||
sort(services.begin(), services.end(), [](const std::shared_ptr<lv2_config_service>& s1, const std::shared_ptr<lv2_config_service>& s2)
|
||||
sort(services.begin(), services.end(), [](const shared_ptr<lv2_config_service>& s1, const shared_ptr<lv2_config_service>& s2)
|
||||
{
|
||||
return s1->timestamp < s2->timestamp;
|
||||
});
|
||||
|
@ -198,9 +217,9 @@ void lv2_config_service::unregister()
|
|||
|
||||
void lv2_config_service::notify() const
|
||||
{
|
||||
std::vector<std::shared_ptr<lv2_config_service_listener>> listeners;
|
||||
std::vector<shared_ptr<lv2_config_service_listener>> listeners;
|
||||
|
||||
auto sptr = wkptr.lock();
|
||||
const shared_ptr<lv2_config_service> sptr = get_shared_ptr();
|
||||
|
||||
idm::select<lv2_config_service_listener>([&](u32 /*id*/, lv2_config_service_listener& listener)
|
||||
{
|
||||
|
@ -210,13 +229,14 @@ void lv2_config_service::notify() const
|
|||
|
||||
for (auto& listener : listeners)
|
||||
{
|
||||
listener->notify(this->get_shared_ptr());
|
||||
listener->notify(sptr);
|
||||
}
|
||||
}
|
||||
|
||||
bool lv2_config_service_event::notify() const
|
||||
{
|
||||
const auto _handle = handle.lock();
|
||||
const auto _handle = handle;
|
||||
|
||||
if (!_handle)
|
||||
{
|
||||
return false;
|
||||
|
@ -259,7 +279,7 @@ error_code sys_config_open(u32 equeue_hdl, vm::ptr<u32> out_config_hdl)
|
|||
sys_config.trace("sys_config_open(equeue_hdl=0x%x, out_config_hdl=*0x%x)", equeue_hdl, out_config_hdl);
|
||||
|
||||
// Find queue with the given ID
|
||||
const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_hdl);
|
||||
const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_hdl);
|
||||
if (!queue)
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
|
@ -303,7 +323,7 @@ error_code sys_config_get_service_event(u32 config_hdl, u32 event_id, vm::ptr<sy
|
|||
sys_config.trace("sys_config_get_service_event(config_hdl=0x%x, event_id=0x%llx, dst=*0x%llx, size=0x%llx)", config_hdl, event_id, dst, size);
|
||||
|
||||
// Find sys_config handle object with the given ID
|
||||
const auto cfg = idm::get<lv2_config_handle>(config_hdl);
|
||||
const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
|
||||
if (!cfg)
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
|
@ -335,7 +355,7 @@ error_code sys_config_add_service_listener(u32 config_hdl, sys_config_service_id
|
|||
sys_config.trace("sys_config_add_service_listener(config_hdl=0x%x, service_id=0x%llx, min_verbosity=0x%llx, in=*0x%x, size=%lld, type=0x%llx, out_listener_hdl=*0x%x)", config_hdl, service_id, min_verbosity, in, size, type, out_listener_hdl);
|
||||
|
||||
// Find sys_config handle object with the given ID
|
||||
auto cfg = idm::get<lv2_config_handle>(config_hdl);
|
||||
auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
|
||||
if (!cfg)
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
|
@ -383,7 +403,7 @@ error_code sys_config_register_service(u32 config_hdl, sys_config_service_id ser
|
|||
sys_config.trace("sys_config_register_service(config_hdl=0x%x, service_id=0x%llx, user_id=0x%llx, verbosity=0x%llx, data_but=*0x%llx, size=%lld, out_service_hdl=*0x%llx)", config_hdl, service_id, user_id, verbosity, data_buf, size, out_service_hdl);
|
||||
|
||||
// Find sys_config handle object with the given ID
|
||||
const auto cfg = idm::get<lv2_config_handle>(config_hdl);
|
||||
const auto cfg = idm::get_unlocked<lv2_config_handle>(config_hdl);
|
||||
if (!cfg)
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
|
|
|
@ -3,6 +3,9 @@
|
|||
#include <map>
|
||||
#include <list>
|
||||
|
||||
#include "util/atomic.hpp"
|
||||
#include "util/shared_ptr.hpp"
|
||||
|
||||
|
||||
/*
|
||||
* sys_config is a "subscription-based data storage API"
|
||||
|
@ -133,30 +136,30 @@ class lv2_config
|
|||
shared_mutex m_mutex;
|
||||
|
||||
// Map of LV2 Service Events
|
||||
std::unordered_map<u32, std::weak_ptr<lv2_config_service_event>> events;
|
||||
std::unordered_map<u32, shared_ptr<lv2_config_service_event>> events;
|
||||
|
||||
public:
|
||||
void initialize();
|
||||
|
||||
// Service Events
|
||||
void add_service_event(const std::shared_ptr<lv2_config_service_event>& event);
|
||||
void add_service_event(shared_ptr<lv2_config_service_event> event);
|
||||
void remove_service_event(u32 id);
|
||||
|
||||
std::shared_ptr<lv2_config_service_event> find_event(u32 id)
|
||||
shared_ptr<lv2_config_service_event> find_event(u32 id)
|
||||
{
|
||||
reader_lock lock(m_mutex);
|
||||
|
||||
const auto it = events.find(id);
|
||||
|
||||
if (it == events.cend())
|
||||
return nullptr;
|
||||
return null_ptr;
|
||||
|
||||
if (auto event = it->second.lock())
|
||||
if (it->second)
|
||||
{
|
||||
return event;
|
||||
return it->second;
|
||||
}
|
||||
|
||||
return nullptr;
|
||||
return null_ptr;
|
||||
}
|
||||
};
|
||||
|
||||
|
@ -175,33 +178,35 @@ private:
|
|||
u32 idm_id;
|
||||
|
||||
// queue for service/io event notifications
|
||||
const std::weak_ptr<lv2_event_queue> queue;
|
||||
const shared_ptr<lv2_event_queue> queue;
|
||||
|
||||
bool send_queue_event(u64 source, u64 d1, u64 d2, u64 d3) const
|
||||
{
|
||||
if (auto sptr = queue.lock())
|
||||
if (auto sptr = queue)
|
||||
{
|
||||
return sptr->send(source, d1, d2, d3) == 0;
|
||||
}
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
public:
|
||||
// Constructors (should not be used directly)
|
||||
lv2_config_handle(std::weak_ptr<lv2_event_queue>&& _queue)
|
||||
lv2_config_handle(shared_ptr<lv2_event_queue> _queue) noexcept
|
||||
: queue(std::move(_queue))
|
||||
{}
|
||||
{
|
||||
}
|
||||
|
||||
// Factory
|
||||
template <typename... Args>
|
||||
static std::shared_ptr<lv2_config_handle> create(Args&&... args)
static shared_ptr<lv2_config_handle> create(Args&&... args)
{
if (auto cfg = idm::make_ptr<lv2_config_handle>(std::forward<Args>(args)...))
{
cfg->idm_id = idm::last_id();
return cfg;
}
return nullptr;
return null_ptr;
}

// Notify event queue for this handle
@@ -225,7 +230,6 @@ public:
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service> wkptr;

// Whether this service is currently registered or not
bool registered = true;
@@ -240,27 +244,27 @@ public:
const std::vector<u8> data;

// Constructors (should not be used directly)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity, u32 _padding, const u8 _data[], usz size)
lv2_config_service(sys_config_service_id _id, u64 _user_id, u64 _verbosity, u32 _padding, const u8* _data, usz size) noexcept
: timestamp(get_system_time())
, id(_id)
, user_id(_user_id)
, verbosity(_verbosity)
, padding(_padding)
, data(&_data[0], &_data[size])
{}
{
}

// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service> create(Args&&... args)
static shared_ptr<lv2_config_service> create(Args&&... args)
{
if (auto service = idm::make_ptr<lv2_config_service>(std::forward<Args>(args)...))
{
service->wkptr = service;
service->idm_id = idm::last_id();
return service;
}

return nullptr;
return null_ptr;
}

// Registration
@@ -272,7 +276,7 @@ public:

// Utilities
usz get_size() const { return sizeof(sys_config_service_event_t)-1 + data.size(); }
std::shared_ptr<lv2_config_service> get_shared_ptr () const { return wkptr.lock(); }
shared_ptr<lv2_config_service> get_shared_ptr () const { return idm::get_unlocked<lv2_config_service>(idm_id); }
u32 get_id() const { return idm_id; }
};

@@ -290,14 +294,13 @@ public:
private:
// IDM data
u32 idm_id;
std::weak_ptr<lv2_config_service_listener> wkptr;

// The service listener owns the service events - service events will not be freed as long as their corresponding listener exists
// This has been confirmed to be the case in realhw
std::vector<std::shared_ptr<lv2_config_service_event>> service_events;
std::weak_ptr<lv2_config_handle> handle;
std::vector<shared_ptr<lv2_config_service_event>> service_events;
shared_ptr<lv2_config_handle> handle;

bool notify(const std::shared_ptr<lv2_config_service_event>& event);
bool notify(const shared_ptr<lv2_config_service_event>& event);

public:
const sys_config_service_id service_id;
@@ -307,8 +310,8 @@ public:
const std::vector<u8> data;

// Constructors (should not be used directly)
lv2_config_service_listener(std::shared_ptr<lv2_config_handle>& _handle, sys_config_service_id _service_id, u64 _min_verbosity, sys_config_service_listener_type _type, const u8 _data[], usz size)
: handle(_handle)
lv2_config_service_listener(shared_ptr<lv2_config_handle> _handle, sys_config_service_id _service_id, u64 _min_verbosity, sys_config_service_listener_type _type, const u8* _data, usz size) noexcept
: handle(std::move(_handle))
, service_id(_service_id)
, min_verbosity(_min_verbosity)
, type(_type)
@@ -317,30 +320,29 @@ public:

// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_listener> create(Args&&... args)
static shared_ptr<lv2_config_service_listener> create(Args&&... args)
{
if (auto listener = idm::make_ptr<lv2_config_service_listener>(std::forward<Args>(args)...))
{
listener->wkptr = listener;
listener->idm_id = idm::last_id();
return listener;
}

return nullptr;
return null_ptr;
}

// Check whether service matches
bool check_service(const lv2_config_service& service) const;

// Register new event, and notify queue
bool notify(const std::shared_ptr<lv2_config_service>& service);
bool notify(const shared_ptr<lv2_config_service>& service);

// (Re-)notify about all still-registered past events
void notify_all();

// Utilities
u32 get_id() const { return idm_id; }
std::shared_ptr<lv2_config_service_listener> get_shared_ptr() const { return wkptr.lock(); }
shared_ptr<lv2_config_service_listener> get_shared_ptr() const { return idm::get_unlocked<lv2_config_service_listener>(idm_id); }
};

/*
@@ -363,30 +365,24 @@ public:

// Note: Events hold a shared_ptr to their corresponding service - services only get freed once there are no more pending service events
// This has been confirmed to be the case in realhw
const std::weak_ptr<lv2_config_handle> handle;
const std::shared_ptr<lv2_config_service> service;
const shared_ptr<lv2_config_handle> handle;
const shared_ptr<lv2_config_service> service;
const lv2_config_service_listener& listener;

// Constructors (should not be used directly)
lv2_config_service_event(const std::weak_ptr<lv2_config_handle>& _handle, const std::shared_ptr<lv2_config_service>& _service, const lv2_config_service_listener& _listener)
: id(get_next_id())
, handle(_handle)
, service(_service)
, listener(_listener)
{}

lv2_config_service_event(const std::weak_ptr<lv2_config_handle>&& _handle, const std::shared_ptr<lv2_config_service>&& _service, const lv2_config_service_listener& _listener)
lv2_config_service_event(shared_ptr<lv2_config_handle> _handle, shared_ptr<lv2_config_service> _service, const lv2_config_service_listener& _listener) noexcept
: id(get_next_id())
, handle(std::move(_handle))
, service(std::move(_service))
, listener(_listener)
{}
{
}

// Factory
template <typename... Args>
static std::shared_ptr<lv2_config_service_event> create(Args&&... args)
static shared_ptr<lv2_config_service_event> create(Args&&... args)
{
auto ev = std::make_shared<lv2_config_service_event>(std::forward<Args>(args)...);
auto ev = make_shared<lv2_config_service_event>(std::forward<Args>(args)...);

g_fxo->get<lv2_config>().add_service_event(ev);

@@ -394,13 +390,9 @@ public:
}

// Destructor
~lv2_config_service_event()
{
if (auto global = g_fxo->try_get<lv2_config>())
{
global->remove_service_event(id);
}
}
lv2_config_service_event& operator=(thread_state s) noexcept;

~lv2_config_service_event() noexcept = default;

// Notify queue that this event exists
bool notify() const;

@@ -35,10 +35,10 @@ lv2_event_queue::lv2_event_queue(utils::serial& ar) noexcept
ar(events);
}

std::shared_ptr<void> lv2_event_queue::load(utils::serial& ar)
std::function<void(void*)> lv2_event_queue::load(utils::serial& ar)
{
auto queue = std::make_shared<lv2_event_queue>(ar);
return lv2_obj::load(queue->key, queue);
auto queue = make_shared<lv2_event_queue>(ar);
return [ptr = lv2_obj::load(queue->key, queue)](void* storage) { *static_cast<shared_ptr<lv2_obj>*>(storage) = ptr; };
}

void lv2_event_queue::save(utils::serial& ar)
@@ -57,13 +57,13 @@ void lv2_event_queue::save_ptr(utils::serial& ar, lv2_event_queue* q)
ar(q->id);
}

std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg)
shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, shared_ptr<lv2_event_queue>& queue, std::string_view msg)
{
const u32 id = ar.pop<u32>();

if (!id)
{
return nullptr;
return {};
}

if (auto q = idm::get_unlocked<lv2_obj, lv2_event_queue>(id))
@@ -89,7 +89,7 @@ std::shared_ptr<lv2_event_queue> lv2_event_queue::load_ptr(utils::serial& ar, st
});

// Null until resolved
return nullptr;
return {};
}

lv2_event_port::lv2_event_port(utils::serial& ar)
@@ -106,7 +106,7 @@ void lv2_event_port::save(utils::serial& ar)
lv2_event_queue::save_ptr(ar, queue.get());
}

std::shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
shared_ptr<lv2_event_queue> lv2_event_queue::find(u64 ipc_key)
{
if (ipc_key == SYS_EVENT_QUEUE_LOCAL)
{
@@ -238,7 +238,7 @@ error_code sys_event_queue_create(cpu_thread& cpu, vm::ptr<u32> equeue_id, vm::p

if (const auto error = lv2_obj::create<lv2_event_queue>(pshared, ipc_key, flags, [&]()
{
return std::make_shared<lv2_event_queue>(protocol, type, size, name, ipc_key);
return make_shared<lv2_event_queue>(protocol, type, size, name, ipc_key);
}))
{
return error;
@@ -394,7 +394,7 @@ error_code sys_event_queue_tryreceive(ppu_thread& ppu, u32 equeue_id, vm::ptr<sy

sys_event.trace("sys_event_queue_tryreceive(equeue_id=0x%x, event_array=*0x%x, size=%d, number=*0x%x)", equeue_id, event_array, size, number);

const auto queue = idm::get<lv2_obj, lv2_event_queue>(equeue_id);
const auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(equeue_id);

if (!queue)
{

@@ -100,10 +100,10 @@ struct lv2_event_queue final : public lv2_obj
lv2_event_queue(u32 protocol, s32 type, s32 size, u64 name, u64 ipc_key) noexcept;

lv2_event_queue(utils::serial& ar) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
static void save_ptr(utils::serial&, lv2_event_queue*);
static std::shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, std::shared_ptr<lv2_event_queue>& queue, std::string_view msg = {});
static shared_ptr<lv2_event_queue> load_ptr(utils::serial& ar, shared_ptr<lv2_event_queue>& queue, std::string_view msg = {});

CellError send(lv2_event event, bool* notified_thread = nullptr, lv2_event_port* port = nullptr);

@@ -113,7 +113,7 @@ struct lv2_event_queue final : public lv2_obj
}

// Get event queue by its global key
static std::shared_ptr<lv2_event_queue> find(u64 ipc_key);
static shared_ptr<lv2_event_queue> find(u64 ipc_key);
};

struct lv2_event_port final : lv2_obj
@@ -124,7 +124,7 @@ struct lv2_event_port final : lv2_obj
const u64 name; // Event source (generated from id and process id if not set)

atomic_t<usz> is_busy = 0; // Counts threads waiting on event sending
std::shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to
shared_ptr<lv2_event_queue> queue; // Event queue this port is connected to

lv2_event_port(s32 type, u64 name)
: type(type)

@@ -22,10 +22,9 @@ lv2_event_flag::lv2_event_flag(utils::serial& ar)
ar(pattern);
}

std::shared_ptr<void> lv2_event_flag::load(utils::serial& ar)
std::function<void(void*)> lv2_event_flag::load(utils::serial& ar)
{
auto eflag = std::make_shared<lv2_event_flag>(ar);
return lv2_obj::load(eflag->key, eflag);
return load_func(make_shared<lv2_event_flag>(ar));
}

void lv2_event_flag::save(utils::serial& ar)
@@ -66,7 +65,7 @@ error_code sys_event_flag_create(ppu_thread& ppu, vm::ptr<u32> id, vm::ptr<sys_e

if (const auto error = lv2_obj::create<lv2_event_flag>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_event_flag>(
return make_shared<lv2_event_flag>(
_attr.protocol,
ipc_key,
_attr.type,
@@ -330,7 +329,7 @@ error_code sys_event_flag_set(cpu_thread& cpu, u32 id, u64 bitptn)
// Warning: may be called from SPU thread.
sys_event_flag.trace("sys_event_flag_set(id=0x%x, bitptn=0x%llx)", id, bitptn);

const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);

if (!flag)
{
@@ -502,7 +501,7 @@ error_code sys_event_flag_cancel(ppu_thread& ppu, u32 id, vm::ptr<u32> num)

if (num) *num = 0;

const auto flag = idm::get<lv2_obj, lv2_event_flag>(id);
const auto flag = idm::get_unlocked<lv2_obj, lv2_event_flag>(id);

if (!flag)
{

@@ -54,7 +54,7 @@ struct lv2_event_flag final : lv2_obj
}

lv2_event_flag(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);

// Check mode arg

@@ -79,7 +79,7 @@ void fmt_class_string<lv2_file>::format(std::string& out, u64 arg)
const usz pos = file.file ? file.file.pos() : umax;
const usz size = file.file ? file.file.size() : umax;

fmt::append(out, u8"%s, “%s”, Mode: 0x%x, Flags: 0x%x, Pos/Size: %s/%s (0x%x/0x%x)", file.type, file.name.data(), file.mode, file.flags, get_size(pos), get_size(size), pos, size);
fmt::append(out, u8"%s, '%s', Mode: 0x%x, Flags: 0x%x, Pos/Size: %s/%s (0x%x/0x%x)", file.type, file.name.data(), file.mode, file.flags, get_size(pos), get_size(size), pos, size);
}

template<>
@@ -87,7 +87,7 @@ void fmt_class_string<lv2_dir>::format(std::string& out, u64 arg)
{
const auto& dir = get_object(arg);

fmt::append(out, u8"Directory, “%s”, Entries: %u/%u", dir.name.data(), std::min<u64>(dir.pos, dir.entries.size()), dir.entries.size());
fmt::append(out, u8"Directory, '%s', Entries: %u/%u", dir.name.data(), std::min<u64>(dir.pos, dir.entries.size()), dir.entries.size());
}

bool has_fs_write_rights(std::string_view vpath)
@@ -615,11 +615,11 @@ void loaded_npdrm_keys::save(utils::serial& ar)

struct lv2_file::file_view : fs::file_base
{
const std::shared_ptr<lv2_file> m_file;
const shared_ptr<lv2_file> m_file;
const u64 m_off;
u64 m_pos;

explicit file_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
explicit file_view(const shared_ptr<lv2_file>& _file, u64 offset)
: m_file(_file)
, m_off(offset)
, m_pos(0)
@@ -699,7 +699,7 @@ struct lv2_file::file_view : fs::file_base
}
};

fs::file lv2_file::make_view(const std::shared_ptr<lv2_file>& _file, u64 offset)
fs::file lv2_file::make_view(const shared_ptr<lv2_file>& _file, u64 offset)
{
fs::file result;
result.reset(std::make_unique<lv2_file::file_view>(_file, offset));
@@ -745,7 +745,7 @@ error_code sys_fs_test(ppu_thread&, u32 arg1, u32 arg2, vm::ptr<u32> arg3, u32 a
return CELL_EFAULT;
}

const auto file = idm::get<lv2_fs_object>(*arg3);
const auto file = idm::get_unlocked<lv2_fs_object>(*arg3);

if (!file)
{
@@ -1059,16 +1059,16 @@ error_code sys_fs_open(ppu_thread& ppu, vm::cptr<char> path, s32 flags, vm::ptr<
return {g_fxo->get<lv2_fs_mount_info_map>().lookup(vpath) == &g_mp_sys_dev_hdd1 ? sys_fs.warning : sys_fs.error, error, path};
}

if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&ppath = ppath, &file = file, mode, flags, &real = real, &type = type]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&ppath = ppath, &file = file, mode, flags, &real = real, &type = type]() -> shared_ptr<lv2_file>
{
std::shared_ptr<lv2_file> result;
shared_ptr<lv2_file> result;

if (type >= lv2_file_type::sdata && !g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return result;
}

result = std::make_shared<lv2_file>(ppath, std::move(file), mode, flags, real, type);
result = stx::make_shared<lv2_file>(ppath, std::move(file), mode, flags, real, type);
sys_fs.warning("sys_fs_open(): fd=%u, %s", idm::last_id(), *result);
return result;
}))
@@ -1100,7 +1100,7 @@ error_code sys_fs_read(ppu_thread& ppu, u32 fd, vm::ptr<void> buf, u64 nbytes, v
return CELL_EFAULT;
}

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file || (nbytes && file->flags & CELL_FS_O_WRONLY))
{
@@ -1169,7 +1169,7 @@ error_code sys_fs_write(ppu_thread& ppu, u32 fd, vm::cptr<void> buf, u64 nbytes,
return CELL_EFAULT;
}

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file || (nbytes && !(file->flags & CELL_FS_O_ACCMODE)))
{
@@ -1239,7 +1239,7 @@ error_code sys_fs_close(ppu_thread& ppu, u32 fd)
ppu.state += cpu_flag::wait;
lv2_obj::sleep(ppu);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -1279,7 +1279,7 @@ error_code sys_fs_close(ppu_thread& ppu, u32 fd)
auto& default_container = g_fxo->get<default_sys_fs_container>();
std::lock_guard lock(default_container.mutex);

if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
if (auto ct = idm::get_unlocked<lv2_memory_container>(file->ct_id))
{
ct->free(file->ct_used);
if (default_container.id == file->ct_id)
@@ -1442,7 +1442,7 @@ error_code sys_fs_readdir(ppu_thread& ppu, u32 fd, vm::ptr<CellFsDirent> dir, vm
return CELL_EFAULT;
}

const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
const auto directory = idm::get_unlocked<lv2_fs_object, lv2_dir>(fd);

if (!directory)
{
@@ -1614,7 +1614,7 @@ error_code sys_fs_fstat(ppu_thread& ppu, u32 fd, vm::ptr<CellFsStat> sb)

sys_fs.warning("sys_fs_fstat(fd=%d, sb=*0x%x)", fd, sb);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -1960,7 +1960,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -2056,7 +2056,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -2081,14 +2081,14 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32

fs::file stream;
stream.reset(std::move(sdata_file));
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&file = *file, &stream = stream]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&file = *file, &stream = stream]() -> shared_ptr<lv2_file>
{
if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return nullptr;
return null_ptr;
}

return std::make_shared<lv2_file>(file, std::move(stream), file.mode, CELL_FS_O_RDONLY, file.real_path, lv2_file_type::sdata);
return stx::make_shared<lv2_file>(file, std::move(stream), file.mode, CELL_FS_O_RDONLY, file.real_path, lv2_file_type::sdata);
}))
{
arg->out_code = CELL_OK;
@@ -2198,13 +2198,13 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_OK;
}

auto file = idm::get<lv2_fs_object, lv2_file>(fd);
auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);
if (!file)
{
return CELL_EBADF;
}

if (auto ct = idm::get<lv2_memory_container>(file->ct_id))
if (auto ct = idm::get_unlocked<lv2_memory_container>(file->ct_id))
{
ct->free(file->ct_used);
if (default_container.id == file->ct_id)
@@ -2427,7 +2427,7 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return CELL_EINVAL;
}

const auto directory = idm::get<lv2_fs_object, lv2_dir>(fd);
const auto directory = idm::get_unlocked<lv2_fs_object, lv2_dir>(fd);

if (!directory)
{
@@ -2566,14 +2566,14 @@ error_code sys_fs_fcntl(ppu_thread& ppu, u32 fd, u32 op, vm::ptr<void> _arg, u32
return result.error;
}

if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&]() -> std::shared_ptr<lv2_file>
if (const u32 id = idm::import<lv2_fs_object, lv2_file>([&]() -> shared_ptr<lv2_file>
{
if (!g_fxo->get<loaded_npdrm_keys>().npdrm_fds.try_inc(16))
{
return nullptr;
return null_ptr;
}

return std::make_shared<lv2_file>(result.ppath, std::move(result.file), 0, 0, std::move(result.real_path), lv2_file_type::sdata);
return stx::make_shared<lv2_file>(result.ppath, std::move(result.file), 0, 0, std::move(result.real_path), lv2_file_type::sdata);
}))
{
arg->out_code = CELL_OK;
@@ -2597,7 +2597,7 @@ error_code sys_fs_lseek(ppu_thread& ppu, u32 fd, s64 offset, s32 whence, vm::ptr

sys_fs.trace("sys_fs_lseek(fd=%d, offset=0x%llx, whence=0x%x, pos=*0x%x)", fd, offset, whence, pos);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -2643,7 +2643,7 @@ error_code sys_fs_fdatasync(ppu_thread& ppu, u32 fd)

sys_fs.trace("sys_fs_fdadasync(fd=%d)", fd);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@@ -2668,7 +2668,7 @@ error_code sys_fs_fsync(ppu_thread& ppu, u32 fd)

sys_fs.trace("sys_fs_fsync(fd=%d)", fd);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@@ -2692,7 +2692,7 @@ error_code sys_fs_fget_block_size(ppu_thread& ppu, u32 fd, vm::ptr<u64> sector_s

sys_fs.warning("sys_fs_fget_block_size(fd=%d, sector_size=*0x%x, block_size=*0x%x, arg4=*0x%x, out_flags=*0x%x)", fd, sector_size, block_size, arg4, out_flags);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -2819,7 +2819,7 @@ error_code sys_fs_ftruncate(ppu_thread& ppu, u32 fd, u64 size)

sys_fs.warning("sys_fs_ftruncate(fd=%d, size=0x%llx)", fd, size);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file || !(file->flags & CELL_FS_O_ACCMODE))
{
@@ -3089,7 +3089,7 @@ error_code sys_fs_lsn_get_cda_size(ppu_thread&, u32 fd, vm::ptr<u64> ptr)
{
sys_fs.warning("sys_fs_lsn_get_cda_size(fd=%d, ptr=*0x%x)", fd, ptr);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -3112,7 +3112,7 @@ error_code sys_fs_lsn_lock(ppu_thread&, u32 fd)
{
sys_fs.trace("sys_fs_lsn_lock(fd=%d)", fd);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -3134,7 +3134,7 @@ error_code sys_fs_lsn_unlock(ppu_thread&, u32 fd)
{
sys_fs.trace("sys_fs_lsn_unlock(fd=%d)", fd);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{

@@ -360,7 +360,7 @@ struct lv2_file final : lv2_fs_object
struct file_view;

// Make file view from lv2_file object (for MSELF support)
static fs::file make_view(const std::shared_ptr<lv2_file>& _file, u64 offset);
static fs::file make_view(const shared_ptr<lv2_file>& _file, u64 offset);
};

struct lv2_dir final : lv2_fs_object

@@ -12,13 +12,13 @@
LOG_CHANNEL(sys_interrupt);

lv2_int_tag::lv2_int_tag() noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
{
}

lv2_int_tag::lv2_int_tag(utils::serial& ar) noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
, handler([&]()
{
@@ -44,8 +44,8 @@ void lv2_int_tag::save(utils::serial& ar)
ar(lv2_obj::check(handler) ? handler->id : 0);
}

lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept
: lv2_obj{1}
lv2_int_serv::lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread, u64 arg1, u64 arg2) noexcept
: lv2_obj(1)
, id(idm::last_id())
, thread(thread)
, arg1(arg1)
@@ -54,7 +54,7 @@ lv2_int_serv::lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thre
}

lv2_int_serv::lv2_int_serv(utils::serial& ar) noexcept
: lv2_obj{1}
: lv2_obj(1)
, id(idm::last_id())
, thread(idm::get_unlocked<named_thread<ppu_thread>>(ar))
, arg1(ar)
@@ -96,7 +96,7 @@ void lv2_int_serv::join() const
thread->cmd_notify.notify_one();
(*thread)();

idm::remove_verify<named_thread<ppu_thread>>(thread->id, static_cast<std::weak_ptr<named_thread<ppu_thread>>>(thread));
idm::remove_verify<named_thread<ppu_thread>>(thread->id, thread);
}

error_code sys_interrupt_tag_destroy(ppu_thread& ppu, u32 intrtag)
@@ -139,7 +139,7 @@ error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32

const u32 id = idm::import<lv2_obj, lv2_int_serv>([&]()
{
std::shared_ptr<lv2_int_serv> result;
shared_ptr<lv2_int_serv> result;

// Get interrupt tag
const auto tag = idm::check_unlocked<lv2_obj, lv2_int_tag>(intrtag);
@@ -173,7 +173,7 @@ error_code _sys_interrupt_thread_establish(ppu_thread& ppu, vm::ptr<u32> ih, u32
return result;
}

result = std::make_shared<lv2_int_serv>(it, arg1, arg2);
result = make_shared<lv2_int_serv>(it, arg1, arg2);
tag->handler = result;

it->cmd_list
@@ -251,7 +251,7 @@ void ppu_interrupt_thread_entry(ppu_thread& ppu, ppu_opcode_t, be_t<u32>*, struc
{
while (true)
{
std::shared_ptr<lv2_int_serv> serv = nullptr;
shared_ptr<lv2_int_serv> serv = null_ptr;

// Loop endlessly trying to invoke an interrupt if required
idm::select<named_thread<spu_thread>>([&](u32, spu_thread& spu)

@@ -11,7 +11,7 @@ struct lv2_int_tag final : public lv2_obj
static const u32 id_base = 0x0a000000;

const u32 id;
std::shared_ptr<struct lv2_int_serv> handler;
shared_ptr<struct lv2_int_serv> handler;

lv2_int_tag() noexcept;
lv2_int_tag(utils::serial& ar) noexcept;
@@ -23,11 +23,11 @@ struct lv2_int_serv final : public lv2_obj
static const u32 id_base = 0x0b000000;

const u32 id;
const std::shared_ptr<named_thread<ppu_thread>> thread;
const shared_ptr<named_thread<ppu_thread>> thread;
const u64 arg1;
const u64 arg2;

lv2_int_serv(const std::shared_ptr<named_thread<ppu_thread>>& thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(shared_ptr<named_thread<ppu_thread>> thread, u64 arg1, u64 arg2) noexcept;
lv2_int_serv(utils::serial& ar) noexcept;
void save(utils::serial& ar);

@@ -43,7 +43,7 @@ error_code sys_io_buffer_allocate(u32 handle, vm::ptr<u32> block)
return CELL_EFAULT;
}

if (auto io = idm::get<lv2_io_buf>(handle))
if (auto io = idm::get_unlocked<lv2_io_buf>(handle))
{
// no idea what we actually need to allocate
if (u32 addr = vm::alloc(io->block_count * io->block_size, vm::main))
@@ -62,7 +62,7 @@ error_code sys_io_buffer_free(u32 handle, u32 block)
{
sys_io.todo("sys_io_buffer_free(handle=0x%x, block=0x%x)", handle, block);

const auto io = idm::get<lv2_io_buf>(handle);
const auto io = idm::get_unlocked<lv2_io_buf>(handle);

if (!io)
{

@@ -64,7 +64,7 @@ error_code _sys_lwcond_destroy(ppu_thread& ppu, u32 lwcond_id)

sys_lwcond.trace("_sys_lwcond_destroy(lwcond_id=0x%x)", lwcond_id);

std::shared_ptr<lv2_lwcond> _cond;
shared_ptr<lv2_lwcond> _cond;

while (true)
{
@@ -440,7 +440,7 @@ error_code _sys_lwcond_queue_wait(ppu_thread& ppu, u32 lwcond_id, u32 lwmutex_id

ppu.gpr[3] = CELL_OK;

std::shared_ptr<lv2_lwmutex> mutex;
shared_ptr<lv2_lwmutex> mutex;

auto& sstate = *ppu.optional_savestate_state;

@@ -56,7 +56,7 @@ error_code _sys_lwmutex_destroy(ppu_thread& ppu, u32 lwmutex_id)

sys_lwmutex.trace("_sys_lwmutex_destroy(lwmutex_id=0x%x)", lwmutex_id);

std::shared_ptr<lv2_lwmutex> _mutex;
shared_ptr<lv2_lwmutex> _mutex;

while (true)
{

@@ -28,10 +28,13 @@ lv2_memory_container::lv2_memory_container(utils::serial& ar, bool from_idm) noe
{
}

std::shared_ptr<void> lv2_memory_container::load(utils::serial& ar)
std::function<void(void*)> lv2_memory_container::load(utils::serial& ar)
{
// Use idm::last_id() only for the instances at IDM
return std::make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true);
return [ptr = make_shared<lv2_memory_container>(stx::exact_t<utils::serial&>(ar), true)](void* storage)
{
*static_cast<shared_ptr<lv2_memory_container>*>(storage) = ptr;
};
}

void lv2_memory_container::save(utils::serial& ar)
@@ -43,7 +46,7 @@ lv2_memory_container* lv2_memory_container::search(u32 id)
{
if (id != SYS_MEMORY_CONTAINER_ID_INVALID)
{
return idm::check<lv2_memory_container>(id);
return idm::check_unlocked<lv2_memory_container>(id);
}

return &g_fxo->get<lv2_memory_container>();
@@ -397,7 +400,7 @@ error_code sys_memory_container_get_size(cpu_thread& cpu, vm::ptr<sys_memory_inf

sys_memory.warning("sys_memory_container_get_size(mem_info=*0x%x, cid=0x%x)", mem_info, cid);

const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);

if (!ct)
{

@@ -74,7 +74,7 @@ struct lv2_memory_container

lv2_memory_container(u32 size, bool from_idm = false) noexcept;
lv2_memory_container(utils::serial& ar, bool from_idm = false) noexcept;
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
static lv2_memory_container* search(u32 id);

@@ -82,13 +82,13 @@ CellError lv2_memory::on_id_create()
return {};
}

std::shared_ptr<void> lv2_memory::load(utils::serial& ar)
std::function<void(void*)> lv2_memory::load(utils::serial& ar)
{
auto mem = std::make_shared<lv2_memory>(ar);
auto mem = make_shared<lv2_memory>(ar);
mem->exists++; // Disable on_id_create()
std::shared_ptr<void> ptr = lv2_obj::load(mem->key, mem, +mem->pshared);
auto func = load_func(mem, +mem->pshared);
mem->exists--;
return ptr;
return func;
}

void lv2_memory::save(utils::serial& ar)
@@ -128,7 +128,7 @@ error_code create_lv2_shm(bool pshared, u64 ipc_key, u64 size, u32 align, u64 fl

if (auto error = lv2_obj::create<lv2_memory>(_pshared, ipc_key, exclusive ? SYS_SYNC_NEWLY_CREATED : SYS_SYNC_NOT_CARE, [&]()
{
return std::make_shared<lv2_memory>(
return make_shared<lv2_memory>(
static_cast<u32>(size),
align,
flags,
@@ -294,7 +294,7 @@ error_code sys_mmapper_allocate_shared_memory_from_container(ppu_thread& ppu, u6
}
}

const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);

if (!ct)
{
@@ -491,7 +491,7 @@ error_code sys_mmapper_allocate_shared_memory_from_container_ext(ppu_thread& ppu
}
}

const auto ct = idm::get<lv2_memory_container>(cid);
const auto ct = idm::get_unlocked<lv2_memory_container>(cid);

if (!ct)
{
@@ -797,7 +797,7 @@ error_code sys_mmapper_enable_page_fault_notification(ppu_thread& ppu, u32 start

// TODO: Check memory region's flags to make sure the memory can be used for page faults.

auto queue = idm::get<lv2_obj, lv2_event_queue>(event_queue_id);
auto queue = idm::get_unlocked<lv2_obj, lv2_event_queue>(event_queue_id);

if (!queue)
{ // Can't connect the queue if it doesn't exist.

@@ -31,7 +31,7 @@ struct lv2_memory : lv2_obj
lv2_memory(u32 size, u32 align, u64 flags, u64 key, bool pshared, lv2_memory_container* ct);

lv2_memory(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);

CellError on_id_create();

@@ -25,10 +25,9 @@ lv2_mutex::lv2_mutex(utils::serial& ar)
control.raw().owner >>= 1;
}

std::shared_ptr<void> lv2_mutex::load(utils::serial& ar)
std::function<void(void*)> lv2_mutex::load(utils::serial& ar)
{
auto mtx = std::make_shared<lv2_mutex>(ar);
return lv2_obj::load(mtx->key, mtx);
return load_func(make_shared<lv2_mutex>(ar));
}

void lv2_mutex::save(utils::serial& ar)
@@ -88,7 +87,7 @@ error_code sys_mutex_create(ppu_thread& ppu, vm::ptr<u32> mutex_id, vm::ptr<sys_

if (auto error = lv2_obj::create<lv2_mutex>(_attr.pshared, _attr.ipc_key, _attr.flags, [&]()
{
return std::make_shared<lv2_mutex>(
return make_shared<lv2_mutex>(
_attr.protocol,
_attr.recursive,
_attr.adaptive,

@@ -58,7 +58,7 @@ struct lv2_mutex final : lv2_obj
}

lv2_mutex(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);

template <typename T>

@@ -266,25 +266,25 @@ lv2_socket::lv2_socket(utils::serial& ar, lv2_socket_type _type)
ar(last_bound_addr);
}

std::shared_ptr<void> lv2_socket::load(utils::serial& ar)
std::function<void(void*)> lv2_socket::load(utils::serial& ar)
{
const lv2_socket_type type{ar};

std::shared_ptr<lv2_socket> sock_lv2;
shared_ptr<lv2_socket> sock_lv2;

switch (type)
{
case SYS_NET_SOCK_STREAM:
case SYS_NET_SOCK_DGRAM:
{
auto lv2_native = std::make_shared<lv2_socket_native>(ar, type);
auto lv2_native = make_shared<lv2_socket_native>(ar, type);
ensure(lv2_native->create_socket() >= 0);
sock_lv2 = std::move(lv2_native);
break;
}
case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(ar, type); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(ar, type); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(ar, type); break;
case SYS_NET_SOCK_RAW: sock_lv2 = make_shared<lv2_socket_raw>(ar, type); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = make_shared<lv2_socket_p2p>(ar, type); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = make_shared<lv2_socket_p2ps>(ar, type); break;
}

if (std::memcmp(&sock_lv2->last_bound_addr, std::array<u8, 16>{}.data(), 16))
@@ -293,7 +293,7 @@ std::shared_ptr<void> lv2_socket::load(utils::serial& ar)
sock_lv2->bind(sock_lv2->last_bound_addr);
}

return sock_lv2;
return [ptr = sock_lv2](void* storage) { *static_cast<shared_ptr<lv2_socket>*>(storage) = ptr; };;
}

void lv2_socket::save(utils::serial& ar, bool save_only_this_class)
@@ -352,7 +352,7 @@ error_code sys_net_bnet_accept(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr>

s32 result = 0;
sys_net_sockaddr sn_addr{};
std::shared_ptr<lv2_socket> new_socket{};
shared_ptr<lv2_socket> new_socket{};

const auto sock = idm::check<lv2_socket>(s, [&, notify = lv2_obj::notify_all_t()](lv2_socket& sock)
{
@@ -465,7 +465,7 @@ error_code sys_net_bnet_bind(ppu_thread& ppu, s32 s, vm::cptr<sys_net_sockaddr>
return -SYS_NET_EINVAL;
}

if (!idm::check<lv2_socket>(s))
if (!idm::check_unlocked<lv2_socket>(s))
{
return -SYS_NET_EBADF;
}
@@ -514,7 +514,7 @@ error_code sys_net_bnet_connect(ppu_thread& ppu, s32 s, vm::ptr<sys_net_sockaddr
return -SYS_NET_EAFNOSUPPORT;
}

if (!idm::check<lv2_socket>(s))
if (!idm::check_unlocked<lv2_socket>(s))
{
return -SYS_NET_EBADF;
}
@@ -1194,14 +1194,14 @@ error_code sys_net_bnet_socket(ppu_thread& ppu, lv2_socket_family family, lv2_so
return -SYS_NET_EPROTONOSUPPORT;
}

std::shared_ptr<lv2_socket> sock_lv2;
shared_ptr<lv2_socket> sock_lv2;

switch (type)
{
case SYS_NET_SOCK_STREAM:
case SYS_NET_SOCK_DGRAM:
{
auto lv2_native = std::make_shared<lv2_socket_native>(family, type, protocol);
auto lv2_native = make_shared<lv2_socket_native>(family, type, protocol);
if (s32 result = lv2_native->create_socket(); result < 0)
{
return sys_net_error{result};
@@ -1210,9 +1210,9 @@ error_code sys_net_bnet_socket(ppu_thread& ppu, lv2_socket_family family, lv2_so
sock_lv2 = std::move(lv2_native);
break;
}
case SYS_NET_SOCK_RAW: sock_lv2 = std::make_shared<lv2_socket_raw>(family, type, protocol); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2p>(family, type, protocol); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = std::make_shared<lv2_socket_p2ps>(family, type, protocol); break;
case SYS_NET_SOCK_RAW: sock_lv2 = make_shared<lv2_socket_raw>(family, type, protocol); break;
case SYS_NET_SOCK_DGRAM_P2P: sock_lv2 = make_shared<lv2_socket_p2p>(family, type, protocol); break;
case SYS_NET_SOCK_STREAM_P2P: sock_lv2 = make_shared<lv2_socket_p2ps>(family, type, protocol); break;
}

const s32 s = idm::import_existing<lv2_socket>(sock_lv2);
@@ -1775,7 +1775,7 @@ error_code sys_net_abort(ppu_thread& ppu, s32 type, u64 arg, s32 flags)
{
std::lock_guard nw_lock(g_fxo->get<network_context>().mutex_thread_loop);

const auto sock = idm::get<lv2_socket>(static_cast<u32>(arg));
const auto sock = idm::get_unlocked<lv2_socket>(static_cast<u32>(arg));

if (!sock)
{

@@ -64,7 +64,7 @@ void lv2_socket::set_poll_event(bs_t<lv2_socket::poll_t> event)
events += event;
}

void lv2_socket::poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event, std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb)
void lv2_socket::poll_queue(shared_ptr<ppu_thread> ppu, bs_t<lv2_socket::poll_t> event, std::function<bool(bs_t<lv2_socket::poll_t>)> poll_cb)
{
set_poll_event(event);
queue.emplace_back(std::move(ppu), poll_cb);
@@ -175,3 +175,17 @@ void lv2_socket::queue_wake(ppu_thread* ppu)
break;
}
}

lv2_socket& lv2_socket::operator=(thread_state s) noexcept
{
if (s == thread_state::finished)
{
close();
}

return *this;
}

lv2_socket::~lv2_socket() noexcept
{
}

@@ -27,6 +27,8 @@ using socket_type = uptr;
using socket_type = int;
#endif

enum class thread_state : u32;

class lv2_socket
{
public:
@@ -60,16 +62,17 @@ public:
lv2_socket(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket(utils::serial&) {}
lv2_socket(utils::serial&, lv2_socket_type type);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial&, bool save_only_this_class = false);
virtual ~lv2_socket() = default;
virtual ~lv2_socket() noexcept;
lv2_socket& operator=(thread_state s) noexcept;

std::unique_lock<shared_mutex> lock();

void set_lv2_id(u32 id);
bs_t<poll_t> get_events() const;
void set_poll_event(bs_t<poll_t> event);
void poll_queue(std::shared_ptr<ppu_thread> ppu, bs_t<poll_t> event, std::function<bool(bs_t<poll_t>)> poll_cb);
void poll_queue(shared_ptr<ppu_thread> ppu, bs_t<poll_t> event, std::function<bool(bs_t<poll_t>)> poll_cb);
u32 clear_queue(ppu_thread*);
void handle_events(const pollfd& native_fd, bool unset_connecting = false);
void queue_wake(ppu_thread* ppu);
@@ -85,7 +88,7 @@ public:
#endif

public:
virtual std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) = 0;
virtual s32 bind(const sys_net_sockaddr& addr) = 0;

virtual std::optional<s32> connect(const sys_net_sockaddr& addr) = 0;
@@ -133,7 +136,7 @@ protected:
atomic_bs_t<poll_t> events{};

// Event processing workload (pair of thread id and the processing function)
std::vector<std::pair<std::shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>> queue;
std::vector<std::pair<shared_ptr<ppu_thread>, std::function<bool(bs_t<poll_t>)>>> queue;

// Socket options value keepers
// Non-blocking IO option

@@ -57,17 +57,9 @@ void lv2_socket_native::save(utils::serial& ar)
ar(is_socket_connected());
}

lv2_socket_native::~lv2_socket_native()
lv2_socket_native::~lv2_socket_native() noexcept
{
std::lock_guard lock(mutex);
if (socket)
{
#ifdef _WIN32
::closesocket(socket);
#else
::close(socket);
#endif
}
lv2_socket_native::close();
}

s32 lv2_socket_native::create_socket()
@@ -106,7 +98,7 @@ void lv2_socket_native::set_socket(socket_type socket, lv2_socket_family family,
set_non_blocking();
}

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_native::accept(bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_native::accept(bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

@@ -127,7 +119,7 @@ std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_

if (native_socket != invalid_socket)
{
auto newsock = std::make_shared<lv2_socket_native>(family, type, protocol);
auto newsock = make_single<lv2_socket_native>(family, type, protocol);
newsock->set_socket(native_socket, family, type, protocol);

// Sockets inherit non blocking behaviour from their parent
@@ -274,7 +266,7 @@ std::optional<s32> lv2_socket_native::connect(const sys_net_sockaddr& addr)
#ifdef _WIN32
connecting = true;
#endif
this->poll_queue(nullptr, lv2_socket::poll_t::write, [this](bs_t<lv2_socket::poll_t> events) -> bool
this->poll_queue(null_ptr, lv2_socket::poll_t::write, [this](bs_t<lv2_socket::poll_t> events) -> bool
{
if (events & lv2_socket::poll_t::write)
{
@@ -1114,10 +1106,12 @@ void lv2_socket_native::close()
socket = {};
}

auto& dnshook = g_fxo->get<np::dnshook>();
dnshook.remove_dns_spy(lv2_id);
if (auto dnshook = g_fxo->try_get<np::dnshook>())
{
dnshook->remove_dns_spy(lv2_id);
}

if (bound_port)
if (bound_port && g_fxo->is_init<named_thread<np::np_handler>>())
{
auto& nph = g_fxo->get<named_thread<np::np_handler>>();
nph.upnp_remove_port_mapping(bound_port, type == SYS_NET_SOCK_STREAM ? "TCP" : "UDP");

@@ -30,13 +30,15 @@
class lv2_socket_native final : public lv2_socket
{
public:
static constexpr u32 id_type = 1;

lv2_socket_native(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_native(utils::serial& ar, lv2_socket_type type);
~lv2_socket_native() noexcept override;
void save(utils::serial& ar);
~lv2_socket_native();
s32 create_socket();

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;

std::optional<s32> connect(const sys_net_sockaddr& addr) override;

@@ -72,7 +72,7 @@ void lv2_socket_p2p::handle_new_data(sys_net_sockaddr_in_p2p p2p_addr, std::vect
}
}

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2p::accept([[maybe_unused]] bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2p::accept([[maybe_unused]] bool is_lock)
{
sys_net.fatal("[P2P] accept() called on a P2P socket");
return {};
@@ -330,9 +330,14 @@ void lv2_socket_p2p::close()
return;
}

auto& nc = g_fxo->get<p2p_context>();
if (g_fxo->is_init<p2p_context>())
{
auto& nc = g_fxo->get<p2p_context>();
std::lock_guard lock(nc.list_p2p_ports_mutex);

if (!nc.list_p2p_ports.contains(port))
return;

auto& p2p_port = ::at32(nc.list_p2p_ports, port);
{
std::lock_guard lock(p2p_port.bound_p2p_vports_mutex);

@@ -9,7 +9,7 @@ public:
lv2_socket_p2p(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;

std::optional<s32> connect(const sys_net_sockaddr& addr) override;

@@ -467,7 +467,7 @@ bool lv2_socket_p2ps::handle_listening(p2ps_encapsulated_tcp* tcp_header, [[mayb
const u16 new_op_vport = tcp_header->src_port;
const u64 new_cur_seq = send_hdr.seq + 1;
const u64 new_data_beg_seq = send_hdr.ack;
auto sock_lv2 = std::make_shared<lv2_socket_p2ps>(socket, port, vport, new_op_addr, new_op_port, new_op_vport, new_cur_seq, new_data_beg_seq, so_nbio);
auto sock_lv2 = make_shared<lv2_socket_p2ps>(socket, port, vport, new_op_addr, new_op_port, new_op_vport, new_cur_seq, new_data_beg_seq, so_nbio);
const s32 new_sock_id = idm::import_existing<lv2_socket>(sock_lv2);
sock_lv2->set_lv2_id(new_sock_id);
const u64 key_connected = (reinterpret_cast<struct sockaddr_in*>(op_addr)->sin_addr.s_addr) | (static_cast<u64>(tcp_header->src_port) << 48) | (static_cast<u64>(tcp_header->dst_port) << 32);
@@ -600,7 +600,7 @@ std::pair<s32, sys_net_sockaddr> lv2_socket_p2ps::getpeername()
return {CELL_OK, res};
}

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2ps::accept(bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_p2ps::accept(bool is_lock)
{
std::unique_lock<shared_mutex> lock(mutex, std::defer_lock);

@@ -944,8 +944,9 @@ void lv2_socket_p2ps::close()
return;
}

auto& nc = g_fxo->get<p2p_context>();
if (g_fxo->is_init<p2p_context>())
{
auto& nc = g_fxo->get<p2p_context>();
std::lock_guard lock(nc.list_p2p_ports_mutex);
auto& p2p_port = ::at32(nc.list_p2p_ports, port);
{
@@ -973,8 +974,10 @@ void lv2_socket_p2ps::close()
}
}

auto& tcpm = g_fxo->get<named_thread<tcp_timeout_monitor>>();
tcpm.clear_all_messages(lv2_id);
if (const auto tcpm = g_fxo->try_get<named_thread<tcp_timeout_monitor>>())
{
tcpm->clear_all_messages(lv2_id);
}
}

s32 lv2_socket_p2ps::shutdown([[maybe_unused]] s32 how)

@@ -58,6 +58,8 @@ std::vector<u8> generate_u2s_packet(const p2ps_encapsulated_tcp& header, const u
class lv2_socket_p2ps final : public lv2_socket_p2p
{
public:
static constexpr u32 id_type = 2;

lv2_socket_p2ps(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_p2ps(socket_type socket, u16 port, u16 vport, u32 op_addr, u16 op_port, u16 op_vport, u64 cur_seq, u64 data_beg_seq, s32 so_nbio);
lv2_socket_p2ps(utils::serial& ar, lv2_socket_type type);
@@ -70,7 +72,7 @@ public:
void send_u2s_packet(std::vector<u8> data, const ::sockaddr_in* dst, u64 seq, bool require_ack);
void close_stream();

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;

std::optional<s32> connect(const sys_net_sockaddr& addr) override;

@@ -36,7 +36,7 @@ void lv2_socket_raw::save(utils::serial& ar)
lv2_socket::save(ar, true);
}

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> lv2_socket_raw::accept([[maybe_unused]] bool is_lock)
{
sys_net.fatal("[RAW] accept() called on a RAW socket");
return {};

@@ -5,11 +5,13 @@
class lv2_socket_raw final : public lv2_socket
{
public:
static constexpr u32 id_type = 1;

lv2_socket_raw(lv2_socket_family family, lv2_socket_type type, lv2_ip_protocol protocol);
lv2_socket_raw(utils::serial& ar, lv2_socket_type type);
void save(utils::serial& ar);

std::tuple<bool, s32, std::shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
std::tuple<bool, s32, shared_ptr<lv2_socket>, sys_net_sockaddr> accept(bool is_lock = true) override;
s32 bind(const sys_net_sockaddr& addr) override;

std::optional<s32> connect(const sys_net_sockaddr& addr) override;

@@ -138,7 +138,7 @@ void p2p_thread::bind_sce_np_port()

void network_thread::operator()()
{
std::vector<std::shared_ptr<lv2_socket>> socklist;
std::vector<shared_ptr<lv2_socket>> socklist;
socklist.reserve(lv2_socket::id_count);

{

@@ -135,11 +135,11 @@ bool nt_p2p_port::handle_connected(s32 sock_id, p2ps_encapsulated_tcp* tcp_heade

bool nt_p2p_port::handle_listening(s32 sock_id, p2ps_encapsulated_tcp* tcp_header, u8* data, ::sockaddr_storage* op_addr)
{
auto sock = idm::get<lv2_socket>(sock_id);
auto sock = idm::get_unlocked<lv2_socket>(sock_id);
if (!sock)
return false;

auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(*sock.get());
auto& sock_p2ps = reinterpret_cast<lv2_socket_p2ps&>(*sock);
return sock_p2ps.handle_listening(tcp_header, data, op_addr);
}

@@ -13,10 +13,10 @@
#include "sys_overlay.h"
#include "sys_fs.h"

extern std::pair<std::shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);
extern std::pair<shared_ptr<lv2_overlay>, CellError> ppu_load_overlay(const ppu_exec_object&, bool virtual_load, const std::string& path, s64 file_offset, utils::serial* ar = nullptr);

extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module<lv2_obj>&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);

LOG_CHANNEL(sys_overlay);

@@ -68,7 +68,7 @@ static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vp

ppu_initialize(*ovlm);

sys_overlay.success(u8"Loaded overlay: “%s” (id=0x%x)", vpath, idm::last_id());
sys_overlay.success("Loaded overlay: \"%s\" (id=0x%x)", vpath, idm::last_id());

*ovlmid = idm::last_id();
*entry = ovlm->entry;
@@ -78,7 +78,7 @@ static error_code overlay_load_module(vm::ptr<u32> ovlmid, const std::string& vp

fs::file make_file_view(fs::file&& file, u64 offset, u64 size);

std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
std::function<void(void*)> lv2_overlay::load(utils::serial& ar)
{
const std::string vpath = ar.pop<std::string>();
const std::string path = vfs::get(vpath);
@@ -86,7 +86,7 @@ std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)

sys_overlay.success("lv2_overlay::load(): vpath='%s', path='%s', offset=0x%x", vpath, path, offset);

std::shared_ptr<lv2_overlay> ovlm;
shared_ptr<lv2_overlay> ovlm;

fs::file file{path.substr(0, path.size() - (offset ? fmt::format("_x%x", offset).size() : 0))};

@@ -110,7 +110,10 @@ std::shared_ptr<void> lv2_overlay::load(utils::serial& ar)
sys_overlay.error("lv2_overlay::load(): Failed to find file. (vpath='%s', offset=0x%x)", vpath, offset);
}

return ovlm;
return [ovlm](void* storage)
{
*static_cast<shared_ptr<lv2_obj>*>(storage) = ovlm;
};
}

void lv2_overlay::save(utils::serial& ar)
@@ -156,7 +159,7 @@ error_code sys_overlay_load_module_by_fd(vm::ptr<u32> ovlmid, u32 fd, u64 offset
return CELL_EINVAL;
}

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{

@@ -5,7 +5,7 @@
#include "sys_sync.h"
#include <vector>

struct lv2_overlay final : lv2_obj, ppu_module
struct lv2_overlay final : ppu_module<lv2_obj>
{
static const u32 id_base = 0x25000000;

@@ -15,7 +15,7 @@ struct lv2_overlay final : lv2_obj, ppu_module

lv2_overlay() = default;
lv2_overlay(utils::serial&){}
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};

@ -22,9 +22,9 @@ LOG_CHANNEL(sys_ppu_thread);
|
|||
// Simple structure to cleanup previous thread, because can't remove its own thread
|
||||
struct ppu_thread_cleaner
|
||||
{
|
||||
std::shared_ptr<void> old;
|
||||
shared_ptr<named_thread<ppu_thread>> old;
|
||||
|
||||
std::shared_ptr<void> clean(std::shared_ptr<void> ptr)
|
||||
shared_ptr<named_thread<ppu_thread>> clean(shared_ptr<named_thread<ppu_thread>> ptr)
|
||||
{
|
||||
return std::exchange(old, std::move(ptr));
|
||||
}
|
||||
|
@ -86,7 +86,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
|
|||
ppu_join_status old_status;
|
||||
|
||||
// Avoid cases where cleaning causes the destructor to be called inside IDM lock scope (for performance)
|
||||
std::shared_ptr<void> old_ppu;
|
||||
shared_ptr<named_thread<ppu_thread>> old_ppu;
|
||||
{
|
||||
lv2_obj::notify_all_t notify;
|
||||
lv2_obj::prepare_for_sleep(ppu);
|
||||
|
@ -115,7 +115,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
|
|||
if (old_status != ppu_join_status::joinable)
|
||||
{
|
||||
// Remove self ID from IDM, move owning ptr
|
||||
old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(std::move(idm::find_unlocked<named_thread<ppu_thread>>(ppu.id)->second));
|
||||
old_ppu = g_fxo->get<ppu_thread_cleaner>().clean(idm::withdraw<named_thread<ppu_thread>>(ppu.id, 0, std::false_type{}));
|
||||
}
|
||||
|
||||
// Get writers mask (wait for all current writers to quit)
|
||||
|
@ -147,7 +147,7 @@ void _sys_ppu_thread_exit(ppu_thread& ppu, u64 errorcode)
|
|||
if (old_ppu)
|
||||
{
|
||||
// It is detached from IDM now so join must be done explicitly now
|
||||
*static_cast<named_thread<ppu_thread>*>(old_ppu.get()) = thread_state::finished;
|
||||
*old_ppu = thread_state::finished;
|
||||
}
|
||||
|
||||
// Need to wait until the current writers finish
|
||||
|
@@ -435,7 +435,7 @@ error_code sys_ppu_thread_stop(ppu_thread& ppu, u32 thread_id)
return CELL_ENOSYS;
}

const auto thread = idm::check<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::check<named_thread<ppu_thread>>(thread_id, [](named_thread<ppu_thread>&) {});

if (!thread)
{
@@ -529,7 +529,7 @@ error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::p
p.arg0 = arg;
p.arg1 = unk;

return std::make_shared<named_thread<ppu_thread>>(p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
return stx::make_shared<named_thread<ppu_thread>>(p, ppu_name, prio, 1 - static_cast<int>(flags & 3));
});

if (!tid)
@@ -539,7 +539,7 @@ error_code _sys_ppu_thread_create(ppu_thread& ppu, vm::ptr<u64> thread_id, vm::p
return CELL_EAGAIN;
}

sys_ppu_thread.warning(u8"_sys_ppu_thread_create(): Thread “%s” created (id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)", ppu_name, tid, entry.addr, entry.rtoc, tls);
sys_ppu_thread.warning("_sys_ppu_thread_create(): Thread \"%s\" created (id=0x%x, func=*0x%x, rtoc=0x%x, user-tls=0x%x)", ppu_name, tid, entry.addr, entry.rtoc, tls);

ppu.check_state();
*thread_id = tid;
@@ -594,7 +594,7 @@ error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char>

sys_ppu_thread.warning("sys_ppu_thread_rename(thread_id=0x%x, name=*0x%x)", thread_id, name);

const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);

if (!thread)
{
@@ -618,7 +618,7 @@ error_code sys_ppu_thread_rename(ppu_thread& ppu, u32 thread_id, vm::cptr<char>
auto _name = make_single<std::string>(std::move(out_str));

// thread_ctrl name is not changed (TODO)
sys_ppu_thread.warning(u8"sys_ppu_thread_rename(): Thread renamed to “%s”", *_name);
sys_ppu_thread.warning("sys_ppu_thread_rename(): Thread renamed to \"%s\"", *_name);
thread->ppu_tname.store(std::move(_name));
thread_ctrl::set_name(*thread, thread->thread_name); // TODO: Currently sets debugger thread name only for local thread

@@ -631,7 +631,7 @@ error_code sys_ppu_thread_recover_page_fault(ppu_thread& ppu, u32 thread_id)

sys_ppu_thread.warning("sys_ppu_thread_recover_page_fault(thread_id=0x%x)", thread_id);

const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);

if (!thread)
{
@@ -647,7 +647,7 @@ error_code sys_ppu_thread_get_page_fault_context(ppu_thread& ppu, u32 thread_id,

sys_ppu_thread.todo("sys_ppu_thread_get_page_fault_context(thread_id=0x%x, ctxt=*0x%x)", thread_id, ctxt);

const auto thread = idm::get<named_thread<ppu_thread>>(thread_id);
const auto thread = idm::get_unlocked<named_thread<ppu_thread>>(thread_id);

if (!thread)
{

@@ -231,7 +231,7 @@ CellError process_is_spu_lock_line_reservation_address(u32 addr, u64 flags)
return CELL_EPERM;
default:
{
if (auto vm0 = idm::get<sys_vm_t>(sys_vm_t::find_id(addr)))
if (auto vm0 = idm::get_unlocked<sys_vm_t>(sys_vm_t::find_id(addr)))
{
// sys_vm area was not covering the address specified but made a reservation on the entire 256mb region
if (vm0->addr + vm0->size - 1 < addr)
@@ -433,16 +433,29 @@ void lv2_exitspawn(ppu_thread& ppu, std::vector<std::string>& argv, std::vector<

using namespace id_manager;

auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, vec = (reader_lock{g_mutex}, g_fxo->get<id_map<lv2_memory_container>>().vec)](u32 sdk_suggested_mem) mutable
shared_ptr<utils::serial> idm_capture = make_shared<utils::serial>();

if (!is_real_reboot)
{
reader_lock rlock{id_manager::g_mutex};
g_fxo->get<id_map<lv2_memory_container>>().save(*idm_capture);
stx::serial_breathe_and_tag(*idm_capture, "id_map<lv2_memory_container>", false);
}

idm_capture->set_reading_state();

auto func = [is_real_reboot, old_size = g_fxo->get<lv2_memory_container>().size, idm_capture](u32 sdk_suggested_mem) mutable
{
if (is_real_reboot)
{
// Do not save containers on actual reboot
vec.clear();
ensure(g_fxo->init<id_map<lv2_memory_container>>());
}
else
{
// Save LV2 memory containers
ensure(g_fxo->init<id_map<lv2_memory_container>>(*idm_capture));
}

// Save LV2 memory containers
ensure(g_fxo->init<id_map<lv2_memory_container>>())->vec = std::move(vec);

// Empty the containers, accumulate their total size
u32 total_size = 0;

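In the lv2_exitspawn hunk above, the container map is no longer copied into the callback by value; it is serialized into an in-memory utils::serial snapshot under a reader lock, and the callback later rebuilds the id_map from that snapshot. A minimal standalone sketch of the same snapshot-and-restore idea, using a plain byte buffer in place of utils::serial (all names are illustrative, not RPCS3 API):

// Sketch only: snapshot state under a shared lock, restore it later from the buffer.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <functional>
#include <memory>
#include <shared_mutex>
#include <vector>

static std::shared_mutex g_state_mutex;
static std::vector<uint32_t> g_containers = {0x1000, 0x2000}; // stand-in for the id_map

int main()
{
    // 1. Capture a byte-level snapshot while holding only a reader lock.
    auto snapshot = std::make_shared<std::vector<std::byte>>();
    {
        std::shared_lock lock(g_state_mutex);
        snapshot->resize(g_containers.size() * sizeof(uint32_t));
        std::memcpy(snapshot->data(), g_containers.data(), snapshot->size());
    }

    // 2. The continuation owns the snapshot, not the live containers.
    std::function<void()> on_reboot = [snapshot]
    {
        std::vector<uint32_t> restored(snapshot->size() / sizeof(uint32_t));
        std::memcpy(restored.data(), snapshot->data(), snapshot->size());
        // ... reinitialize the registry from 'restored' ...
    };

    g_containers.clear(); // live state may be torn down in the meantime
    on_reboot();          // rebuild from the captured snapshot
}
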
@@ -17,12 +17,12 @@
#include "sys_memory.h"
#include <span>

extern void dump_executable(std::span<const u8> data, const ppu_module* _module, std::string_view title_id);
extern void dump_executable(std::span<const u8> data, const ppu_module<lv2_obj>* _module, std::string_view title_id);

extern std::shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64, utils::serial* = nullptr);
extern shared_ptr<lv2_prx> ppu_load_prx(const ppu_prx_object&, bool virtual_load, const std::string&, s64, utils::serial* = nullptr);
extern void ppu_unload_prx(const lv2_prx& prx);
extern bool ppu_initialize(const ppu_module&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module& info, bool force_mem_release = false);
extern bool ppu_initialize(const ppu_module<lv2_obj>&, bool check_only = false, u64 file_size = 0);
extern void ppu_finalize(const ppu_module<lv2_obj>& info, bool force_mem_release = false);
extern void ppu_manual_load_imports_exports(u32 imports_start, u32 imports_size, u32 exports_start, u32 exports_size, std::basic_string<char>& loaded_flags);

LOG_CHANNEL(sys_prx);
@@ -35,7 +35,7 @@ extern const std::map<std::string_view, int> g_prx_list
{ "libaacenc_spurs.sprx", 0 },
{ "libac3dec.sprx", 0 },
{ "libac3dec2.sprx", 0 },
{ "libadec.sprx", 0 },
{ "libadec.sprx", 1 },
{ "libadec2.sprx", 0 },
{ "libadec_internal.sprx", 0 },
{ "libad_async.sprx", 0 },
@@ -235,7 +235,7 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
prx->name = std::move(name);
prx->path = std::move(path);

sys_prx.warning(u8"Ignored module: “%s” (id=0x%x)", vpath, idm::last_id());
sys_prx.warning("Ignored module: \"%s\" (id=0x%x)", vpath, idm::last_id());

return not_an_error(idm::last_id());
};
@@ -253,7 +253,7 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s
{
if (fs_error + 0u == CELL_ENOENT && is_firmware_sprx)
{
sys_prx.error(u8"firmware SPRX not found: “%s” (forcing HLE implementation)", vpath, idm::last_id());
sys_prx.error("firmware SPRX not found: \"%s\" (forcing HLE implementation)", vpath, idm::last_id());
return hle_load();
}

@@ -298,14 +298,14 @@ static error_code prx_load_module(const std::string& vpath, u64 flags, vm::ptr<s

ppu_initialize(*prx);

sys_prx.success(u8"Loaded module: “%s” (id=0x%x)", vpath, idm::last_id());
sys_prx.success("Loaded module: \"%s\" (id=0x%x)", vpath, idm::last_id());

return not_an_error(idm::last_id());
}

fs::file make_file_view(fs::file&& file, u64 offset, u64 size);

std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
std::function<void(void*)> lv2_prx::load(utils::serial& ar)
{
[[maybe_unused]] const s32 version = GET_SERIALIZATION_VERSION(lv2_prx_overlay);

@@ -316,11 +316,11 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
usz seg_count = 0;
ar.deserialize_vle(seg_count);

std::shared_ptr<lv2_prx> prx;
shared_ptr<lv2_prx> prx;

auto hle_load = [&]()
{
prx = std::make_shared<lv2_prx>();
prx = make_shared<lv2_prx>();
prx->path = path;
prx->name = path.substr(path.find_last_of(fs::delim) + 1);
};
@@ -337,7 +337,7 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
{
u128 klic = g_fxo->get<loaded_npdrm_keys>().last_key();
file = make_file_view(std::move(file), offset, umax);
prx = ppu_load_prx(ppu_prx_object{ decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic)) }, false, path, 0, &ar);
prx = ppu_load_prx(ppu_prx_object{decrypt_self(std::move(file), reinterpret_cast<u8*>(&klic))}, false, path, 0, &ar);
prx->m_loaded_flags = std::move(loaded_flags);
prx->m_external_loaded_flags = std::move(external_flags);

@@ -369,7 +369,11 @@ std::shared_ptr<void> lv2_prx::load(utils::serial& ar)
}

prx->state = state;
return prx;

return [prx](void* storage)
{
*static_cast<shared_ptr<lv2_obj>*>(storage) = prx;
};
}

void lv2_prx::save(utils::serial& ar)
@@ -407,7 +411,7 @@ error_code _sys_prx_load_module_by_fd(ppu_thread& ppu, s32 fd, u64 offset, u64 f

sys_prx.warning("_sys_prx_load_module_by_fd(fd=%d, offset=0x%x, flags=0x%x, pOpt=*0x%x)", fd, offset, flags, pOpt);

const auto file = idm::get<lv2_fs_object, lv2_file>(fd);
const auto file = idm::get_unlocked<lv2_fs_object, lv2_file>(fd);

if (!file)
{
@@ -519,7 +523,7 @@ error_code _sys_prx_start_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys
return CELL_EINVAL;
}

const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);

if (!prx)
{
@@ -600,7 +604,7 @@ error_code _sys_prx_stop_module(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<sys_

sys_prx.warning("_sys_prx_stop_module(id=0x%x, flags=0x%x, pOpt=*0x%x)", id, flags, pOpt);

const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);

if (!prx)
{
@@ -1013,7 +1017,7 @@ error_code _sys_prx_get_module_info(ppu_thread& ppu, u32 id, u64 flags, vm::ptr<

sys_prx.warning("_sys_prx_get_module_info(id=0x%x, flags=%d, pOpt=*0x%x)", id, flags, pOpt);

const auto prx = idm::get<lv2_obj, lv2_prx>(id);
const auto prx = idm::get_unlocked<lv2_obj, lv2_prx>(id);

if (!pOpt)
{

@@ -172,7 +172,7 @@ enum : u32
PRX_STATE_DESTROYED, // Last state, the module cannot be restarted
};

struct lv2_prx final : lv2_obj, ppu_module
struct lv2_prx final : ppu_module<lv2_obj>
{
static const u32 id_base = 0x23000000;

@@ -204,7 +204,7 @@ struct lv2_prx final : lv2_obj, ppu_module

lv2_prx() noexcept = default;
lv2_prx(utils::serial&) {}
static std::shared_ptr<void> load(utils::serial&);
static std::function<void(void*)> load(utils::serial&);
void save(utils::serial& ar);
};

@@ -425,7 +425,7 @@ error_code sys_rsx_context_iomap(cpu_thread& cpu, u32 context_id, u32 io, u32 ea
return CELL_EINVAL;
}

if ((addr == ea || !(addr % 0x1000'0000)) && idm::check<sys_vm_t>(sys_vm_t::find_id(addr)))
if ((addr == ea || !(addr % 0x1000'0000)) && idm::check_unlocked<sys_vm_t>(sys_vm_t::find_id(addr)))
{
// Virtual memory is disallowed
return CELL_EINVAL;

@@ -164,7 +164,7 @@ error_code sys_rsxaudio_initialize(vm::ptr<u32> handle)
return CELL_ENOMEM;
}

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(id);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(id);
std::lock_guard lock(rsxaudio_obj->mutex);

rsxaudio_obj->shmem = vm::addr_t{vm::alloc(sizeof(rsxaudio_shmem), vm::main)};
@@ -201,7 +201,7 @@ error_code sys_rsxaudio_finalize(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_finalize(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -219,7 +219,7 @@ error_code sys_rsxaudio_finalize(u32 handle)

{
std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};
rsxaudio_thread.rsxaudio_obj_ptr = {};
rsxaudio_thread.rsxaudio_obj_ptr = null_ptr;
}

rsxaudio_obj->init = false;
@@ -235,7 +235,7 @@ error_code sys_rsxaudio_import_shared_memory(u32 handle, vm::ptr<u64> addr)
{
sys_rsxaudio.trace("sys_rsxaudio_import_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -264,7 +264,7 @@ error_code sys_rsxaudio_unimport_shared_memory(u32 handle, vm::ptr<u64> addr /*
{
sys_rsxaudio.trace("sys_rsxaudio_unimport_shared_memory(handle=0x%x, addr=*0x%x)", handle, addr);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -287,7 +287,7 @@ error_code sys_rsxaudio_create_connection(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_create_connection(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -305,15 +305,15 @@ error_code sys_rsxaudio_create_connection(u32 handle)

const error_code port_create_status = [&]() -> error_code
{
if (auto queue1 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_1_id))
if (auto queue1 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_1_id))
{
rsxaudio_obj->event_queue[0] = queue1;

if (auto queue2 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_2_id))
if (auto queue2 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_2_id))
{
rsxaudio_obj->event_queue[1] = queue2;

if (auto queue3 = idm::get<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_3_id))
if (auto queue3 = idm::get_unlocked<lv2_obj, lv2_event_queue>(sh_page->ctrl.event_queue_3_id))
{
rsxaudio_obj->event_queue[2] = queue3;

@@ -350,7 +350,7 @@ error_code sys_rsxaudio_close_connection(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_close_connection(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -367,7 +367,7 @@ error_code sys_rsxaudio_close_connection(u32 handle)
{
auto& rsxaudio_thread = g_fxo->get<rsx_audio_data>();
std::lock_guard ra_obj_lock{rsxaudio_thread.rsxaudio_obj_upd_m};
rsxaudio_thread.rsxaudio_obj_ptr = {};
rsxaudio_thread.rsxaudio_obj_ptr = null_ptr;
}

for (u32 q_idx = 0; q_idx < SYS_RSXAUDIO_PORT_CNT; q_idx++)
@@ -382,7 +382,7 @@ error_code sys_rsxaudio_prepare_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_prepare_process(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -413,7 +413,7 @@ error_code sys_rsxaudio_start_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_start_process(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -463,7 +463,7 @@ error_code sys_rsxaudio_stop_process(u32 handle)
{
sys_rsxaudio.trace("sys_rsxaudio_stop_process(handle=0x%x)", handle);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{
@@ -511,7 +511,7 @@ error_code sys_rsxaudio_get_dma_param(u32 handle, u32 flag, vm::ptr<u64> out)
{
sys_rsxaudio.trace("sys_rsxaudio_get_dma_param(handle=0x%x, flag=0x%x, out=0x%x)", handle, flag, out);

const auto rsxaudio_obj = idm::get<lv2_obj, lv2_rsxaudio>(handle);
const auto rsxaudio_obj = idm::get_unlocked<lv2_obj, lv2_rsxaudio>(handle);

if (!rsxaudio_obj)
{

@@ -161,7 +161,7 @@ struct lv2_rsxaudio final : lv2_obj

vm::addr_t shmem{};

std::array<std::shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};
std::array<shared_ptr<lv2_event_queue>, SYS_RSXAUDIO_PORT_CNT> event_queue{};

// lv2 uses port memory addresses for their names
static constexpr std::array<u64, SYS_RSXAUDIO_PORT_CNT> event_port_name{ 0x8000000000400100, 0x8000000000400200, 0x8000000000400300 };
@@ -583,7 +583,7 @@ public:
atomic_t<bool> rsxaudio_ctx_allocated = false;

shared_mutex rsxaudio_obj_upd_m{};
std::shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};
shared_ptr<lv2_rsxaudio> rsxaudio_obj_ptr{};

void operator()();
rsxaudio_data_thread& operator=(thread_state state);

@@ -19,10 +19,9 @@ lv2_rwlock::lv2_rwlock(utils::serial& ar)
ar(owner);
}

std::shared_ptr<void> lv2_rwlock::load(utils::serial& ar)
std::function<void(void*)> lv2_rwlock::load(utils::serial& ar)
{
auto rwlock = std::make_shared<lv2_rwlock>(ar);
return lv2_obj::load(rwlock->key, rwlock);
return load_func(make_shared<lv2_rwlock>(stx::exact_t<utils::serial&>(ar)));
}

void lv2_rwlock::save(utils::serial& ar)
@@ -56,7 +55,7 @@ error_code sys_rwlock_create(ppu_thread& ppu, vm::ptr<u32> rw_lock_id, vm::ptr<s

if (auto error = lv2_obj::create<lv2_rwlock>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
return make_shared<lv2_rwlock>(protocol, ipc_key, _attr.name_u64);
}))
{
return error;

@@ -40,7 +40,7 @@ struct lv2_rwlock final : lv2_obj
}

lv2_rwlock(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};

@@ -20,10 +20,9 @@ lv2_sema::lv2_sema(utils::serial& ar)
ar(val);
}

std::shared_ptr<void> lv2_sema::load(utils::serial& ar)
std::function<void(void*)> lv2_sema::load(utils::serial& ar)
{
auto sema = std::make_shared<lv2_sema>(ar);
return lv2_obj::load(sema->key, sema);
return load_func(make_shared<lv2_sema>(stx::exact_t<utils::serial&>(ar)));
}

void lv2_sema::save(utils::serial& ar)
@@ -68,7 +67,7 @@ error_code sys_semaphore_create(ppu_thread& ppu, vm::ptr<u32> sem_id, vm::ptr<sy

if (auto error = lv2_obj::create<lv2_sema>(_attr.pshared, ipc_key, _attr.flags, [&]
{
return std::make_shared<lv2_sema>(protocol, ipc_key, _attr.name_u64, max_val, initial_val);
return make_shared<lv2_sema>(protocol, ipc_key, _attr.name_u64, max_val, initial_val);
}))
{
return error;

@@ -42,7 +42,7 @@ struct lv2_sema final : lv2_obj
}

lv2_sema(utils::serial& ar);
static std::shared_ptr<void> load(utils::serial& ar);
static std::function<void(void*)> load(utils::serial& ar);
void save(utils::serial& ar);
};