Mirror of https://github.com/RPCS3/rpcs3.git
Commit 8f9db92bf0: 102 changed files with 5156 additions and 4959 deletions
@@ -2,7 +2,7 @@

#define IS_LE_MACHINE

-union _CRT_ALIGN(16) u128
+union u128
{
u64 _u64[2];
s64 _s64[2];

@@ -107,12 +107,12 @@ union _CRT_ALIGN(16) u128
{
}

-__forceinline operator bool() const
+force_inline operator bool() const
{
return (data & mask) != 0;
}

-__forceinline bit_element& operator = (const bool right)
+force_inline bit_element& operator = (const bool right)
{
if (right)
{

@@ -125,7 +125,7 @@ union _CRT_ALIGN(16) u128
return *this;
}

-__forceinline bit_element& operator = (const bit_element& right)
+force_inline bit_element& operator = (const bit_element& right)
{
if (right)
{

@@ -249,77 +249,77 @@ union _CRT_ALIGN(16) u128
return ret;
}

-static __forceinline u128 add8(const u128& left, const u128& right)
+static force_inline u128 add8(const u128& left, const u128& right)
{
return fromV(_mm_add_epi8(left.vi, right.vi));
}

-static __forceinline u128 add16(const u128& left, const u128& right)
+static force_inline u128 add16(const u128& left, const u128& right)
{
return fromV(_mm_add_epi16(left.vi, right.vi));
}

-static __forceinline u128 add32(const u128& left, const u128& right)
+static force_inline u128 add32(const u128& left, const u128& right)
{
return fromV(_mm_add_epi32(left.vi, right.vi));
}

-static __forceinline u128 addfs(const u128& left, const u128& right)
+static force_inline u128 addfs(const u128& left, const u128& right)
{
return fromF(_mm_add_ps(left.vf, right.vf));
}

-static __forceinline u128 addfd(const u128& left, const u128& right)
+static force_inline u128 addfd(const u128& left, const u128& right)
{
return fromD(_mm_add_pd(left.vd, right.vd));
}

-static __forceinline u128 sub8(const u128& left, const u128& right)
+static force_inline u128 sub8(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi8(left.vi, right.vi));
}

-static __forceinline u128 sub16(const u128& left, const u128& right)
+static force_inline u128 sub16(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi16(left.vi, right.vi));
}

-static __forceinline u128 sub32(const u128& left, const u128& right)
+static force_inline u128 sub32(const u128& left, const u128& right)
{
return fromV(_mm_sub_epi32(left.vi, right.vi));
}

-static __forceinline u128 subfs(const u128& left, const u128& right)
+static force_inline u128 subfs(const u128& left, const u128& right)
{
return fromF(_mm_sub_ps(left.vf, right.vf));
}

-static __forceinline u128 subfd(const u128& left, const u128& right)
+static force_inline u128 subfd(const u128& left, const u128& right)
{
return fromD(_mm_sub_pd(left.vd, right.vd));
}

-static __forceinline u128 maxu8(const u128& left, const u128& right)
+static force_inline u128 maxu8(const u128& left, const u128& right)
{
return fromV(_mm_max_epu8(left.vi, right.vi));
}

-static __forceinline u128 minu8(const u128& left, const u128& right)
+static force_inline u128 minu8(const u128& left, const u128& right)
{
return fromV(_mm_min_epu8(left.vi, right.vi));
}

-static __forceinline u128 eq8(const u128& left, const u128& right)
+static force_inline u128 eq8(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi8(left.vi, right.vi));
}

-static __forceinline u128 eq16(const u128& left, const u128& right)
+static force_inline u128 eq16(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi16(left.vi, right.vi));
}

-static __forceinline u128 eq32(const u128& left, const u128& right)
+static force_inline u128 eq32(const u128& left, const u128& right)
{
return fromV(_mm_cmpeq_epi32(left.vi, right.vi));
}

@@ -334,17 +334,17 @@ union _CRT_ALIGN(16) u128
return (_u64[0] != right._u64[0]) || (_u64[1] != right._u64[1]);
}

-__forceinline u128 operator | (const u128& right) const
+force_inline u128 operator | (const u128& right) const
{
return fromV(_mm_or_si128(vi, right.vi));
}

-__forceinline u128 operator & (const u128& right) const
+force_inline u128 operator & (const u128& right) const
{
return fromV(_mm_and_si128(vi, right.vi));
}

-__forceinline u128 operator ^ (const u128& right) const
+force_inline u128 operator ^ (const u128& right) const
{
return fromV(_mm_xor_si128(vi, right.vi));
}

@@ -354,18 +354,18 @@ union _CRT_ALIGN(16) u128
return from64(~_u64[0], ~_u64[1]);
}

-__forceinline bool is_any_1() const // check if any bit is 1
+force_inline bool is_any_1() const // check if any bit is 1
{
return _u64[0] || _u64[1];
}

-__forceinline bool is_any_0() const // check if any bit is 0
+force_inline bool is_any_0() const // check if any bit is 0
{
return ~_u64[0] || ~_u64[1];
}

// result = (~left) & (right)
-static __forceinline u128 andnot(const u128& left, const u128& right)
+static force_inline u128 andnot(const u128& left, const u128& right)
{
return fromV(_mm_andnot_si128(left.vi, right.vi));
}

@@ -379,7 +379,7 @@ union _CRT_ALIGN(16) u128

std::string to_xyzw() const;

-static __forceinline u128 byteswap(const u128 val)
+static force_inline u128 byteswap(const u128 val)
{
u128 ret;
ret._u64[0] = _byteswap_uint64(val._u64[1]);
@@ -388,10 +388,11 @@ union _CRT_ALIGN(16) u128
}
};

-#ifndef InterlockedCompareExchange
-static __forceinline u128 InterlockedCompareExchange(volatile u128* dest, u128 exch, u128 comp)
+static_assert(__alignof(u128) == 16 && sizeof(u128) == 16, "Wrong u128 size or alignment");
+
+static force_inline u128 sync_val_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
-#if defined(__GNUG__)
+#if !defined(_MSC_VER)
auto res = __sync_val_compare_and_swap((volatile __int128_t*)dest, (__int128_t&)comp, (__int128_t&)exch);
return (u128&)res;
#else

@@ -399,60 +400,51 @@ static __forceinline u128 InterlockedCompareExchange(volatile u128* dest, u128 e
return comp;
#endif
}
-#endif

-static __forceinline bool InterlockedCompareExchangeTest(volatile u128* dest, u128 exch, u128 comp)
+static force_inline bool sync_bool_compare_and_swap(volatile u128* dest, u128 comp, u128 exch)
{
-#if defined(__GNUG__)
+#if !defined(_MSC_VER)
return __sync_bool_compare_and_swap((volatile __int128_t*)dest, (__int128_t&)comp, (__int128_t&)exch);
#else
return _InterlockedCompareExchange128((volatile long long*)dest, exch._u64[1], exch._u64[0], (long long*)&comp) != 0;
#endif
}

-#ifndef InterlockedExchange
-static __forceinline u128 InterlockedExchange(volatile u128* dest, u128 value)
+static force_inline u128 sync_lock_test_and_set(volatile u128* dest, u128 value)
{
while (true)
{
const u128 old = *(u128*)dest;
-if (InterlockedCompareExchangeTest(dest, value, old)) return old;
+if (sync_bool_compare_and_swap(dest, old, value)) return old;
}
}
-#endif

-#ifndef InterlockedOr
-static __forceinline u128 InterlockedOr(volatile u128* dest, u128 value)
+static force_inline u128 sync_fetch_and_or(volatile u128* dest, u128 value)
{
while (true)
{
const u128 old = *(u128*)dest;
-if (InterlockedCompareExchangeTest(dest, old | value, old)) return old;
+if (sync_bool_compare_and_swap(dest, old, value | old)) return old;
}
}
-#endif

-#ifndef InterlockedAnd
-static __forceinline u128 InterlockedAnd(volatile u128* dest, u128 value)
+static force_inline u128 sync_fetch_and_and(volatile u128* dest, u128 value)
{
while (true)
{
const u128 old = *(u128*)dest;
-if (InterlockedCompareExchangeTest(dest, old & value, old)) return old;
+if (sync_bool_compare_and_swap(dest, old, value & old)) return old;
}
}
-#endif

-#ifndef InterlockedXor
-static __forceinline u128 InterlockedXor(volatile u128* dest, u128 value)
+static force_inline u128 sync_fetch_and_xor(volatile u128* dest, u128 value)
{
while (true)
{
const u128 old = *(u128*)dest;
-if (InterlockedCompareExchangeTest(dest, old ^ value, old)) return old;
+if (sync_bool_compare_and_swap(dest, old, value ^ old)) return old;
}
}
-#endif

#define re16(val) _byteswap_ushort(val)
#define re32(val) _byteswap_ulong(val)

@@ -463,12 +455,12 @@ template<typename T, int size = sizeof(T)> struct se_t;

template<typename T> struct se_t<T, 1>
{
-static __forceinline u8 to_be(const T& src)
+static force_inline u8 to_be(const T& src)
{
return (u8&)src;
}

-static __forceinline T from_be(const u8 src)
+static force_inline T from_be(const u8 src)
{
return (T&)src;
}

@@ -476,12 +468,12 @@ template<typename T> struct se_t<T, 1>

template<typename T> struct se_t<T, 2>
{
-static __forceinline u16 to_be(const T& src)
+static force_inline u16 to_be(const T& src)
{
return _byteswap_ushort((u16&)src);
}

-static __forceinline T from_be(const u16 src)
+static force_inline T from_be(const u16 src)
{
const u16 res = _byteswap_ushort(src);
return (T&)res;

@@ -490,12 +482,12 @@ template<typename T> struct se_t<T, 2>

template<typename T> struct se_t<T, 4>
{
-static __forceinline u32 to_be(const T& src)
+static force_inline u32 to_be(const T& src)
{
return _byteswap_ulong((u32&)src);
}

-static __forceinline T from_be(const u32 src)
+static force_inline T from_be(const u32 src)
{
const u32 res = _byteswap_ulong(src);
return (T&)res;

@@ -504,12 +496,12 @@ template<typename T> struct se_t<T, 4>

template<typename T> struct se_t<T, 8>
{
-static __forceinline u64 to_be(const T& src)
+static force_inline u64 to_be(const T& src)
{
return _byteswap_uint64((u64&)src);
}

-static __forceinline T from_be(const u64 src)
+static force_inline T from_be(const u64 src)
{
const u64 res = _byteswap_uint64(src);
return (T&)res;

@@ -518,12 +510,12 @@ template<typename T> struct se_t<T, 8>

template<typename T> struct se_t<T, 16>
{
-static __forceinline u128 to_be(const T& src)
+static force_inline u128 to_be(const T& src)
{
return u128::byteswap((u128&)src);
}

-static __forceinline T from_be(const u128& src)
+static force_inline T from_be(const u128& src)
{
const u128 res = u128::byteswap(src);
return (T&)res;

@@ -613,7 +605,7 @@ private:
template<typename Tto, typename Tfrom, int mode>
struct _convert
{
-static __forceinline be_t<Tto>& func(Tfrom& be_value)
+static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = be_value;
return (be_t<Tto>&)res;

@@ -623,7 +615,7 @@ private:
template<typename Tto, typename Tfrom>
struct _convert<Tto, Tfrom, 1>
{
-static __forceinline be_t<Tto>& func(Tfrom& be_value)
+static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = se_t<Tto, sizeof(Tto)>::func(se_t<Tfrom, sizeof(Tfrom)>::func(be_value));
return (be_t<Tto>&)res;

@@ -633,7 +625,7 @@ private:
template<typename Tto, typename Tfrom>
struct _convert<Tto, Tfrom, 2>
{
-static __forceinline be_t<Tto>& func(Tfrom& be_value)
+static force_inline be_t<Tto>& func(Tfrom& be_value)
{
Tto res = be_value >> ((sizeof(Tfrom)-sizeof(Tto)) * 8);
return (be_t<Tto>&)res;

@@ -683,7 +675,7 @@ public:
}

//get value in current machine byte ordering
-__forceinline type value() const
+force_inline type value() const
{
#ifdef IS_LE_MACHINE
return ToLE();

@@ -882,7 +874,7 @@ template<typename T, typename T1, T1 value> struct _se<be_t<T>, T1, value> : pub
template<typename Tto, typename Tfrom>
struct convert_le_be_t
{
-static Tto func(Tfrom&& value)
+static Tto func(Tfrom value)
{
return (Tto)value;
}

@@ -891,7 +883,7 @@ struct convert_le_be_t
template<typename Tt, typename Tt1, typename Tfrom>
struct convert_le_be_t<be_t<Tt, Tt1>, Tfrom>
{
-static be_t<Tt, Tt1> func(Tfrom&& value)
+static be_t<Tt, Tt1> func(Tfrom value)
{
return be_t<Tt, Tt1>::make(value);
}

@@ -900,7 +892,7 @@ struct convert_le_be_t<be_t<Tt, Tt1>, Tfrom>
template<typename Tt, typename Tt1, typename Tf, typename Tf1>
struct convert_le_be_t<be_t<Tt, Tt1>, be_t<Tf, Tf1>>
{
-static be_t<Tt, Tt1> func(be_t<Tf, Tf1>&& value)
+static be_t<Tt, Tt1> func(be_t<Tf, Tf1> value)
{
return value;
}

@@ -909,20 +901,24 @@ struct convert_le_be_t<be_t<Tt, Tt1>, be_t<Tf, Tf1>>
template<typename Tto, typename Tf, typename Tf1>
struct convert_le_be_t<Tto, be_t<Tf, Tf1>>
{
-static Tto func(be_t<Tf, Tf1>&& value)
+static Tto func(be_t<Tf, Tf1> value)
{
return value.value();
}
};

template<typename Tto, typename Tfrom>
-__forceinline Tto convert_le_be(Tfrom&& value)
+force_inline Tto convert_le_be(Tfrom value)
{
return convert_le_be_t<Tto, Tfrom>::func(value);
}

template<typename Tto, typename Tfrom>
-__forceinline void convert_le_be(Tto& dst, Tfrom&& src)
+force_inline void convert_le_be(Tto& dst, Tfrom src)
{
dst = convert_le_be_t<Tto, Tfrom>::func(src);
}
+
+template<typename T> using le_t = T;
+
+template<typename T> struct to_le_t { using type = T; };
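For orientation (not part of the commit): a minimal sketch of how the renamed 128-bit helpers above are intended to be used. Note the argument order of sync_bool_compare_and_swap is (dest, comparand, exchange); the retry loop mirrors the sync_fetch_and_or wrapper in the diff. The function and variable names here are hypothetical.

static u128 fetch_or_bits(volatile u128* dest, const u128& mask)
{
	while (true)
	{
		const u128 old = *(u128*)dest; // unsynchronized snapshot
		// retry until the snapshot is still current, then publish old | mask
		if (sync_bool_compare_and_swap(dest, old, old | mask)) return old;
	}
}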
@@ -136,8 +136,8 @@ bool fs::stat(const std::string& path, stat_t& info)
info.mtime = to_time_t(attrs.ftLastWriteTime);
info.ctime = to_time_t(attrs.ftCreationTime);
#else
-struct stat64 file_info;
-if (stat64(path.c_str(), &file_info) < 0)
+struct stat file_info;
+if (stat(path.c_str(), &file_info) < 0)
{
return false;
}

@@ -174,8 +174,8 @@ bool fs::is_file(const std::string& file)

return (attrs & FILE_ATTRIBUTE_DIRECTORY) == 0;
#else
-struct stat64 file_info;
-if (stat64(file.c_str(), &file_info) < 0)
+struct stat file_info;
+if (stat(file.c_str(), &file_info) < 0)
{
return false;
}

@@ -195,8 +195,8 @@ bool fs::is_dir(const std::string& dir)

return (attrs & FILE_ATTRIBUTE_DIRECTORY) != 0;
#else
-struct stat64 file_info;
-if (stat64(dir.c_str(), &file_info) < 0)
+struct stat file_info;
+if (stat(dir.c_str(), &file_info) < 0)
{
return false;
}

@@ -364,7 +364,7 @@ bool fs::truncate_file(const std::string& file, u64 length)
#ifdef _WIN32
if (!::truncate_file(file, length))
#else
-if (truncate64(file.c_str(), length))
+if (::truncate(file.c_str(), length))
#endif
{
LOG_WARNING(GENERAL, "Error resizing file '%s' to 0x%llx: 0x%llx", file, length, GET_API_ERROR);

@@ -480,7 +480,7 @@ bool fs::file::trunc(u64 size) const

return true; // TODO
#else
-return !ftruncate64(m_fd, size);
+return !::ftruncate(m_fd, size);
#endif
}

@@ -501,8 +501,8 @@ bool fs::file::stat(stat_t& info) const
info.mtime = to_time_t(basic_info.ChangeTime);
info.ctime = to_time_t(basic_info.CreationTime);
#else
-struct stat64 file_info;
-if (fstat64(m_fd, &file_info) < 0)
+struct stat file_info;
+if (fstat(m_fd, &file_info) < 0)
{
return false;
}

@@ -580,7 +580,7 @@ u64 fs::file::seek(u64 offset, u32 mode) const

return pos.QuadPart;
#else
-return lseek64(m_fd, offset, mode);
+return ::lseek(m_fd, offset, mode);
#endif
}

@@ -595,8 +595,8 @@ u64 fs::file::size() const

return size.QuadPart;
#else
-struct stat64 file_info;
-if (fstat64(m_fd, &file_info) < 0)
+struct stat file_info;
+if (::fstat(m_fd, &file_info) < 0)
{
return -1;
}

@@ -766,8 +766,8 @@ bool fs::dir::get_next(std::string& name, stat_t& info)
#else
const auto found = ::readdir((DIR*)m_dd);

-struct stat64 file_info;
-if (!found || fstatat64(::dirfd((DIR*)m_dd), found->d_name, &file_info, 0) < 0)
+struct stat file_info;
+if (!found || ::fstatat(::dirfd((DIR*)m_dd), found->d_name, &file_info, 0) < 0)
{
return false;
}
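The *64 variants (stat64, fstat64, fstatat64, lseek64, ftruncate64, truncate64) are replaced with the plain POSIX names, which already handle 64-bit offsets on the targeted platforms. A standalone sketch of the same pattern, assuming _FILE_OFFSET_BITS=64 where relevant; the helper name is made up and not part of the diff:

#include <sys/stat.h>
#include <string>

// Hypothetical helper: returns the size of a file, or -1 on error.
long long file_size(const std::string& path)
{
	struct stat info;
	if (::stat(path.c_str(), &info) < 0)
	{
		return -1;
	}
	return info.st_size;
}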
Utilities/GNU.h (325 changed lines)

@@ -2,22 +2,28 @@

#include <emmintrin.h>

-#ifdef _WIN32
+#if defined(_MSC_VER)
#define thread_local __declspec(thread)
#elif __APPLE__
#define thread_local __thread
#endif

-#ifdef _WIN32
-#define __noinline __declspec(noinline)
+#if defined(_MSC_VER)
+#define never_inline __declspec(noinline)
#else
-#define __noinline __attribute__((noinline))
+#define never_inline __attribute__((noinline))
#endif

-#ifdef _WIN32
-#define __safebuffers __declspec(safebuffers)
+#if defined(_MSC_VER)
+#define safe_buffers __declspec(safebuffers)
#else
-#define __safebuffers
+#define safe_buffers
#endif

+#if defined(_MSC_VER)
+#define force_inline __forceinline
+#else
+#define force_inline __attribute__((always_inline))
+#endif
+
template<size_t size>

@@ -37,7 +43,7 @@ void strcpy_trunc(char(&dst)[size], const char(&src)[rsize])
}

#if defined(__GNUG__)
#include <cmath>

-#include <stdlib.h>
+#include <cstdint>

@@ -46,12 +52,10 @@ void strcpy_trunc(char(&dst)[size], const char(&src)[rsize])
#endif

#define _fpclass(x) std::fpclassify(x)
-#define __forceinline __attribute__((always_inline))
#define _byteswap_ushort(x) __builtin_bswap16(x)
#define _byteswap_ulong(x) __builtin_bswap32(x)
#define _byteswap_uint64(x) __builtin_bswap64(x)
#define INFINITE 0xFFFFFFFF
-#define _CRT_ALIGN(x) __attribute__((aligned(x)))

inline uint64_t __umulh(uint64_t a, uint64_t b)
{

@@ -67,10 +71,8 @@ inline int64_t __mulh(int64_t a, int64_t b)
return result;
}

-
void * _aligned_malloc(size_t size, size_t alignment);

#ifdef __APPLE__

int clock_gettime(int foo, struct timespec *ts);
-#define wxIsNaN(x) ((x) != (x))
@@ -80,220 +82,227 @@ int clock_gettime(int foo, struct timespec *ts);

#endif /* __APPLE__ */

#define _aligned_free free

#define DWORD int32_t
#endif

-#ifndef InterlockedCompareExchange
-static __forceinline uint8_t InterlockedCompareExchange(volatile uint8_t* dest, uint8_t exch, uint8_t comp)
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_val_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
{
-#if defined(__GNUG__)
return __sync_val_compare_and_swap(dest, comp, exch);
-#else
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, bool>::type sync_bool_compare_and_swap(volatile T* dest, T2 comp, T2 exch)
+{
+return __sync_bool_compare_and_swap(dest, comp, exch);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_lock_test_and_set(volatile T* dest, T2 value)
+{
+return __sync_lock_test_and_set(dest, value);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_fetch_and_add(volatile T* dest, T2 value)
+{
+return __sync_fetch_and_add(dest, value);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_fetch_and_sub(volatile T* dest, T2 value)
+{
+return __sync_fetch_and_sub(dest, value);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_fetch_and_or(volatile T* dest, T2 value)
+{
+return __sync_fetch_and_or(dest, value);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_fetch_and_and(volatile T* dest, T2 value)
+{
+return __sync_fetch_and_and(dest, value);
+}
+
+template<typename T, typename T2> static inline typename std::enable_if<std::is_arithmetic<T>::value, T>::type sync_fetch_and_xor(volatile T* dest, T2 value)
+{
+return __sync_fetch_and_xor(dest, value);
+}
+
+#endif /* __GNUG__ */
+
+#if defined(_MSC_VER)
+
+// atomic compare and swap functions
+
+static force_inline uint8_t sync_val_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
+{
return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
-#endif
}
-static __forceinline uint16_t InterlockedCompareExchange(volatile uint16_t* dest, uint16_t exch, uint16_t comp)
+
+static force_inline uint16_t sync_val_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
-#if defined(__GNUG__)
-return __sync_val_compare_and_swap(dest, comp, exch);
-#else
return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
-#endif
}
-static __forceinline uint32_t InterlockedCompareExchange(volatile uint32_t* dest, uint32_t exch, uint32_t comp)
+
+static force_inline uint32_t sync_val_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
-#if defined(__GNUG__)
-return __sync_val_compare_and_swap(dest, comp, exch);
-#else
return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
-#endif
}
-static __forceinline uint64_t InterlockedCompareExchange(volatile uint64_t* dest, uint64_t exch, uint64_t comp)
+
+static force_inline uint64_t sync_val_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
-#if defined(__GNUG__)
-return __sync_val_compare_and_swap(dest, comp, exch);
-#else
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
-#endif
}
-#endif

-static __forceinline bool InterlockedCompareExchangeTest(volatile uint8_t* dest, uint8_t exch, uint8_t comp)
+static force_inline bool sync_bool_compare_and_swap(volatile uint8_t* dest, uint8_t comp, uint8_t exch)
{
-#if defined(__GNUG__)
-return __sync_bool_compare_and_swap(dest, comp, exch);
-#else
return (uint8_t)_InterlockedCompareExchange8((volatile char*)dest, exch, comp) == comp;
-#endif
}
-static __forceinline bool InterlockedCompareExchangeTest(volatile uint16_t* dest, uint16_t exch, uint16_t comp)
+
+static force_inline bool sync_bool_compare_and_swap(volatile uint16_t* dest, uint16_t comp, uint16_t exch)
{
-#if defined(__GNUG__)
-return __sync_bool_compare_and_swap(dest, comp, exch);
-#else
return (uint16_t)_InterlockedCompareExchange16((volatile short*)dest, exch, comp) == comp;
-#endif
}
-static __forceinline bool InterlockedCompareExchangeTest(volatile uint32_t* dest, uint32_t exch, uint32_t comp)
+
+static force_inline bool sync_bool_compare_and_swap(volatile uint32_t* dest, uint32_t comp, uint32_t exch)
{
-#if defined(__GNUG__)
-return __sync_bool_compare_and_swap(dest, comp, exch);
-#else
return (uint32_t)_InterlockedCompareExchange((volatile long*)dest, exch, comp) == comp;
-#endif
}
-static __forceinline bool InterlockedCompareExchangeTest(volatile uint64_t* dest, uint64_t exch, uint64_t comp)
+
+static force_inline bool sync_bool_compare_and_swap(volatile uint64_t* dest, uint64_t comp, uint64_t exch)
{
-#if defined(__GNUG__)
-return __sync_bool_compare_and_swap(dest, comp, exch);
-#else
return (uint64_t)_InterlockedCompareExchange64((volatile long long*)dest, exch, comp) == comp;
-#endif
}

-#ifndef InterlockedExchange
-static __forceinline uint8_t InterlockedExchange(volatile uint8_t* dest, uint8_t value)
+// atomic exchange functions
+
+static force_inline uint8_t sync_lock_test_and_set(volatile uint8_t* dest, uint8_t value)
{
-#if defined(__GNUG__)
-return __sync_lock_test_and_set(dest, value);
-#else
return _InterlockedExchange8((volatile char*)dest, value);
-#endif
}
-static __forceinline uint16_t InterlockedExchange(volatile uint16_t* dest, uint16_t value)
+
+static force_inline uint16_t sync_lock_test_and_set(volatile uint16_t* dest, uint16_t value)
{
-#if defined(__GNUG__)
-return __sync_lock_test_and_set(dest, value);
-#else
return _InterlockedExchange16((volatile short*)dest, value);
-#endif
}
-static __forceinline uint32_t InterlockedExchange(volatile uint32_t* dest, uint32_t value)
+
+static force_inline uint32_t sync_lock_test_and_set(volatile uint32_t* dest, uint32_t value)
{
-#if defined(__GNUG__)
-return __sync_lock_test_and_set(dest, value);
-#else
return _InterlockedExchange((volatile long*)dest, value);
-#endif
}
-static __forceinline uint64_t InterlockedExchange(volatile uint64_t* dest, uint64_t value)
+
+static force_inline uint64_t sync_lock_test_and_set(volatile uint64_t* dest, uint64_t value)
{
-#if defined(__GNUG__)
-return __sync_lock_test_and_set(dest, value);
-#else
return _InterlockedExchange64((volatile long long*)dest, value);
-#endif
}
-#endif

-#ifndef InterlockedOr
-static __forceinline uint8_t InterlockedOr(volatile uint8_t* dest, uint8_t value)
+// atomic add functions
+
+static force_inline uint8_t sync_fetch_and_add(volatile uint8_t* dest, uint8_t value)
+{
+return _InterlockedExchangeAdd8((volatile char*)dest, value);
+}
+
+static force_inline uint16_t sync_fetch_and_add(volatile uint16_t* dest, uint16_t value)
+{
+return _InterlockedExchangeAdd16((volatile short*)dest, value);
+}
+
+static force_inline uint32_t sync_fetch_and_add(volatile uint32_t* dest, uint32_t value)
+{
+return _InterlockedExchangeAdd((volatile long*)dest, value);
+}
+
+static force_inline uint64_t sync_fetch_and_add(volatile uint64_t* dest, uint64_t value)
+{
+return _InterlockedExchangeAdd64((volatile long long*)dest, value);
+}
+
+// atomic sub functions
+
+static force_inline uint8_t sync_fetch_and_sub(volatile uint8_t* dest, uint8_t value)
+{
+return _InterlockedExchangeAdd8((volatile char*)dest, -(char)value);
+}
+
+static force_inline uint16_t sync_fetch_and_sub(volatile uint16_t* dest, uint16_t value)
+{
+return _InterlockedExchangeAdd16((volatile short*)dest, -(short)value);
+}
+
+static force_inline uint32_t sync_fetch_and_sub(volatile uint32_t* dest, uint32_t value)
+{
+return _InterlockedExchangeAdd((volatile long*)dest, -(long)value);
+}
+
+static force_inline uint64_t sync_fetch_and_sub(volatile uint64_t* dest, uint64_t value)
+{
+return _InterlockedExchangeAdd64((volatile long long*)dest, -(long long)value);
+}
+
+// atomic bitwise or functions
+
+static force_inline uint8_t sync_fetch_and_or(volatile uint8_t* dest, uint8_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_or(dest, value);
-#else
return _InterlockedOr8((volatile char*)dest, value);
-#endif
}
-static __forceinline uint16_t InterlockedOr(volatile uint16_t* dest, uint16_t value)
+
+static force_inline uint16_t sync_fetch_and_or(volatile uint16_t* dest, uint16_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_or(dest, value);
-#else
return _InterlockedOr16((volatile short*)dest, value);
-#endif
}
-static __forceinline uint32_t InterlockedOr(volatile uint32_t* dest, uint32_t value)
+
+static force_inline uint32_t sync_fetch_and_or(volatile uint32_t* dest, uint32_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_or(dest, value);
-#else
return _InterlockedOr((volatile long*)dest, value);
-#endif
}
-static __forceinline uint64_t InterlockedOr(volatile uint64_t* dest, uint64_t value)
+
+static force_inline uint64_t sync_fetch_and_or(volatile uint64_t* dest, uint64_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_or(dest, value);
-#else
return _InterlockedOr64((volatile long long*)dest, value);
-#endif
}
-#endif

-#ifndef InterlockedAnd
-static __forceinline uint8_t InterlockedAnd(volatile uint8_t* dest, uint8_t value)
+// atomic bitwise and functions
+
+static force_inline uint8_t sync_fetch_and_and(volatile uint8_t* dest, uint8_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_and(dest, value);
-#else
return _InterlockedAnd8((volatile char*)dest, value);
-#endif
}
-static __forceinline uint16_t InterlockedAnd(volatile uint16_t* dest, uint16_t value)
+
+static force_inline uint16_t sync_fetch_and_and(volatile uint16_t* dest, uint16_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_and(dest, value);
-#else
return _InterlockedAnd16((volatile short*)dest, value);
-#endif
}
-static __forceinline uint32_t InterlockedAnd(volatile uint32_t* dest, uint32_t value)
+
+static force_inline uint32_t sync_fetch_and_and(volatile uint32_t* dest, uint32_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_and(dest, value);
-#else
return _InterlockedAnd((volatile long*)dest, value);
-#endif
}
-static __forceinline uint64_t InterlockedAnd(volatile uint64_t* dest, uint64_t value)
+
+static force_inline uint64_t sync_fetch_and_and(volatile uint64_t* dest, uint64_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_and(dest, value);
-#else
return _InterlockedAnd64((volatile long long*)dest, value);
-#endif
}
-#endif

-#ifndef InterlockedXor
-static __forceinline uint8_t InterlockedXor(volatile uint8_t* dest, uint8_t value)
+// atomic bitwise xor functions
+
+static force_inline uint8_t sync_fetch_and_xor(volatile uint8_t* dest, uint8_t value)
{
-#if defined(__GNUG__)
-return __sync_fetch_and_xor(dest, value);
-#else
return _InterlockedXor8((volatile char*)dest, value);
-#endif
}
-static __forceinline uint16_t InterlockedXor(volatile uint16_t* dest, uint16_t value)
-{
-#if defined(__GNUG__)
-return __sync_fetch_and_xor(dest, value);
-#else
-return _InterlockedXor16((volatile short*)dest, value);
-#endif
-}
-static __forceinline uint32_t InterlockedXor(volatile uint32_t* dest, uint32_t value)
-{
-#if defined(__GNUG__)
-return __sync_fetch_and_xor(dest, value);
-#else
-return _InterlockedXor((volatile long*)dest, value);
-#endif
-}
-static __forceinline uint64_t InterlockedXor(volatile uint64_t* dest, uint64_t value)
-{
-#if defined(__GNUG__)
-return __sync_fetch_and_xor(dest, value);
-#else
-return _InterlockedXor64((volatile long long*)dest, value);
-#endif
-}
-#endif
-
-static __forceinline uint32_t cntlz32(uint32_t arg)
+
+static force_inline uint16_t sync_fetch_and_xor(volatile uint16_t* dest, uint16_t value)
+{
+return _InterlockedXor16((volatile short*)dest, value);
+}
+
+static force_inline uint32_t sync_fetch_and_xor(volatile uint32_t* dest, uint32_t value)
+{
+return _InterlockedXor((volatile long*)dest, value);
+}
+
+static force_inline uint64_t sync_fetch_and_xor(volatile uint64_t* dest, uint64_t value)
+{
+return _InterlockedXor64((volatile long long*)dest, value);
+}
+
+#endif /* _MSC_VER */
+
+static force_inline uint32_t cntlz32(uint32_t arg)
{
#if defined(_MSC_VER)
unsigned long res;

@@ -317,7 +326,7 @@ static __forceinline uint32_t cntlz32(uint32_t arg)
#endif
}

-static __forceinline uint64_t cntlz64(uint64_t arg)
+static force_inline uint64_t cntlz64(uint64_t arg)
{
#if defined(_MSC_VER)
unsigned long res;
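A short usage sketch (not from the commit) of the renamed portability macros and the sync_* wrappers defined above; the counter helper and its names are hypothetical:

#include <cstdint>

// Atomically bump a shared counter and return its previous value,
// via __sync_fetch_and_add on GCC/Clang or _InterlockedExchangeAdd on MSVC.
static force_inline uint32_t next_id(volatile uint32_t* counter)
{
	return sync_fetch_and_add(counter, 1u);
}

// Kept out of line on purpose (cold path), using the renamed never_inline.
static never_inline void report_overflow(uint32_t id);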
@@ -129,8 +129,7 @@ static struct { inline operator Log::LogType() { return Log::LogType::TTY; } } T
void log_message(Log::LogType type, Log::LogSeverity sev, const char* text);
void log_message(Log::LogType type, Log::LogSeverity sev, std::string text);

-template<typename... Targs>
-__noinline void log_message(Log::LogType type, Log::LogSeverity sev, const char* fmt, Targs... args)
+template<typename... Args> never_inline void log_message(Log::LogType type, Log::LogSeverity sev, const char* fmt, Args... args)
{
log_message(type, sev, fmt::Format(fmt, fmt::do_unveil(args)...));
}
@@ -61,7 +61,7 @@ public:
Show();
}

-__forceinline void Update(const u8 thread_id, const u64 value, const wxString& msg)
+force_inline void Update(const u8 thread_id, const u64 value, const wxString& msg)
{
if(thread_id > m_cores) return;

@@ -208,49 +208,6 @@ std::vector<std::string> fmt::split(const std::string& source, std::initializer_
return std::move(result);
}

-std::string fmt::merge(std::vector<std::string> source, const std::string& separator)
-{
-if (!source.size())
-{
-return "";
-}
-
-std::string result;
-
-for (int i = 0; i < source.size() - 1; ++i)
-{
-result += source[i] + separator;
-}
-
-return result + source.back();
-}
-
-std::string fmt::merge(std::initializer_list<std::vector<std::string>> sources, const std::string& separator)
-{
-if (!sources.size())
-{
-return "";
-}
-
-std::string result;
-bool first = true;
-
-for (auto &v : sources)
-{
-if (first)
-{
-result = fmt::merge(v, separator);
-first = false;
-}
-else
-{
-result += separator + fmt::merge(v, separator);
-}
-}
-
-return result;
-}
-
std::string fmt::tolower(std::string source)
{
std::transform(source.begin(), source.end(), source.begin(), ::tolower);
@@ -95,8 +95,7 @@ namespace fmt
T by_value(T x) { return x; }

//wrapper to deal with advance sprintf formating options with automatic length finding
-template<typename ... Args>
-std::string Format(const char* fmt, Args ... parameters)
+template<typename... Args> std::string Format(const char* fmt, Args... parameters)
{
size_t length = 256;
std::string str;

@@ -139,7 +138,7 @@ namespace fmt

if (src.substr(pos, comp_length) == list[i].first)
{
-src = (pos ? src.substr(0, pos) + list[i].second : list[i].second) + std::string(src.c_str() + pos + comp_length);
+src = (pos ? src.substr(0, pos) + list[i].second : list[i].second) + src.substr(pos + comp_length);
pos += list[i].second.length() - 1;
break;
}

@@ -163,7 +162,7 @@ namespace fmt

if (src.substr(pos, comp_length) == list[i].first)
{
-src = (pos ? src.substr(0, pos) + list[i].second() : list[i].second()) + std::string(src.c_str() + pos + comp_length);
+src = (pos ? src.substr(0, pos) + list[i].second() : list[i].second()) + src.substr(pos + comp_length);
pos += list[i].second().length() - 1;
break;
}

@@ -182,7 +181,7 @@ namespace fmt
{
typedef T result_type;

-__forceinline static result_type get_value(const T& arg)
+force_inline static result_type get_value(const T& arg)
{
return arg;
}

@@ -193,7 +192,7 @@ namespace fmt
{
typedef const char* result_type;

-__forceinline static result_type get_value(const char* arg)
+force_inline static result_type get_value(const char* arg)
{
return arg;
}

@@ -204,7 +203,7 @@ namespace fmt
{
typedef const char* result_type;

-__forceinline static result_type get_value(const char(&arg)[N])
+force_inline static result_type get_value(const char(&arg)[N])
{
return arg;
}

@@ -215,7 +214,7 @@ namespace fmt
{
typedef const char* result_type;

-__forceinline static result_type get_value(const std::string& arg)
+force_inline static result_type get_value(const std::string& arg)
{
return arg.c_str();
}

@@ -226,7 +225,7 @@ namespace fmt
{
typedef typename std::underlying_type<T>::type result_type;

-__forceinline static result_type get_value(const T& arg)
+force_inline static result_type get_value(const T& arg)
{
return static_cast<result_type>(arg);
}

@@ -237,14 +236,14 @@ namespace fmt
{
typedef typename unveil<T>::result_type result_type;

-__forceinline static result_type get_value(const be_t<T, T2>& arg)
+force_inline static result_type get_value(const be_t<T, T2>& arg)
{
return unveil<T>::get_value(arg.value());
}
};

template<typename T>
-__forceinline typename unveil<T>::result_type do_unveil(const T& arg)
+force_inline typename unveil<T>::result_type do_unveil(const T& arg)
{
return unveil<T>::get_value(arg);
}

@@ -266,8 +265,7 @@ namespace fmt
vm::psv::ref (fmt::unveil) (vm_ref.h)

*/
-template<typename... Args>
-__forceinline __safebuffers std::string format(const char* fmt, Args... args)
+template<typename... Args> force_inline safe_buffers std::string format(const char* fmt, Args... args)
{
return Format(fmt, do_unveil(args)...);
}

@@ -291,8 +289,54 @@ namespace fmt
std::vector<std::string> rSplit(const std::string& source, const std::string& delim);

std::vector<std::string> split(const std::string& source, std::initializer_list<std::string> separators, bool is_skip_empty = true);
-std::string merge(std::vector<std::string> source, const std::string& separator);
-std::string merge(std::initializer_list<std::vector<std::string>> sources, const std::string& separator);
+
+template<typename T>
+std::string merge(const T& source, const std::string& separator)
+{
+if (!source.size())
+{
+return{};
+}
+
+std::string result;
+
+auto it = source.begin();
+auto end = source.end();
+for (--end; it != end; ++it)
+{
+result += *it + separator;
+}
+
+return result + source.back();
+}
+
+template<typename T>
+std::string merge(std::initializer_list<T> sources, const std::string& separator)
+{
+if (!sources.size())
+{
+return{};
+}
+
+std::string result;
+bool first = true;
+
+for (auto &v : sources)
+{
+if (first)
+{
+result = fmt::merge(v, separator);
+first = false;
+}
+else
+{
+result += separator + fmt::merge(v, separator);
+}
+}
+
+return result;
+}
+
std::string tolower(std::string source);
std::string toupper(std::string source);
std::string escape(std::string source);
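The merge() overloads removed from StrFmt.cpp come back as the header templates above, generic over any container of strings. A usage sketch with example values (not from the commit):

#include <string>
#include <vector>

void merge_example()
{
	std::vector<std::string> parts = { "dev_hdd0", "game", "TEST12345" };
	std::string path = fmt::merge(parts, "/");             // "dev_hdd0/game/TEST12345"
	std::string twice = fmt::merge({ parts, parts }, "/"); // both lists joined with "/"
}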
@@ -1002,10 +1002,10 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)

switch (d_size)
{
-case 1: reg_value = vm::priv_ref<atomic_le_t<u8>>(addr).exchange((u8)reg_value); break;
-case 2: reg_value = vm::priv_ref<atomic_le_t<u16>>(addr).exchange((u16)reg_value); break;
-case 4: reg_value = vm::priv_ref<atomic_le_t<u32>>(addr).exchange((u32)reg_value); break;
-case 8: reg_value = vm::priv_ref<atomic_le_t<u64>>(addr).exchange((u64)reg_value); break;
+case 1: reg_value = vm::priv_ref<atomic<u8>>(addr).exchange((u8)reg_value); break;
+case 2: reg_value = vm::priv_ref<atomic<u16>>(addr).exchange((u16)reg_value); break;
+case 4: reg_value = vm::priv_ref<atomic<u32>>(addr).exchange((u32)reg_value); break;
+case 8: reg_value = vm::priv_ref<atomic<u64>>(addr).exchange((u64)reg_value); break;
default: return false;
}

@@ -1025,10 +1025,10 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)

switch (d_size)
{
-case 1: old_value = vm::priv_ref<atomic_le_t<u8>>(addr).compare_and_swap((u8)cmp_value, (u8)reg_value); break;
-case 2: old_value = vm::priv_ref<atomic_le_t<u16>>(addr).compare_and_swap((u16)cmp_value, (u16)reg_value); break;
-case 4: old_value = vm::priv_ref<atomic_le_t<u32>>(addr).compare_and_swap((u32)cmp_value, (u32)reg_value); break;
-case 8: old_value = vm::priv_ref<atomic_le_t<u64>>(addr).compare_and_swap((u64)cmp_value, (u64)reg_value); break;
+case 1: old_value = vm::priv_ref<atomic<u8>>(addr).compare_and_swap((u8)cmp_value, (u8)reg_value); break;
+case 2: old_value = vm::priv_ref<atomic<u16>>(addr).compare_and_swap((u16)cmp_value, (u16)reg_value); break;
+case 4: old_value = vm::priv_ref<atomic<u32>>(addr).compare_and_swap((u32)cmp_value, (u32)reg_value); break;
+case 8: old_value = vm::priv_ref<atomic<u64>>(addr).compare_and_swap((u64)cmp_value, (u64)reg_value); break;
default: return false;
}

@@ -1048,10 +1048,10 @@ bool handle_access_violation(u32 addr, bool is_writing, x64_context* context)

switch (d_size)
{
-case 1: value = vm::priv_ref<atomic_le_t<u8>>(addr) &= value; break;
-case 2: value = vm::priv_ref<atomic_le_t<u16>>(addr) &= value; break;
-case 4: value = vm::priv_ref<atomic_le_t<u32>>(addr) &= value; break;
-case 8: value = vm::priv_ref<atomic_le_t<u64>>(addr) &= value; break;
+case 1: value = vm::priv_ref<atomic<u8>>(addr) &= value; break;
+case 2: value = vm::priv_ref<atomic<u16>>(addr) &= value; break;
+case 4: value = vm::priv_ref<atomic<u32>>(addr) &= value; break;
+case 8: value = vm::priv_ref<atomic<u64>>(addr) &= value; break;
default: return false;
}

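The handler keeps the same member calls on the renamed atomic<> wrapper; a minimal sketch of the exchange case outside the handler (the helper name is hypothetical, the guest-address semantics are as in the diff):

// Emulate a locked 32-bit exchange at guest address addr; returns the old value.
u32 emulated_xchg32(u32 addr, u32 new_value)
{
	return vm::priv_ref<atomic<u32>>(addr).exchange(new_value);
}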
@@ -118,7 +118,7 @@ struct waiter_map_t
bool is_stopped(u64 signal_id);

// wait until waiter_func() returns true, signal_id is an arbitrary number
-template<typename S, typename WT> __forceinline __safebuffers void wait_op(const S& signal_id, const WT waiter_func)
+template<typename S, typename WT> force_inline safe_buffers void wait_op(const S& signal_id, const WT waiter_func)
{
// generate hash
const auto hash = std::hash<S>()(signal_id) % size;

@@ -141,7 +141,7 @@ struct waiter_map_t
}

// signal all threads waiting on waiter_op() with the same signal_id (signaling only hints those threads that corresponding conditions are *probably* met)
-template<typename S> __forceinline void notify(const S& signal_id)
+template<typename S> force_inline void notify(const S& signal_id)
{
// generate hash
const auto hash = std::hash<S>()(signal_id) % size;

@@ -173,7 +173,7 @@ class squeue_t
};
};

-atomic_le_t<squeue_sync_var_t> m_sync;
+atomic<squeue_sync_var_t> m_sync;

mutable std::mutex m_rcv_mutex;
mutable std::mutex m_wcv_mutex;

@@ -258,12 +258,12 @@ public:
return push(data, [do_exit](){ return do_exit && *do_exit; });
}

-__forceinline bool push(const T& data)
+force_inline bool push(const T& data)
{
return push(data, SQUEUE_NEVER_EXIT);
}

-__forceinline bool try_push(const T& data)
+force_inline bool try_push(const T& data)
{
return push(data, SQUEUE_ALWAYS_EXIT);
}

@@ -326,12 +326,12 @@ public:
return pop(data, [do_exit](){ return do_exit && *do_exit; });
}

-__forceinline bool pop(T& data)
+force_inline bool pop(T& data)
{
return pop(data, SQUEUE_NEVER_EXIT);
}

-__forceinline bool try_pop(T& data)
+force_inline bool try_pop(T& data)
{
return pop(data, SQUEUE_ALWAYS_EXIT);
}

@@ -388,12 +388,12 @@ public:
return peek(data, start_pos, [do_exit](){ return do_exit && *do_exit; });
}

-__forceinline bool peek(T& data, u32 start_pos = 0)
+force_inline bool peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_NEVER_EXIT);
}

-__forceinline bool try_peek(T& data, u32 start_pos = 0)
+force_inline bool try_peek(T& data, u32 start_pos = 0)
{
return peek(data, start_pos, SQUEUE_ALWAYS_EXIT);
}
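wait_op()/notify() pair a predicate with an arbitrary signal id hashed into the map; a hedged sketch of the intended call pattern (the map instance, functions and flag are hypothetical, not from the commit):

extern waiter_map_t g_waiters; // assumed to exist elsewhere

void consumer(volatile bool& ready)
{
	// Block until the predicate holds; 0x1 is an arbitrary signal id.
	g_waiters.wait_op(0x1, [&]{ return ready; });
}

void producer(volatile bool& ready)
{
	ready = true;
	g_waiters.notify(0x1); // hint: the condition is probably met now
}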
@@ -13,121 +13,121 @@
#include <wx/zstream.h>
#pragma warning(pop)

-__forceinline u8 Read8(vfsStream& f)
+force_inline u8 Read8(vfsStream& f)
{
u8 ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u16 Read16(vfsStream& f)
+force_inline u16 Read16(vfsStream& f)
{
be_t<u16> ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u32 Read32(vfsStream& f)
+force_inline u32 Read32(vfsStream& f)
{
be_t<u32> ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u64 Read64(vfsStream& f)
+force_inline u64 Read64(vfsStream& f)
{
be_t<u64> ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u16 Read16LE(vfsStream& f)
+force_inline u16 Read16LE(vfsStream& f)
{
u16 ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u32 Read32LE(vfsStream& f)
+force_inline u32 Read32LE(vfsStream& f)
{
u32 ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline u64 Read64LE(vfsStream& f)
+force_inline u64 Read64LE(vfsStream& f)
{
u64 ret;
f.Read(&ret, sizeof(ret));
return ret;
}

-__forceinline void Write8(vfsStream& f, const u8 data)
+force_inline void Write8(vfsStream& f, const u8 data)
{
f.Write(&data, sizeof(data));
}

-__forceinline void Write8(const fs::file& f, const u8 data)
+force_inline void Write8(const fs::file& f, const u8 data)
{
f.write(&data, sizeof(data));
}

-__forceinline void Write16LE(vfsStream& f, const u16 data)
+force_inline void Write16LE(vfsStream& f, const u16 data)
{
f.Write(&data, sizeof(data));
}

-__forceinline void Write16LE(const fs::file& f, const u16 data)
+force_inline void Write16LE(const fs::file& f, const u16 data)
{
f.write(&data, sizeof(data));
}

-__forceinline void Write32LE(vfsStream& f, const u32 data)
+force_inline void Write32LE(vfsStream& f, const u32 data)
{
f.Write(&data, sizeof(data));
}

-__forceinline void Write32LE(const fs::file& f, const u32 data)
+force_inline void Write32LE(const fs::file& f, const u32 data)
{
f.write(&data, sizeof(data));
}

-__forceinline void Write64LE(vfsStream& f, const u64 data)
+force_inline void Write64LE(vfsStream& f, const u64 data)
{
f.Write(&data, sizeof(data));
}

-__forceinline void Write64LE(const fs::file& f, const u64 data)
+force_inline void Write64LE(const fs::file& f, const u64 data)
{
f.write(&data, sizeof(data));
}

-__forceinline void Write16(vfsStream& f, const u16 data)
+force_inline void Write16(vfsStream& f, const u16 data)
{
Write16LE(f, re16(data));
}

-__forceinline void Write16(const fs::file& f, const u16 data)
+force_inline void Write16(const fs::file& f, const u16 data)
{
Write16LE(f, re16(data));
}

-__forceinline void Write32(vfsStream& f, const u32 data)
+force_inline void Write32(vfsStream& f, const u32 data)
{
Write32LE(f, re32(data));
}

-__forceinline void Write32(const fs::file& f, const u32 data)
+force_inline void Write32(const fs::file& f, const u32 data)
{
Write32LE(f, re32(data));
}

-__forceinline void Write64(vfsStream& f, const u64 data)
+force_inline void Write64(vfsStream& f, const u64 data)
{
Write64LE(f, re64(data));
}

-__forceinline void Write64(const fs::file& f, const u64 data)
+force_inline void Write64(const fs::file& f, const u64 data)
{
Write64LE(f, re64(data));
}
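Write16/32/64 store big-endian by byteswapping through re16/re32/re64 before delegating to the *LE variants; a small sketch of the distinction (hypothetical function, the magic value is only an example):

void write_magic(const fs::file& f)
{
	Write32(f, 0x53434500);   // bytes on disk: 53 43 45 00 (big-endian)
	Write32LE(f, 0x53434500); // bytes on disk: 00 45 43 53 (host little-endian)
}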
@@ -5,14 +5,14 @@
namespace vm
{
template<typename AT, typename RT, typename... T>
-__forceinline RT _ptr_base<RT(T...), 1, AT>::operator()(ARMv7Context& context, T... args) const
+force_inline RT _ptr_base<RT(T...), 1, AT>::operator()(ARMv7Context& context, T... args) const
{
return psv_func_detail::func_caller<RT, T...>::call(context, vm::cast(this->addr()), args...);
}
}

template<typename RT, typename... T>
-__forceinline RT cb_call(ARMv7Context& context, u32 addr, T... args)
+force_inline RT cb_call(ARMv7Context& context, u32 addr, T... args)
{
return psv_func_detail::func_caller<RT, T...>::call(context, addr, args...);
}
|
|||
}
|
||||
|
||||
template<typename... T>
|
||||
__noinline void fmt_debug_str(const char* fmt, T... args)
|
||||
never_inline void fmt_debug_str(const char* fmt, T... args)
|
||||
{
|
||||
debug_str = fmt::format(fmt, args...);
|
||||
}
|
||||
|
@ -189,12 +189,12 @@ struct cast_armv7_gpr
|
|||
|
||||
typedef typename std::underlying_type<T>::type underlying_type;
|
||||
|
||||
__forceinline static u32 to_gpr(const T& value)
|
||||
force_inline static u32 to_gpr(const T& value)
|
||||
{
|
||||
return cast_armv7_gpr<underlying_type>::to_gpr(static_cast<underlying_type>(value));
|
||||
}
|
||||
|
||||
__forceinline static T from_gpr(const u32 reg)
|
||||
force_inline static T from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<T>(cast_armv7_gpr<underlying_type>::from_gpr(reg));
|
||||
}
|
||||
|
@ -203,12 +203,12 @@ struct cast_armv7_gpr
|
|||
template<>
|
||||
struct cast_armv7_gpr<u8, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const u8& value)
|
||||
force_inline static u32 to_gpr(const u8& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u8 from_gpr(const u32 reg)
|
||||
force_inline static u8 from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<u8>(reg);
|
||||
}
|
||||
|
@ -217,12 +217,12 @@ struct cast_armv7_gpr<u8, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<u16, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const u16& value)
|
||||
force_inline static u32 to_gpr(const u16& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u16 from_gpr(const u32 reg)
|
||||
force_inline static u16 from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<u16>(reg);
|
||||
}
|
||||
|
@ -231,12 +231,12 @@ struct cast_armv7_gpr<u16, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<u32, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const u32& value)
|
||||
force_inline static u32 to_gpr(const u32& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u32 from_gpr(const u32 reg)
|
||||
force_inline static u32 from_gpr(const u32 reg)
|
||||
{
|
||||
return reg;
|
||||
}
|
||||
|
@ -245,12 +245,12 @@ struct cast_armv7_gpr<u32, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<s8, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const s8& value)
|
||||
force_inline static u32 to_gpr(const s8& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s8 from_gpr(const u32 reg)
|
||||
force_inline static s8 from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<s8>(reg);
|
||||
}
|
||||
|
@ -259,12 +259,12 @@ struct cast_armv7_gpr<s8, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<s16, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const s16& value)
|
||||
force_inline static u32 to_gpr(const s16& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s16 from_gpr(const u32 reg)
|
||||
force_inline static s16 from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<s16>(reg);
|
||||
}
|
||||
|
@ -273,12 +273,12 @@ struct cast_armv7_gpr<s16, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<s32, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const s32& value)
|
||||
force_inline static u32 to_gpr(const s32& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s32 from_gpr(const u32 reg)
|
||||
force_inline static s32 from_gpr(const u32 reg)
|
||||
{
|
||||
return static_cast<s32>(reg);
|
||||
}
|
||||
|
@ -287,25 +287,25 @@ struct cast_armv7_gpr<s32, false>
|
|||
template<>
|
||||
struct cast_armv7_gpr<bool, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const bool& value)
|
||||
force_inline static u32 to_gpr(const bool& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static bool from_gpr(const u32& reg)
|
||||
force_inline static bool from_gpr(const u32& reg)
|
||||
{
|
||||
return reinterpret_cast<const bool&>(reg);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
__forceinline u32 cast_to_armv7_gpr(const T& value)
|
||||
force_inline u32 cast_to_armv7_gpr(const T& value)
|
||||
{
|
||||
return cast_armv7_gpr<T>::to_gpr(value);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
__forceinline T cast_from_armv7_gpr(const u32 reg)
|
||||
force_inline T cast_from_armv7_gpr(const u32 reg)
|
||||
{
|
||||
return cast_armv7_gpr<T>::from_gpr(reg);
|
||||
}
|
||||
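For reference, a compact self-contained sketch of the widening/narrowing pattern implemented by the cast_armv7_gpr specializations above: enums go through their underlying type, small integrals are widened to the 32-bit register word on the way in and narrowed back on the way out. The names reg_cast and my_enum are hypothetical and used only in this sketch; they are not part of the commit.

    #include <cassert>
    #include <cstdint>
    #include <type_traits>

    // hypothetical stand-in for the cast_armv7_gpr pattern shown above
    template<typename T, bool IsEnum = std::is_enum<T>::value>
    struct reg_cast
    {
        using underlying = typename std::underlying_type<T>::type;

        static std::uint32_t to_gpr(const T& value)
        {
            return reg_cast<underlying>::to_gpr(static_cast<underlying>(value));
        }

        static T from_gpr(std::uint32_t reg)
        {
            return static_cast<T>(reg_cast<underlying>::from_gpr(reg));
        }
    };

    template<typename T>
    struct reg_cast<T, false>
    {
        static_assert(std::is_integral<T>::value && sizeof(T) <= 4, "unsupported type");

        static std::uint32_t to_gpr(const T& value) { return static_cast<std::uint32_t>(value); }
        static T from_gpr(std::uint32_t reg) { return static_cast<T>(reg); }
    };

    enum class my_enum : std::uint16_t { a = 1, b = 42 }; // hypothetical example type

    int main()
    {
        const std::uint32_t r = reg_cast<my_enum>::to_gpr(my_enum::b);
        assert(reg_cast<my_enum>::from_gpr(r) == my_enum::b);
    }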
|
|
|
@ -397,7 +397,7 @@ s32 sceKernelCreateEventFlag(vm::psv::ptr<const char> pName, u32 attr, u32 initP
|
|||
{
|
||||
sceLibKernel.Error("sceKernelCreateEventFlag(pName=*0x%x, attr=0x%x, initPattern=0x%x, pOptParam=*0x%x)", pName, attr, initPattern, pOptParam);
|
||||
|
||||
if (s32 id = g_psv_ef_list.add(new psv_event_flag_t(pName.get_ptr(), attr, initPattern), 0))
|
||||
if (s32 id = g_psv_ef_list.create(pName.get_ptr(), attr, initPattern))
|
||||
{
|
||||
return id;
|
||||
}
|
||||
|
@ -461,7 +461,7 @@ s32 sceKernelCreateSema(vm::psv::ptr<const char> pName, u32 attr, s32 initCount,
|
|||
{
|
||||
sceLibKernel.Error("sceKernelCreateSema(pName=*0x%x, attr=0x%x, initCount=%d, maxCount=%d, pOptParam=*0x%x)", pName, attr, initCount, maxCount, pOptParam);
|
||||
|
||||
if (s32 id = g_psv_sema_list.add(new psv_sema_t(pName.get_ptr(), attr, initCount, maxCount), 0))
|
||||
if (s32 id = g_psv_sema_list.create(pName.get_ptr(), attr, initCount, maxCount))
|
||||
{
|
||||
return id;
|
||||
}
|
||||
|
@ -473,7 +473,7 @@ s32 sceKernelDeleteSema(s32 semaId)
|
|||
{
|
||||
sceLibKernel.Error("sceKernelDeleteSema(semaId=0x%x)", semaId);
|
||||
|
||||
ref_t<psv_sema_t> sema = g_psv_sema_list.get(semaId);
|
||||
const auto sema = g_psv_sema_list.get(semaId);
|
||||
|
||||
if (!sema)
|
||||
{
|
||||
|
@ -502,7 +502,7 @@ s32 sceKernelWaitSema(s32 semaId, s32 needCount, vm::psv::ptr<u32> pTimeout)
|
|||
{
|
||||
sceLibKernel.Error("sceKernelWaitSema(semaId=0x%x, needCount=%d, pTimeout=*0x%x)", semaId, needCount, pTimeout);
|
||||
|
||||
ref_t<psv_sema_t> sema = g_psv_sema_list.get(semaId);
|
||||
const auto sema = g_psv_sema_list.get(semaId);
|
||||
|
||||
if (!sema)
|
||||
{
|
||||
|
@ -545,7 +545,7 @@ s32 sceKernelCreateMutex(vm::psv::ptr<const char> pName, u32 attr, s32 initCount
|
|||
{
|
||||
sceLibKernel.Error("sceKernelCreateMutex(pName=*0x%x, attr=0x%x, initCount=%d, pOptParam=*0x%x)", pName, attr, initCount, pOptParam);
|
||||
|
||||
if (s32 id = g_psv_mutex_list.add(new psv_mutex_t(pName.get_ptr(), attr, initCount), 0))
|
||||
if (s32 id = g_psv_mutex_list.create(pName.get_ptr(), attr, initCount))
|
||||
{
|
||||
return id;
|
||||
}
|
||||
|
@ -646,7 +646,7 @@ s32 sceKernelCreateCond(vm::psv::ptr<const char> pName, u32 attr, s32 mutexId, v
|
|||
{
|
||||
sceLibKernel.Error("sceKernelCreateCond(pName=*0x%x, attr=0x%x, mutexId=0x%x, pOptParam=*0x%x)", pName, attr, mutexId, pOptParam);
|
||||
|
||||
if (s32 id = g_psv_cond_list.add(new psv_cond_t(pName.get_ptr(), attr, mutexId), 0))
|
||||
if (s32 id = g_psv_cond_list.create(pName.get_ptr(), attr, mutexId))
|
||||
{
|
||||
return id;
|
||||
}
|
||||
|
|
|
@ -56,12 +56,12 @@ namespace psv_func_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 4, "Invalid function argument type for ARG_GENERAL");
|
||||
|
||||
__forceinline static T get_arg(ARMv7Context& context)
|
||||
force_inline static T get_arg(ARMv7Context& context)
|
||||
{
|
||||
return cast_from_armv7_gpr<T>(context.GPR[g_count - 1]);
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
{
|
||||
context.GPR[g_count - 1] = cast_to_armv7_gpr<T>(arg);
|
||||
}
|
||||
|
@ -73,12 +73,12 @@ namespace psv_func_detail
|
|||
// first u64 argument is passed in r0-r1, second one is passed in r2-r3 (if g_count = 3)
|
||||
static_assert(g_count == 1 || g_count == 3, "Wrong u64 argument position");
|
||||
|
||||
__forceinline static u64 get_arg(ARMv7Context& context)
|
||||
force_inline static u64 get_arg(ARMv7Context& context)
|
||||
{
|
||||
return context.GPR_D[g_count >> 1];
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, u64 arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, u64 arg)
|
||||
{
|
||||
context.GPR_D[g_count >> 1] = arg;
|
||||
}
|
||||
|
@ -89,12 +89,12 @@ namespace psv_func_detail
|
|||
{
|
||||
static_assert(g_count == 1 || g_count == 3, "Wrong s64 argument position");
|
||||
|
||||
__forceinline static s64 get_arg(ARMv7Context& context)
|
||||
force_inline static s64 get_arg(ARMv7Context& context)
|
||||
{
|
||||
return context.GPR_D[g_count >> 1];
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, s64 arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, s64 arg)
|
||||
{
|
||||
context.GPR_D[g_count >> 1] = arg;
|
||||
}
|
||||
|
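To make the GPR_D[g_count >> 1] indexing above concrete: the comment states that the first u64 argument travels in r0-r1 and the second in r2-r3, which suggests the context views the 32-bit GPRs and 64-bit pairs through the same storage. The toy union below is an assumption made only for this sketch (the real ARMv7Context layout is not shown in this diff), and it relies on union type punning as emulators commonly do.

    #include <cstdint>
    #include <cstdio>

    // hypothetical register file: r0..r3 as 32-bit words, GPR_D as the two 64-bit pairs
    union toy_gpr_file
    {
        std::uint32_t GPR[4];
        std::uint64_t GPR_D[2];
    };

    int main()
    {
        toy_gpr_file ctx{};

        // put_arg for the first u64 argument: g_count == 1 -> GPR_D[1 >> 1] == GPR_D[0]
        ctx.GPR_D[0] = 0x1122334455667788ULL;

        // on a little-endian host the low word lands in r0 and the high word in r1
        std::printf("r0=0x%08x r1=0x%08x\n",
            static_cast<unsigned>(ctx.GPR[0]), static_cast<unsigned>(ctx.GPR[1]));
    }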
@ -106,11 +106,11 @@ namespace psv_func_detail
|
|||
static_assert(f_count <= 0, "TODO: Unsupported argument type (float)");
|
||||
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_FLOAT");
|
||||
|
||||
__forceinline static T get_arg(ARMv7Context& context)
|
||||
force_inline static T get_arg(ARMv7Context& context)
|
||||
{
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
@ -121,11 +121,11 @@ namespace psv_func_detail
|
|||
static_assert(v_count <= 0, "TODO: Unsupported argument type (vector)");
|
||||
static_assert(std::is_same<T, u128>::value, "Invalid function argument type for ARG_VECTOR");
|
||||
|
||||
__forceinline static T get_arg(ARMv7Context& context)
|
||||
force_inline static T get_arg(ARMv7Context& context)
|
||||
{
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
{
|
||||
}
|
||||
};
|
||||
|
@ -137,13 +137,13 @@ namespace psv_func_detail
|
|||
static_assert(v_count <= 0, "TODO: Unsupported stack argument type (vector)");
|
||||
static_assert(sizeof(T) <= 4, "Invalid function argument type for ARG_STACK");
|
||||
|
||||
__forceinline static T get_arg(ARMv7Context& context)
|
||||
force_inline static T get_arg(ARMv7Context& context)
|
||||
{
|
||||
// TODO: check
|
||||
return cast_from_armv7_gpr<T>(vm::psv::read32(context.SP + sizeof(u32) * (g_count - 5)));
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, const T& arg)
|
||||
{
|
||||
// TODO: check
|
||||
const int stack_pos = (g_count - 5) * 4 - FIXED_STACK_FRAME_SIZE;
|
||||
|
@ -156,13 +156,13 @@ namespace psv_func_detail
|
|||
template<int g_count, int f_count, int v_count>
|
||||
struct bind_arg<u64, ARG_STACK, g_count, f_count, v_count>
|
||||
{
|
||||
__forceinline static u64 get_arg(ARMv7Context& context)
|
||||
force_inline static u64 get_arg(ARMv7Context& context)
|
||||
{
|
||||
// TODO: check
|
||||
return vm::psv::read64(context.SP + sizeof(u32) * (g_count - 5));
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, u64 arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, u64 arg)
|
||||
{
|
||||
// TODO: check
|
||||
const int stack_pos = (g_count - 5) * 4 - FIXED_STACK_FRAME_SIZE;
|
||||
|
@ -175,13 +175,13 @@ namespace psv_func_detail
|
|||
template<int g_count, int f_count, int v_count>
|
||||
struct bind_arg<s64, ARG_STACK, g_count, f_count, v_count>
|
||||
{
|
||||
__forceinline static s64 get_arg(ARMv7Context& context)
|
||||
force_inline static s64 get_arg(ARMv7Context& context)
|
||||
{
|
||||
// TODO: check
|
||||
return vm::psv::read64(context.SP + sizeof(u32) * (g_count - 5));
|
||||
}
|
||||
|
||||
__forceinline static void put_arg(ARMv7Context& context, s64 arg)
|
||||
force_inline static void put_arg(ARMv7Context& context, s64 arg)
|
||||
{
|
||||
// TODO: check
|
||||
const int stack_pos = (g_count - 5) * 4 - FIXED_STACK_FRAME_SIZE;
|
||||
|
@ -199,12 +199,12 @@ namespace psv_func_detail
|
|||
static_assert(type == ARG_GENERAL, "Wrong use of bind_result template");
|
||||
static_assert(sizeof(T) <= 4, "Invalid function result type for ARG_GENERAL");
|
||||
|
||||
__forceinline static T get_result(ARMv7Context& context)
|
||||
force_inline static T get_result(ARMv7Context& context)
|
||||
{
|
||||
return cast_from_armv7_gpr<T>(context.GPR[0]);
|
||||
}
|
||||
|
||||
__forceinline static void put_result(ARMv7Context& context, const T& result)
|
||||
force_inline static void put_result(ARMv7Context& context, const T& result)
|
||||
{
|
||||
context.GPR[0] = cast_to_armv7_gpr<T>(result);
|
||||
}
|
||||
|
@ -213,12 +213,12 @@ namespace psv_func_detail
|
|||
template<>
|
||||
struct bind_result<u64, ARG_GENERAL>
|
||||
{
|
||||
__forceinline static u64 get_result(ARMv7Context& context)
|
||||
force_inline static u64 get_result(ARMv7Context& context)
|
||||
{
|
||||
return context.GPR_D[0];
|
||||
}
|
||||
|
||||
__forceinline static void put_result(ARMv7Context& context, u64 result)
|
||||
force_inline static void put_result(ARMv7Context& context, u64 result)
|
||||
{
|
||||
context.GPR_D[0] = result;
|
||||
}
|
||||
|
@ -227,12 +227,12 @@ namespace psv_func_detail
|
|||
template<>
|
||||
struct bind_result<s64, ARG_GENERAL>
|
||||
{
|
||||
__forceinline static s64 get_result(ARMv7Context& context)
|
||||
force_inline static s64 get_result(ARMv7Context& context)
|
||||
{
|
||||
return context.GPR_D[0];
|
||||
}
|
||||
|
||||
__forceinline static void put_result(ARMv7Context& context, s64 result)
|
||||
force_inline static void put_result(ARMv7Context& context, s64 result)
|
||||
{
|
||||
context.GPR_D[0] = result;
|
||||
}
|
||||
|
@ -243,7 +243,7 @@ namespace psv_func_detail
|
|||
//{
|
||||
// static_assert(sizeof(T) <= 8, "Invalid function result type for ARG_FLOAT");
|
||||
|
||||
// static __forceinline void put_result(ARMv7Context& context, const T& result)
|
||||
// static force_inline void put_result(ARMv7Context& context, const T& result)
|
||||
// {
|
||||
// }
|
||||
//};
|
||||
|
@ -253,7 +253,7 @@ namespace psv_func_detail
|
|||
//{
|
||||
// static_assert(std::is_same<T, u128>::value, "Invalid function result type for ARG_VECTOR");
|
||||
|
||||
// static __forceinline void put_result(ARMv7Context& context, const T& result)
|
||||
// static force_inline void put_result(ARMv7Context& context, const T& result)
|
||||
// {
|
||||
// }
|
||||
//};
|
||||
|
@ -289,7 +289,7 @@ namespace psv_func_detail
|
|||
template <typename RT, typename F, typename Tuple, bool Done, int Total, int... N>
|
||||
struct call_impl
|
||||
{
|
||||
static __forceinline RT call(F f, Tuple && t)
|
||||
static force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
return call_impl<RT, F, Tuple, Total == 1 + sizeof...(N), Total, N..., sizeof...(N)>::call(f, std::forward<Tuple>(t));
|
||||
}
|
||||
|
@ -298,28 +298,28 @@ namespace psv_func_detail
|
|||
template <typename RT, typename F, typename Tuple, int Total, int... N>
|
||||
struct call_impl<RT, F, Tuple, true, Total, N...>
|
||||
{
|
||||
static __forceinline RT call(F f, Tuple && t)
|
||||
static force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
return f(std::get<N>(std::forward<Tuple>(t))...);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename RT, typename F, typename Tuple>
|
||||
__forceinline RT call(F f, Tuple && t)
|
||||
force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
typedef typename std::decay<Tuple>::type ttype;
|
||||
return psv_func_detail::call_impl<RT, F, Tuple, 0 == std::tuple_size<ttype>::value, std::tuple_size<ttype>::value>::call(f, std::forward<Tuple>(t));
|
||||
}
|
||||
|
||||
template<int g_count, int f_count, int v_count>
|
||||
__forceinline std::tuple<> get_func_args(ARMv7Context& context)
|
||||
force_inline std::tuple<> get_func_args(ARMv7Context& context)
|
||||
{
|
||||
// terminator
|
||||
return std::tuple<>();
|
||||
}
|
||||
|
||||
template<int g_count, int f_count, int v_count, typename T, typename... A>
|
||||
__forceinline std::tuple<T, A...> get_func_args(ARMv7Context& context)
|
||||
force_inline std::tuple<T, A...> get_func_args(ARMv7Context& context)
|
||||
{
|
||||
typedef arg_type<T, g_count, f_count, v_count> type;
|
||||
const arg_class t = type::value;
|
||||
|
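The recursive index-building in call_impl above is the standard pre-C++14 way to expand a std::tuple into a function call: the parameter pack 0..N-1 is grown one element per instantiation, then std::get<N>(t)... is expanded in a single call expression. A free-standing sketch of the same trick, with a trivial add3 function added only for demonstration:

    #include <cstdio>
    #include <tuple>
    #include <utility>

    // grow the index pack by recursion until it has Total elements, then call f
    template<typename RT, typename F, typename Tuple, bool Done, int Total, int... N>
    struct call_impl
    {
        static RT call(F f, Tuple&& t)
        {
            return call_impl<RT, F, Tuple, Total == 1 + sizeof...(N), Total, N..., sizeof...(N)>::call(f, std::forward<Tuple>(t));
        }
    };

    template<typename RT, typename F, typename Tuple, int Total, int... N>
    struct call_impl<RT, F, Tuple, true, Total, N...>
    {
        static RT call(F f, Tuple&& t)
        {
            return f(std::get<N>(std::forward<Tuple>(t))...); // expand all tuple elements at once
        }
    };

    template<typename RT, typename F, typename Tuple>
    RT call(F f, Tuple&& t)
    {
        using ttype = typename std::decay<Tuple>::type;
        return call_impl<RT, F, Tuple, 0 == std::tuple_size<ttype>::value, std::tuple_size<ttype>::value>::call(f, std::forward<Tuple>(t));
    }

    static int add3(int a, int b, int c) { return a + b + c; }

    int main()
    {
        const int r = call<int>(add3, std::make_tuple(1, 2, 3)); // expands to add3(1, 2, 3)
        std::printf("%d\n", r); // prints 6
    }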
@ -332,14 +332,14 @@ namespace psv_func_detail
|
|||
}
|
||||
|
||||
template<int g_count, int f_count, int v_count>
|
||||
__forceinline static bool put_func_args(ARMv7Context& context)
|
||||
force_inline static bool put_func_args(ARMv7Context& context)
|
||||
{
|
||||
// terminator
|
||||
return false;
|
||||
}
|
||||
|
||||
template<int g_count, int f_count, int v_count, typename T1, typename... T>
|
||||
__forceinline static bool put_func_args(ARMv7Context& context, T1 arg, T... args)
|
||||
force_inline static bool put_func_args(ARMv7Context& context, T1 arg, T... args)
|
||||
{
|
||||
typedef arg_type<T1, g_count, f_count, v_count> type;
|
||||
const arg_class t = type::value;
|
||||
|
@ -404,7 +404,7 @@ namespace psv_func_detail
|
|||
template<typename RT, typename... T>
|
||||
struct func_caller
|
||||
{
|
||||
__forceinline static RT call(ARMv7Context& context, u32 addr, T... args)
|
||||
force_inline static RT call(ARMv7Context& context, u32 addr, T... args)
|
||||
{
|
||||
func_caller<void, T...>::call(context, addr, args...);
|
||||
|
||||
|
@ -415,7 +415,7 @@ namespace psv_func_detail
|
|||
template<typename... T>
|
||||
struct func_caller<void, T...>
|
||||
{
|
||||
__forceinline static void call(ARMv7Context& context, u32 addr, T... args)
|
||||
force_inline static void call(ARMv7Context& context, u32 addr, T... args)
|
||||
{
|
||||
if (put_func_args<0, 0, 0, T...>(context, args...))
|
||||
{
|
||||
|
@ -464,7 +464,7 @@ enum psv_special_function_index : u16
|
|||
// Do not call directly
|
||||
u32 add_psv_func(psv_func data);
|
||||
// Do not call directly
|
||||
template<typename RT, typename... T> __forceinline void call_psv_func(ARMv7Context& context, RT(*func)(T...))
|
||||
template<typename RT, typename... T> force_inline void call_psv_func(ARMv7Context& context, RT(*func)(T...))
|
||||
{
|
||||
psv_func_detail::func_binder<RT, T...>::do_call(context, func);
|
||||
}
|
||||
|
|
|
@ -21,22 +21,16 @@ union psv_uid_t
|
|||
}
|
||||
};
|
||||
|
||||
template<typename T, u32 type>
|
||||
template<typename T, u32 uid_class>
|
||||
class psv_object_list_t // Class for managing object data
|
||||
{
|
||||
public:
|
||||
typedef refcounter_t<T> rc_type;
|
||||
typedef ref_t<T> ref_type;
|
||||
|
||||
static const u32 max = 0x8000;
|
||||
|
||||
private:
|
||||
std::array<rc_type, max> m_data;
|
||||
std::array<std::shared_ptr<T>, 0x8000> m_data;
|
||||
std::atomic<u32> m_hint; // guessing next free position
|
||||
std::mutex m_mutex;
|
||||
|
||||
void error(s32 uid)
|
||||
{
|
||||
throw fmt::format("Invalid UID requested (type=0x%x, uid=0x%x)", type, uid);
|
||||
throw fmt::format("Invalid UID requested (type=0x%x, uid=0x%x)", uid_class, uid);
|
||||
}
|
||||
|
||||
public:
|
||||
|
@ -45,17 +39,8 @@ public:
|
|||
{
|
||||
}
|
||||
|
||||
psv_object_list_t(const psv_object_list_t&) = delete;
|
||||
psv_object_list_t(psv_object_list_t&&) = delete;
|
||||
|
||||
psv_object_list_t& operator =(const psv_object_list_t&) = delete;
|
||||
psv_object_list_t& operator =(psv_object_list_t&&) = delete;
|
||||
|
||||
public:
|
||||
static const u32 uid_class = type;
|
||||
|
||||
// check if UID is potentially valid (will return true even if the object doesn't exist)
|
||||
bool check(s32 uid)
|
||||
inline static bool check(s32 uid)
|
||||
{
|
||||
const psv_uid_t id = psv_uid_t::make(uid);
|
||||
|
||||
|
@ -64,29 +49,35 @@ public:
|
|||
}
|
||||
|
||||
// share the object with the specified UID
|
||||
ref_type get(s32 uid)
|
||||
inline std::shared_ptr<T> get(s32 uid)
|
||||
{
|
||||
if (!check(uid))
|
||||
{
|
||||
return ref_type();
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return &m_data[psv_uid_t::make(uid).number];
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
return m_data[psv_uid_t::make(uid).number];
|
||||
}
|
||||
|
||||
ref_type operator [](s32 uid)
|
||||
inline std::shared_ptr<T> operator [](s32 uid)
|
||||
{
|
||||
return get(uid);
|
||||
return this->get(uid);
|
||||
}
|
||||
|
||||
// generate UID for newly created object (will return zero if the limit exceeded)
|
||||
s32 add(T* data, s32 error_code)
|
||||
// create new object and generate UID for it, or do nothing and return zero (if limit reached)
|
||||
template<typename... Args> s32 create(Args&&... args)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
for (u32 i = 0, j = m_hint; i < m_data.size(); i++, j = (j + 1) % m_data.size())
|
||||
{
|
||||
// find an empty position and copy the pointer
|
||||
if (m_data[j].try_set(data))
|
||||
if (!m_data[j])
|
||||
{
|
||||
m_data[j] = std::make_shared<T>(args...); // construct object with specified arguments
|
||||
|
||||
m_hint = (j + 1) % m_data.size(); // guess next position
|
||||
|
||||
psv_uid_t id = psv_uid_t::make(1); // make UID
|
||||
|
@ -97,8 +88,7 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
delete data;
|
||||
return error_code;
|
||||
return 0;
|
||||
}
|
||||
|
||||
// remove object with specified UID
|
||||
|
@ -109,19 +99,29 @@ public:
|
|||
return false;
|
||||
}
|
||||
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
const u32 pos = psv_uid_t::make(uid).number;
|
||||
|
||||
m_hint = std::min<u32>(pos, m_hint);
|
||||
|
||||
return m_data[pos].try_remove();
|
||||
if (!m_data[pos])
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
m_data[pos].reset();
|
||||
return true;
|
||||
}
|
||||
|
||||
// remove all objects
|
||||
void clear()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
for (auto& v : m_data)
|
||||
{
|
||||
v.try_remove();
|
||||
v.reset();
|
||||
}
|
||||
|
||||
m_hint = 0;
|
||||
|
|
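As a usage illustration (not taken from the commit), this is how the reworked container is typically driven by the sceKernelCreate*/Delete* calls earlier in the diff: create() forwards constructor arguments to std::make_shared, get() hands out a shared_ptr copy, remove() clears the slot. The toy_object payload and the small fixed capacity are assumptions made only for this sketch.

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <utility>

    // simplified stand-in for psv_object_list_t: a fixed table of shared_ptr slots
    template<typename T, std::size_t Max = 8>
    class object_list_t
    {
        std::array<std::shared_ptr<T>, Max> m_data;
        std::mutex m_mutex;

    public:
        // construct a new object in a free slot, return its 1-based id (0 if the limit is reached)
        template<typename... Args> std::int32_t create(Args&&... args)
        {
            std::lock_guard<std::mutex> lock(m_mutex);

            for (std::size_t i = 0; i < Max; i++)
            {
                if (!m_data[i])
                {
                    m_data[i] = std::make_shared<T>(std::forward<Args>(args)...);
                    return static_cast<std::int32_t>(i + 1);
                }
            }

            return 0;
        }

        std::shared_ptr<T> get(std::int32_t id)
        {
            std::lock_guard<std::mutex> lock(m_mutex);

            if (id < 1 || id > static_cast<std::int32_t>(Max)) return nullptr;
            return m_data[id - 1];
        }

        bool remove(std::int32_t id)
        {
            std::lock_guard<std::mutex> lock(m_mutex);

            if (id < 1 || id > static_cast<std::int32_t>(Max) || !m_data[id - 1]) return false;
            m_data[id - 1].reset();
            return true;
        }
    };

    struct toy_object // hypothetical payload, in the spirit of psv_sema_t
    {
        int count;
        explicit toy_object(int c) : count(c) {}
    };

    int main()
    {
        object_list_t<toy_object> list;

        const auto id = list.create(5);      // like sceKernelCreateSema
        if (const auto obj = list.get(id))   // like sceKernelWaitSema looking it up
        {
            std::printf("count=%d\n", obj->count);
        }
        list.remove(id);                     // like sceKernelDeleteSema
    }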
|
@ -354,17 +354,17 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
__forceinline const std::string& GetName() const
|
||||
force_inline const std::string& GetName() const
|
||||
{
|
||||
return m_name;
|
||||
}
|
||||
|
||||
__forceinline const uint GetArgCount() const
|
||||
force_inline const uint GetArgCount() const
|
||||
{
|
||||
return m_args_count;
|
||||
}
|
||||
|
||||
__forceinline const CodeFieldBase& GetArg(uint index) const
|
||||
force_inline const CodeFieldBase& GetArg(uint index) const
|
||||
{
|
||||
assert(index < m_args_count);
|
||||
return *m_args[index];
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#pragma once
|
||||
|
||||
template<uint size, typename T> __forceinline static T sign(const T value)
|
||||
template<uint size, typename T> force_inline static T sign(const T value)
|
||||
{
|
||||
static_assert(size > 0 && size < sizeof(T) * 8, "Bad sign size");
|
||||
|
||||
|
|
|
@ -34,12 +34,12 @@ std::shared_ptr<CPUThread> CPUThreadManager::AddThread(CPUThreadType type)
|
|||
{
|
||||
case CPU_THREAD_PPU:
|
||||
{
|
||||
new_thread.reset(new PPUThread());
|
||||
new_thread = std::make_shared<PPUThread>();
|
||||
break;
|
||||
}
|
||||
case CPU_THREAD_SPU:
|
||||
{
|
||||
new_thread.reset(new SPUThread());
|
||||
new_thread = std::make_shared<SPUThread>();
|
||||
break;
|
||||
}
|
||||
case CPU_THREAD_RAW_SPU:
|
||||
|
@ -48,7 +48,7 @@ std::shared_ptr<CPUThread> CPUThreadManager::AddThread(CPUThreadType type)
|
|||
{
|
||||
if (!m_raw_spu[i])
|
||||
{
|
||||
new_thread.reset(new RawSPUThread());
|
||||
new_thread = std::make_shared<RawSPUThread>();
|
||||
new_thread->index = i;
|
||||
|
||||
m_raw_spu[i] = new_thread;
|
||||
|
@ -67,7 +67,7 @@ std::shared_ptr<CPUThread> CPUThreadManager::AddThread(CPUThreadType type)
|
|||
|
||||
if (new_thread)
|
||||
{
|
||||
new_thread->SetId(Emu.GetIdManager().GetNewID(new_thread));
|
||||
new_thread->SetId(Emu.GetIdManager().add(new_thread));
|
||||
|
||||
m_threads.push_back(new_thread);
|
||||
SendDbgCommand(DID_CREATE_THREAD, new_thread.get());
|
||||
|
@ -106,13 +106,13 @@ void CPUThreadManager::RemoveThread(u32 id)
|
|||
}
|
||||
|
||||
// Removing the ID should trigger the actual deletion of the thread
|
||||
Emu.GetIdManager().RemoveID<CPUThread>(id);
|
||||
Emu.GetIdManager().remove<CPUThread>(id);
|
||||
Emu.CheckStatus();
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id)
|
||||
{
|
||||
return Emu.GetIdManager().GetIDData<CPUThread>(id);
|
||||
return Emu.GetIdManager().get<CPUThread>(id);
|
||||
}
|
||||
|
||||
std::shared_ptr<CPUThread> CPUThreadManager::GetThread(u32 id, CPUThreadType type)
|
||||
|
|
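A brief aside on the reset(new T()) to std::make_shared<T>() change above: make_shared allocates the object and its control block together and cannot leak if construction throws. A minimal illustration under those assumptions (the toy_thread type is invented for the sketch):

    #include <cstdio>
    #include <memory>

    struct toy_thread
    {
        explicit toy_thread(int id) : id(id) { std::printf("thread %d created\n", id); }
        int id;
    };

    int main()
    {
        std::shared_ptr<toy_thread> a;
        a.reset(new toy_thread(1));               // two allocations: object + control block

        auto b = std::make_shared<toy_thread>(2); // one combined allocation

        std::printf("%d %d\n", a->id, b->id);
    }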
|
@ -26,13 +26,13 @@ public:
|
|||
static const u32 shift = 31 - to;
|
||||
static const u32 mask = ((1ULL << ((to - from) + 1)) - 1) << shift;
|
||||
|
||||
static __forceinline void encode(u32& data, u32 value)
|
||||
static force_inline void encode(u32& data, u32 value)
|
||||
{
|
||||
data &= ~mask;
|
||||
data |= (value << shift) & mask;
|
||||
}
|
||||
|
||||
static __forceinline u32 decode(u32 data)
|
||||
static force_inline u32 decode(u32 data)
|
||||
{
|
||||
return (data & mask) >> shift;
|
||||
}
|
||||
|
@ -64,13 +64,13 @@ public:
|
|||
static const u32 shift2 = 31 - to2;
|
||||
static const u32 mask2 = ((1 << ((to2 - from2) + 1)) - 1) << shift2;
|
||||
|
||||
static __forceinline void encode(u32& data, u32 value)
|
||||
static force_inline void encode(u32& data, u32 value)
|
||||
{
|
||||
data &= ~(CodeField<from1, to1>::mask | mask2);
|
||||
data |= ((value << CodeField<from1, to1>::shift) & CodeField<from1, to1>::mask) | (((value >> offset) << shift2) & mask2);
|
||||
}
|
||||
|
||||
static __forceinline u32 decode(u32 data)
|
||||
static force_inline u32 decode(u32 data)
|
||||
{
|
||||
return ((data & CodeField<from1, to1>::mask) >> CodeField<from1, to1>::shift) | (((data & mask2) >> shift2) << offset);
|
||||
}
|
||||
|
@ -96,7 +96,7 @@ public:
|
|||
|
||||
static const int size = _size;
|
||||
|
||||
static __forceinline u32 decode(u32 data)
|
||||
static force_inline u32 decode(u32 data)
|
||||
{
|
||||
return sign<size>((data & CodeField<from, to>::mask) >> CodeField<from, to>::shift);
|
||||
}
|
||||
|
@ -117,12 +117,12 @@ public:
|
|||
{
|
||||
}
|
||||
|
||||
static __forceinline u32 decode(u32 data)
|
||||
static force_inline u32 decode(u32 data)
|
||||
{
|
||||
return ((data & CodeField<from, to>::mask) >> CodeField<from, to>::shift) << offset;
|
||||
}
|
||||
|
||||
static __forceinline void encode(u32& data, u32 value)
|
||||
static force_inline void encode(u32& data, u32 value)
|
||||
{
|
||||
data &= ~CodeField<from, to>::mask;
|
||||
data |= ((value >> offset) << CodeField<from, to>::shift) & CodeField<from, to>::mask;
|
||||
|
@ -149,12 +149,12 @@ public:
|
|||
{
|
||||
}
|
||||
|
||||
static __forceinline u32 decode(u32 data)
|
||||
static force_inline u32 decode(u32 data)
|
||||
{
|
||||
return sign<size>((data & CodeField<from, to>::mask) >> CodeField<from, to>::shift) << offset;
|
||||
}
|
||||
|
||||
static __forceinline void encode(u32& data, u32 value)
|
||||
static force_inline void encode(u32& data, u32 value)
|
||||
{
|
||||
data &= ~CodeField<from, to>::mask;
|
||||
data |= ((value >> offset) << CodeField<from, to>::shift) & CodeField<from, to>::mask;
|
||||
|
|
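A standalone illustration of the shift/mask arithmetic used by CodeField above: field positions follow the PowerPC convention where bit 0 is the most significant bit of the 32-bit word, so the field sits at shift = 31 - to with (to - from + 1) mask bits. The example opcode value is arbitrary.

    #include <cstdint>
    #include <cstdio>

    template<std::uint32_t from, std::uint32_t to>
    struct code_field
    {
        static_assert(from <= to && to <= 31, "invalid field");

        static const std::uint32_t shift = 31 - to;
        static const std::uint32_t mask = ((1ull << (to - from + 1)) - 1) << shift;

        static std::uint32_t decode(std::uint32_t data)
        {
            return (data & mask) >> shift;
        }

        static void encode(std::uint32_t& data, std::uint32_t value)
        {
            data &= ~mask;
            data |= (value << shift) & mask;
        }
    };

    int main()
    {
        // bits 6..10 of a PPC instruction hold the RT/RD register number
        using rd_field = code_field<6, 10>;

        std::uint32_t opcode = 0;
        rd_field::encode(opcode, 31); // set RD = 31
        std::printf("0x%08x -> rd=%u\n", static_cast<unsigned>(opcode), static_cast<unsigned>(rd_field::decode(opcode)));
    }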
|
@ -24,7 +24,7 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
__forceinline __m128 operator [] (s32 scale) const
|
||||
force_inline __m128 operator [] (s32 scale) const
|
||||
{
|
||||
return m_data[scale + 31];
|
||||
}
|
||||
|
|
|
@ -2004,19 +2004,10 @@ void Compiler::BC(u32 bo, u32 bi, s32 bd, u32 aa, u32 lk) {
|
|||
}
|
||||
|
||||
void Compiler::HACK(u32 index) {
|
||||
if (index & EIF_SAVE_RTOC) {
|
||||
auto addr_i64 = (Value *)m_ir_builder->getInt64(0x28);
|
||||
auto ra_i64 = GetGpr(1);
|
||||
addr_i64 = m_ir_builder->CreateAdd(ra_i64, addr_i64);
|
||||
|
||||
WriteMemory(addr_i64, GetGpr(2, 64));
|
||||
}
|
||||
Call<void>("execute_ppu_func_by_index", &execute_ppu_func_by_index, m_state.args[CompileTaskState::Args::State], m_ir_builder->getInt32(index & ~EIF_FLAGS));
|
||||
if (index & EIF_PERFORM_BLR) {
|
||||
auto lr_i64 = GetLr();
|
||||
lr_i64 = m_ir_builder->CreateAnd(lr_i64, ~0x3ULL);
|
||||
auto lr_i32 = m_ir_builder->CreateTrunc(lr_i64, m_ir_builder->getInt32Ty());
|
||||
CreateBranch(nullptr, lr_i32, false, true);
|
||||
Call<void>("execute_ppu_func_by_index", &execute_ppu_func_by_index, m_state.args[CompileTaskState::Args::State], m_ir_builder->getInt32(index & EIF_USE_BRANCH ? index : index & ~EIF_PERFORM_BLR));
|
||||
if (index & EIF_PERFORM_BLR || index & EIF_USE_BRANCH) {
|
||||
auto lr_i32 = index & EIF_USE_BRANCH ? GetPc() : m_ir_builder->CreateTrunc(m_ir_builder->CreateAnd(GetLr(), ~0x3ULL), m_ir_builder->getInt32Ty());
|
||||
CreateBranch(nullptr, lr_i32, false, (index & EIF_USE_BRANCH) == 0);
|
||||
}
|
||||
// copied from Compiler::SC()
|
||||
//auto ret_i1 = Call<bool>("PollStatus", m_poll_status_function, m_state.args[CompileTaskState::Args::State]);
|
||||
|
@ -6089,9 +6080,10 @@ BranchType ppu_recompiler_llvm::GetBranchTypeFromInstruction(u32 instruction) {
|
|||
} else if (field2 == 528) {
|
||||
type = lk ? BranchType::FunctionCall : BranchType::LocalBranch;
|
||||
}
|
||||
} else if (field1 == 1 && (instruction & EIF_PERFORM_BLR)) {
|
||||
type = BranchType::Return;
|
||||
} else if (field1 == 1 && (instruction & EIF_PERFORM_BLR)) { // classify HACK instruction
|
||||
type = instruction & EIF_USE_BRANCH ? BranchType::FunctionCall : BranchType::Return;
|
||||
} else if (field1 == 1 && (instruction & EIF_USE_BRANCH)) {
|
||||
type = BranchType::LocalBranch;
|
||||
}
|
||||
|
||||
return type;
|
||||
}
|
||||
|
|
|
@ -847,12 +847,12 @@ struct cast_ppu_gpr
|
|||
|
||||
typedef typename std::underlying_type<T>::type underlying_type;
|
||||
|
||||
__forceinline static u64 to_gpr(const T& value)
|
||||
force_inline static u64 to_gpr(const T& value)
|
||||
{
|
||||
return cast_ppu_gpr<underlying_type>::to_gpr(static_cast<underlying_type>(value));
|
||||
}
|
||||
|
||||
__forceinline static T from_gpr(const u64 reg)
|
||||
force_inline static T from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<T>(cast_ppu_gpr<underlying_type>::from_gpr(reg));
|
||||
}
|
||||
|
@ -861,12 +861,12 @@ struct cast_ppu_gpr
|
|||
template<>
|
||||
struct cast_ppu_gpr<u8, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const u8& value)
|
||||
force_inline static u64 to_gpr(const u8& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u8 from_gpr(const u64 reg)
|
||||
force_inline static u8 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<u8>(reg);
|
||||
}
|
||||
|
@ -875,12 +875,12 @@ struct cast_ppu_gpr<u8, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<u16, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const u16& value)
|
||||
force_inline static u64 to_gpr(const u16& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u16 from_gpr(const u64 reg)
|
||||
force_inline static u16 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<u16>(reg);
|
||||
}
|
||||
|
@ -889,12 +889,12 @@ struct cast_ppu_gpr<u16, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<u32, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const u32& value)
|
||||
force_inline static u64 to_gpr(const u32& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u32 from_gpr(const u64 reg)
|
||||
force_inline static u32 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<u32>(reg);
|
||||
}
|
||||
|
@ -904,12 +904,12 @@ struct cast_ppu_gpr<u32, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<unsigned long, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const unsigned long& value)
|
||||
force_inline static u64 to_gpr(const unsigned long& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static unsigned long from_gpr(const u64 reg)
|
||||
force_inline static unsigned long from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<unsigned long>(reg);
|
||||
}
|
||||
|
@ -919,12 +919,12 @@ struct cast_ppu_gpr<unsigned long, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<u64, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const u64& value)
|
||||
force_inline static u64 to_gpr(const u64& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static u64 from_gpr(const u64 reg)
|
||||
force_inline static u64 from_gpr(const u64 reg)
|
||||
{
|
||||
return reg;
|
||||
}
|
||||
|
@ -933,12 +933,12 @@ struct cast_ppu_gpr<u64, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<s8, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const s8& value)
|
||||
force_inline static u64 to_gpr(const s8& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s8 from_gpr(const u64 reg)
|
||||
force_inline static s8 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<s8>(reg);
|
||||
}
|
||||
|
@ -947,12 +947,12 @@ struct cast_ppu_gpr<s8, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<s16, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const s16& value)
|
||||
force_inline static u64 to_gpr(const s16& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s16 from_gpr(const u64 reg)
|
||||
force_inline static s16 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<s16>(reg);
|
||||
}
|
||||
|
@ -961,12 +961,12 @@ struct cast_ppu_gpr<s16, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<s32, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const s32& value)
|
||||
force_inline static u64 to_gpr(const s32& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s32 from_gpr(const u64 reg)
|
||||
force_inline static s32 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<s32>(reg);
|
||||
}
|
||||
|
@ -975,12 +975,12 @@ struct cast_ppu_gpr<s32, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<s64, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const s64& value)
|
||||
force_inline static u64 to_gpr(const s64& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static s64 from_gpr(const u64 reg)
|
||||
force_inline static s64 from_gpr(const u64 reg)
|
||||
{
|
||||
return static_cast<s64>(reg);
|
||||
}
|
||||
|
@ -989,25 +989,25 @@ struct cast_ppu_gpr<s64, false>
|
|||
template<>
|
||||
struct cast_ppu_gpr<bool, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const bool& value)
|
||||
force_inline static u64 to_gpr(const bool& value)
|
||||
{
|
||||
return value;
|
||||
}
|
||||
|
||||
__forceinline static bool from_gpr(const u64& reg)
|
||||
force_inline static bool from_gpr(const u64& reg)
|
||||
{
|
||||
return reinterpret_cast<const bool&>(reg);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
__forceinline u64 cast_to_ppu_gpr(const T& value)
|
||||
force_inline u64 cast_to_ppu_gpr(const T& value)
|
||||
{
|
||||
return cast_ppu_gpr<T>::to_gpr(value);
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
__forceinline T cast_from_ppu_gpr(const u64 reg)
|
||||
force_inline T cast_from_ppu_gpr(const u64 reg)
|
||||
{
|
||||
return cast_ppu_gpr<T>::from_gpr(reg);
|
||||
}
|
||||
|
|
|
@ -9,7 +9,7 @@ enum : u32
|
|||
RAW_SPU_PROB_OFFSET = 0x00040000,
|
||||
};
|
||||
|
||||
__forceinline static u32 GetRawSPURegAddrByNum(int num, int offset)
|
||||
force_inline static u32 GetRawSPURegAddrByNum(int num, int offset)
|
||||
{
|
||||
return RAW_SPU_OFFSET * num + RAW_SPU_BASE_ADDR + RAW_SPU_PROB_OFFSET + offset;
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
File diff suppressed because it is too large
|
@ -50,7 +50,7 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
__forceinline spu_inter_func_t operator [] (u32 opcode) const
|
||||
force_inline spu_inter_func_t operator [] (u32 opcode) const
|
||||
{
|
||||
return funcs[opcode >> 21];
|
||||
}
|
||||
|
@ -760,7 +760,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(data);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(data);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -806,7 +806,7 @@ void SPUThread::set_ch_value(u32 ch, u32 value)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(data);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(data);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -1054,7 +1054,7 @@ void SPUThread::stop_and_signal(u32 code)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
std::shared_ptr<event_queue_t> queue;
|
||||
std::shared_ptr<lv2_event_queue_t> queue;
|
||||
|
||||
for (auto& v : this->spuq)
|
||||
{
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
#include "Emu/Cell/SPUContext.h"
|
||||
#include "MFC.h"
|
||||
|
||||
struct event_queue_t;
|
||||
struct lv2_event_queue_t;
|
||||
struct spu_group_t;
|
||||
|
||||
// SPU Channels
|
||||
|
@ -137,7 +137,7 @@ union spu_channel_t
|
|||
u32 value;
|
||||
};
|
||||
|
||||
atomic_t<sync_var_t> sync_var; // atomic variable
|
||||
atomic<sync_var_t> sync_var; // atomic variable
|
||||
|
||||
public:
|
||||
bool push(u32 value)
|
||||
|
@ -223,8 +223,8 @@ struct spu_channel_4_t
|
|||
u32 value2;
|
||||
};
|
||||
|
||||
atomic_le_t<sync_var_t> sync_var;
|
||||
atomic_le_t<u32> value3;
|
||||
atomic<sync_var_t> sync_var;
|
||||
atomic<u32> value3;
|
||||
|
||||
public:
|
||||
void clear()
|
||||
|
@ -280,10 +280,10 @@ public:
|
|||
|
||||
struct spu_interrupt_tag_t
|
||||
{
|
||||
atomic_le_t<u64> mask;
|
||||
atomic_le_t<u64> stat;
|
||||
atomic<u64> mask;
|
||||
atomic<u64> stat;
|
||||
|
||||
atomic_le_t<s32> assigned;
|
||||
atomic<s32> assigned;
|
||||
|
||||
std::mutex handler_mutex;
|
||||
std::condition_variable cond;
|
||||
|
@ -338,7 +338,7 @@ struct g_spu_imm_table_t
|
|||
}
|
||||
}
|
||||
|
||||
__forceinline __m128 operator [] (s32 scale) const
|
||||
force_inline __m128 operator [] (s32 scale) const
|
||||
{
|
||||
return m_data[scale + 155];
|
||||
}
|
||||
|
@ -527,22 +527,22 @@ public:
|
|||
spu_channel_t ch_snr2; // SPU Signal Notification Register 2
|
||||
|
||||
u32 ch_event_mask;
|
||||
atomic_le_t<u32> ch_event_stat;
|
||||
atomic<u32> ch_event_stat;
|
||||
|
||||
u64 ch_dec_start_timestamp; // timestamp of writing decrementer value
|
||||
u32 ch_dec_value; // written decrementer value
|
||||
|
||||
atomic_le_t<u32> run_ctrl; // SPU Run Control register (only provided to get latest data written)
|
||||
atomic_le_t<u32> status; // SPU Status register
|
||||
atomic_le_t<u32> npc; // SPU Next Program Counter register
|
||||
atomic<u32> run_ctrl; // SPU Run Control register (only provided to get latest data written)
|
||||
atomic<u32> status; // SPU Status register
|
||||
atomic<u32> npc; // SPU Next Program Counter register
|
||||
|
||||
spu_interrupt_tag_t int0; // SPU Class 0 Interrupt Management
|
||||
spu_interrupt_tag_t int2; // SPU Class 2 Interrupt Management
|
||||
|
||||
std::weak_ptr<spu_group_t> tg; // SPU Thread Group
|
||||
|
||||
std::array<std::pair<u32, std::weak_ptr<event_queue_t>>, 32> spuq; // Event Queue Keys for SPU Thread
|
||||
std::weak_ptr<event_queue_t> spup[64]; // SPU Ports
|
||||
std::array<std::pair<u32, std::weak_ptr<lv2_event_queue_t>>, 32> spuq; // Event Queue Keys for SPU Thread
|
||||
std::weak_ptr<lv2_event_queue_t> spup[64]; // SPU Ports
|
||||
|
||||
void write_snr(bool number, u32 value)
|
||||
{
|
||||
|
|
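The spuq member above pairs each event-queue key with a std::weak_ptr, so a lookup only succeeds while the queue object is still owned elsewhere. A small self-contained sketch of that lookup pattern; toy_event_queue and the key/number values are invented for the sketch:

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <utility>

    struct toy_event_queue { std::uint64_t key; };

    int main()
    {
        std::array<std::pair<std::uint32_t, std::weak_ptr<toy_event_queue>>, 4> spuq{};

        auto queue = std::make_shared<toy_event_queue>(toy_event_queue{ 0x1234 });
        spuq[0] = { 42, queue }; // bind spuq number 42 to the queue

        for (const auto& v : spuq)
        {
            if (v.first == 42)
            {
                if (const auto q = v.second.lock()) // only while the queue is still alive
                {
                    std::printf("found queue with key 0x%llx\n", static_cast<unsigned long long>(q->key));
                }
            }
        }
    }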
|
@ -28,9 +28,9 @@ bool EventManager::CheckKey(u64 key)
|
|||
return eq_map.find(key) != eq_map.end();
|
||||
}
|
||||
|
||||
bool EventManager::RegisterKey(std::shared_ptr<event_queue_t>& data, u64 key)
|
||||
bool EventManager::RegisterKey(const std::shared_ptr<lv2_event_queue_t>& data)
|
||||
{
|
||||
if (!key)
|
||||
if (!data->key)
|
||||
{
|
||||
// always ok
|
||||
return true;
|
||||
|
@ -38,12 +38,12 @@ bool EventManager::RegisterKey(std::shared_ptr<event_queue_t>& data, u64 key)
|
|||
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
|
||||
if (eq_map.find(key) != eq_map.end())
|
||||
if (eq_map.find(data->key) != eq_map.end())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
eq_map[key] = data;
|
||||
eq_map[data->key] = data;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
@ -68,7 +68,7 @@ bool EventManager::UnregisterKey(u64 key)
|
|||
return false;
|
||||
}
|
||||
|
||||
std::shared_ptr<event_queue_t> EventManager::GetEventQueue(u64 key)
|
||||
std::shared_ptr<lv2_event_queue_t> EventManager::GetEventQueue(u64 key)
|
||||
{
|
||||
if (!key)
|
||||
{
|
||||
|
|
|
@ -1,18 +1,25 @@
|
|||
#pragma once
|
||||
|
||||
struct event_queue_t;
|
||||
struct lv2_event_queue_t;
|
||||
|
||||
class EventManager
|
||||
{
|
||||
std::mutex m_lock;
|
||||
std::unordered_map<u64, std::shared_ptr<event_queue_t>> eq_map;
|
||||
std::unordered_map<u64, std::shared_ptr<lv2_event_queue_t>> eq_map;
|
||||
|
||||
public:
|
||||
void Init();
|
||||
void Clear();
|
||||
bool CheckKey(u64 key);
|
||||
bool RegisterKey(std::shared_ptr<event_queue_t>& data, u64 key);
|
||||
bool RegisterKey(const std::shared_ptr<lv2_event_queue_t>& data);
|
||||
bool UnregisterKey(u64 key);
|
||||
|
||||
std::shared_ptr<event_queue_t> GetEventQueue(u64 key);
|
||||
template<typename... Args> std::shared_ptr<lv2_event_queue_t> MakeEventQueue(Args&&... args)
|
||||
{
|
||||
const auto queue = std::make_shared<lv2_event_queue_t>(args...);
|
||||
|
||||
return RegisterKey(queue) ? queue : nullptr;
|
||||
}
|
||||
|
||||
std::shared_ptr<lv2_event_queue_t> GetEventQueue(u64 key);
|
||||
};
|
||||
|
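A usage note on MakeEventQueue above: construction and key registration are now coupled, so the caller only receives a non-null queue when the key was actually free. A self-contained sketch of the same register-or-fail pattern; toy_queue and toy_event_manager are hypothetical names for the sketch only:

    #include <cstdint>
    #include <cstdio>
    #include <memory>
    #include <mutex>
    #include <unordered_map>
    #include <utility>

    struct toy_queue
    {
        std::uint64_t key;
        explicit toy_queue(std::uint64_t key) : key(key) {}
    };

    class toy_event_manager
    {
        std::mutex m_lock;
        std::unordered_map<std::uint64_t, std::shared_ptr<toy_queue>> eq_map;

    public:
        bool register_key(const std::shared_ptr<toy_queue>& data)
        {
            if (!data->key) return true; // key 0 means "not shared", always accepted

            std::lock_guard<std::mutex> lock(m_lock);
            return eq_map.emplace(data->key, data).second; // false if the key is taken
        }

        template<typename... Args> std::shared_ptr<toy_queue> make_event_queue(Args&&... args)
        {
            auto queue = std::make_shared<toy_queue>(std::forward<Args>(args)...);
            return register_key(queue) ? queue : nullptr;
        }
    };

    int main()
    {
        toy_event_manager em;

        const auto q1 = em.make_event_queue(0xCAFEull); // key is free -> non-null
        const auto q2 = em.make_event_queue(0xCAFEull); // same key -> nullptr

        std::printf("%d %d\n", q1 != nullptr, q2 != nullptr); // prints 1 0
    }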
|
|
@ -19,14 +19,14 @@ struct vfsStream
|
|||
|
||||
virtual u64 Write(const void* src, u64 count) = 0;
|
||||
|
||||
template<typename T> __forceinline bool SWrite(const T& data, u64 count = sizeof(T))
|
||||
template<typename T> force_inline bool SWrite(const T& data, u64 count = sizeof(T))
|
||||
{
|
||||
return Write(&data, count) == count;
|
||||
}
|
||||
|
||||
virtual u64 Read(void* dst, u64 count) = 0;
|
||||
|
||||
template<typename T> __forceinline bool SRead(T& data, u64 count = sizeof(T))
|
||||
template<typename T> force_inline bool SRead(T& data, u64 count = sizeof(T))
|
||||
{
|
||||
return Read(&data, count) == count;
|
||||
}
|
||||
|
|
|
@ -82,7 +82,7 @@ class vfsHDDFile
|
|||
|
||||
void WriteEntry(u64 block, const vfsHDD_Entry& data, const std::string& name);
|
||||
|
||||
__forceinline u32 GetMaxNameLen() const
|
||||
force_inline u32 GetMaxNameLen() const
|
||||
{
|
||||
return m_hdd_info.block_size - sizeof(vfsHDD_Entry);
|
||||
}
|
||||
|
@ -149,7 +149,7 @@ class vfsHDD : public vfsFileBase
|
|||
public:
|
||||
vfsHDD(vfsDevice* device, const std::string& hdd_path);
|
||||
|
||||
__forceinline u32 GetMaxNameLen() const
|
||||
force_inline u32 GetMaxNameLen() const
|
||||
{
|
||||
return m_hdd_info.block_size - sizeof(vfsHDD_Entry);
|
||||
}
|
||||
|
|
|
@ -2,210 +2,174 @@
|
|||
|
||||
#define ID_MANAGER_INCLUDED
|
||||
|
||||
enum IDType
|
||||
// ID type
|
||||
enum : u32
|
||||
{
|
||||
// Special objects
|
||||
TYPE_MEM,
|
||||
TYPE_MUTEX,
|
||||
TYPE_COND,
|
||||
TYPE_RWLOCK,
|
||||
TYPE_INTR_TAG,
|
||||
TYPE_INTR_SERVICE_HANDLE,
|
||||
TYPE_EVENT_QUEUE,
|
||||
TYPE_EVENT_PORT,
|
||||
TYPE_TRACE,
|
||||
TYPE_SPUIMAGE,
|
||||
TYPE_PRX,
|
||||
TYPE_SPUPORT,
|
||||
TYPE_LWMUTEX,
|
||||
TYPE_TIMER,
|
||||
TYPE_SEMAPHORE,
|
||||
TYPE_FS_FILE,
|
||||
TYPE_FS_DIR,
|
||||
TYPE_LWCOND,
|
||||
TYPE_EVENT_FLAG,
|
||||
|
||||
// Any other objects
|
||||
TYPE_OTHER,
|
||||
ID_TYPE_NONE = 0,
|
||||
};
|
||||
|
||||
class ID final
|
||||
// Helper template to detect type
|
||||
template<typename T> struct ID_type
|
||||
{
|
||||
const std::type_info& m_info;
|
||||
std::shared_ptr<void> m_data;
|
||||
IDType m_type;
|
||||
//static_assert(sizeof(T) == 0, "ID type not registered (use REG_ID_TYPE)");
|
||||
|
||||
static const u32 type = ID_TYPE_NONE; // default type
|
||||
};
|
||||
|
||||
class ID_data_t final
|
||||
{
|
||||
public:
|
||||
template<typename T> ID(std::shared_ptr<T>& data, const IDType type)
|
||||
: m_info(typeid(T))
|
||||
, m_data(data)
|
||||
, m_type(type)
|
||||
const std::shared_ptr<void> data;
|
||||
const std::type_info& info;
|
||||
const u32 type;
|
||||
|
||||
template<typename T> force_inline ID_data_t(std::shared_ptr<T> data, u32 type)
|
||||
: data(std::move(data))
|
||||
, info(typeid(T))
|
||||
, type(type)
|
||||
{
|
||||
}
|
||||
|
||||
ID()
|
||||
: m_info(typeid(void))
|
||||
, m_data(nullptr)
|
||||
, m_type(TYPE_OTHER)
|
||||
ID_data_t(const ID_data_t& right) = delete;
|
||||
|
||||
ID_data_t& operator =(const ID_data_t& right) = delete;
|
||||
|
||||
ID_data_t(ID_data_t&& right)
|
||||
: data(std::move(const_cast<std::shared_ptr<void>&>(right.data)))
|
||||
, info(right.info)
|
||||
, type(right.type)
|
||||
{
|
||||
}
|
||||
|
||||
ID(const ID& right) = delete;
|
||||
|
||||
ID(ID&& right)
|
||||
: m_info(right.m_info)
|
||||
, m_data(right.m_data)
|
||||
, m_type(right.m_type)
|
||||
{
|
||||
right.m_data = nullptr;
|
||||
right.m_type = TYPE_OTHER;
|
||||
}
|
||||
|
||||
ID& operator=(ID&& other) = delete;
|
||||
|
||||
const std::type_info& GetInfo() const
|
||||
{
|
||||
return m_info;
|
||||
}
|
||||
|
||||
template<typename T> std::shared_ptr<T> GetData() const
|
||||
{
|
||||
return std::static_pointer_cast<T>(m_data);
|
||||
}
|
||||
|
||||
IDType GetType() const
|
||||
{
|
||||
return m_type;
|
||||
}
|
||||
ID_data_t& operator =(ID_data_t&& other) = delete;
|
||||
};
|
||||
|
||||
class IdManager
|
||||
class ID_manager
|
||||
{
|
||||
static const u32 s_first_id = 1;
|
||||
static const u32 s_max_id = -1;
|
||||
|
||||
std::unordered_map<u32, ID> m_id_map;
|
||||
std::set<u32> m_types[TYPE_OTHER];
|
||||
std::mutex m_mutex;
|
||||
|
||||
u32 m_cur_id = s_first_id;
|
||||
std::unordered_map<u32, ID_data_t> m_id_map;
|
||||
u32 m_cur_id = 1; // first ID
|
||||
|
||||
public:
|
||||
template<typename T> bool CheckID(const u32 id)
|
||||
// check if ID exists and has specified type
|
||||
template<typename T> bool check_id(const u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
auto f = m_id_map.find(id);
|
||||
|
||||
return f != m_id_map.end() && f->second.GetInfo() == typeid(T);
|
||||
return f != m_id_map.end() && f->second.info == typeid(T);
|
||||
}
|
||||
|
||||
void Clear()
|
||||
// must be called from a constructor invoked through make() to obtain the ID being assigned to the current object
|
||||
u32 get_cur_id()
|
||||
{
|
||||
// if called correctly from make(), the mutex is locked
|
||||
// if called incorrectly, the mutex is most likely unlocked (otherwise a wrong ID is silently returned)
|
||||
|
||||
if (m_mutex.try_lock())
|
||||
{
|
||||
// schedule unlocking
|
||||
std::lock_guard<std::mutex> lock(m_mutex, std::adopt_lock);
|
||||
|
||||
throw "Invalid get_cur_id() usage";
|
||||
}
|
||||
|
||||
return m_cur_id;
|
||||
}
|
||||
|
||||
void clear()
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
m_id_map.clear();
|
||||
m_cur_id = s_first_id;
|
||||
m_cur_id = 1; // first ID
|
||||
}
|
||||
|
||||
template<typename T> u32 GetNewID(std::shared_ptr<T>& data, const IDType type = TYPE_OTHER)
|
||||
// add new ID using existing std::shared_ptr (not recommended, use make() instead)
|
||||
template<typename T> u32 add(std::shared_ptr<T> data, u32 type = ID_type<T>::type)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
m_id_map.emplace(m_cur_id, ID(data, type));
|
||||
|
||||
if (type < TYPE_OTHER)
|
||||
{
|
||||
m_types[type].insert(m_cur_id);
|
||||
}
|
||||
m_id_map.emplace(m_cur_id, ID_data_t(std::move(data), type));
|
||||
|
||||
return m_cur_id++;
|
||||
}
|
||||
|
||||
template<typename T> std::shared_ptr<T> GetIDData(const u32 id)
|
||||
// add new ID of specified type with specified constructor arguments (passed to std::make_shared<>)
|
||||
template<typename T, typename... Args> u32 make(Args&&... args)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
const u32 type = ID_type<T>::type;
|
||||
|
||||
m_id_map.emplace(m_cur_id, ID_data_t(std::make_shared<T>(args...), type));
|
||||
|
||||
return m_cur_id++;
|
||||
}
|
||||
|
||||
template<typename T> std::shared_ptr<T> get(u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
auto f = m_id_map.find(id);
|
||||
|
||||
if (f == m_id_map.end() || f->second.GetInfo() != typeid(T))
|
||||
if (f == m_id_map.end() || f->second.info != typeid(T))
|
||||
{
|
||||
return nullptr;
|
||||
}
|
||||
|
||||
return f->second.GetData<T>();
|
||||
return std::static_pointer_cast<T>(f->second.data);
|
||||
}
|
||||
|
||||
bool HasID(const u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
return m_id_map.find(id) != m_id_map.end();
|
||||
}
|
||||
|
||||
IDType GetIDType(const u32 id)
|
||||
template<typename T> bool remove(u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
auto item = m_id_map.find(id);
|
||||
|
||||
if (item == m_id_map.end())
|
||||
{
|
||||
return TYPE_OTHER;
|
||||
}
|
||||
|
||||
return item->second.GetType();
|
||||
}
|
||||
|
||||
template<typename T> bool RemoveID(const u32 id)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
auto item = m_id_map.find(id);
|
||||
|
||||
if (item == m_id_map.end() || item->second.GetInfo() != typeid(T))
|
||||
if (item == m_id_map.end() || item->second.info != typeid(T))
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
if (item->second.GetType() < TYPE_OTHER)
|
||||
{
|
||||
m_types[item->second.GetType()].erase(id);
|
||||
}
|
||||
|
||||
m_id_map.erase(item);
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
u32 GetTypeCount(IDType type)
|
||||
u32 get_count_by_type(u32 type)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
if (type < TYPE_OTHER)
|
||||
u32 result = 0;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
return (u32)m_types[type].size();
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(!"Invalid ID type");
|
||||
return 0;
|
||||
if (v.second.type == type)
|
||||
{
|
||||
result++;
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
std::set<u32> GetTypeIDs(IDType type)
|
||||
std::set<u32> get_IDs_by_type(u32 type)
|
||||
{
|
||||
// you cannot simply return reference to existing set
|
||||
std::lock_guard<std::mutex> lock(m_mutex);
|
||||
|
||||
if (type < TYPE_OTHER)
|
||||
std::set<u32> result;
|
||||
|
||||
for (auto& v : m_id_map)
|
||||
{
|
||||
return m_types[type];
|
||||
}
|
||||
else
|
||||
{
|
||||
assert(!"Invalid ID type");
|
||||
return std::set<u32>{};
|
||||
if (v.second.type == type)
|
||||
{
|
||||
result.insert(v.first);
|
||||
}
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
};
|
||||
|
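The heart of the ID manager rework above is that each slot stores a type-erased shared_ptr alongside its type_info, and get<T>() only succeeds when the stored type matches. A minimal self-contained sketch of that pattern; toy_id_manager, toy_mutex and toy_timer are hypothetical names for the sketch only:

    #include <cstdio>
    #include <memory>
    #include <typeinfo>
    #include <unordered_map>
    #include <utility>

    class toy_id_manager
    {
        struct slot
        {
            std::shared_ptr<void> data;      // type-erased object
            const std::type_info* info;      // type of the stored object
        };

        std::unordered_map<unsigned, slot> m_map;
        unsigned m_cur_id = 1;

    public:
        template<typename T, typename... Args> unsigned make(Args&&... args)
        {
            m_map.emplace(m_cur_id, slot{ std::make_shared<T>(std::forward<Args>(args)...), &typeid(T) });
            return m_cur_id++;
        }

        template<typename T> std::shared_ptr<T> get(unsigned id)
        {
            const auto found = m_map.find(id);

            if (found == m_map.end() || *found->second.info != typeid(T))
            {
                return nullptr; // unknown id or type mismatch
            }

            return std::static_pointer_cast<T>(found->second.data);
        }
    };

    struct toy_mutex { int owner = -1; };
    struct toy_timer { int period = 0; };

    int main()
    {
        toy_id_manager idm;

        const unsigned id = idm.make<toy_mutex>();

        std::printf("%d %d\n", idm.get<toy_mutex>(id) != nullptr,  // 1: correct type
                               idm.get<toy_timer>(id) != nullptr); // 0: type mismatch
    }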
|
|
@ -1,11 +1,5 @@
|
|||
#pragma once
|
||||
|
||||
#undef InterlockedExchange
|
||||
#undef InterlockedCompareExchange
|
||||
#undef InterlockedOr
|
||||
#undef InterlockedAnd
|
||||
#undef InterlockedXor
|
||||
|
||||
template<typename T, size_t size = sizeof(T)>
|
||||
struct _to_atomic_subtype
|
||||
{
|
||||
|
@ -51,73 +45,74 @@ union _atomic_base
|
|||
type data; // unsafe direct access
|
||||
subtype sub_data; // unsafe direct access to substitute type
|
||||
|
||||
__forceinline static const subtype to_subtype(const type& value)
|
||||
force_inline static const subtype to_subtype(const type& value)
|
||||
{
|
||||
return reinterpret_cast<const subtype&>(value);
|
||||
}
|
||||
|
||||
__forceinline static const type from_subtype(const subtype value)
|
||||
force_inline static const type from_subtype(const subtype value)
|
||||
{
|
||||
return reinterpret_cast<const type&>(value);
|
||||
}
|
||||
|
||||
__forceinline static type& to_type(subtype& value)
|
||||
force_inline static type& to_type(subtype& value)
|
||||
{
|
||||
return reinterpret_cast<type&>(value);
|
||||
}
|
||||
|
||||
public:
|
||||
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
|
||||
__forceinline const type compare_and_swap(const type& cmp, const type& exch) volatile
|
||||
force_inline const type compare_and_swap(const type& cmp, const type& exch) volatile
|
||||
{
|
||||
return from_subtype(InterlockedCompareExchange(&sub_data, to_subtype(exch), to_subtype(cmp)));
|
||||
return from_subtype(sync_val_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch)));
|
||||
}
|
||||
|
||||
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
|
||||
__forceinline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
|
||||
force_inline bool compare_and_swap_test(const type& cmp, const type& exch) volatile
|
||||
{
|
||||
return InterlockedCompareExchangeTest(&sub_data, to_subtype(exch), to_subtype(cmp));
|
||||
return sync_bool_compare_and_swap(&sub_data, to_subtype(cmp), to_subtype(exch));
|
||||
}
|
||||
|
||||
// read data with memory barrier
|
||||
__forceinline const type read_sync() const volatile
|
||||
force_inline const type read_sync() const volatile
|
||||
{
|
||||
return from_subtype(InterlockedCompareExchange(const_cast<subtype*>(&sub_data), 0, 0));
|
||||
const subtype zero = {};
|
||||
return from_subtype(sync_val_compare_and_swap(const_cast<subtype*>(&sub_data), zero, zero));
|
||||
}
|
||||
|
||||
// atomically replace data with exch, return previous data value
|
||||
__forceinline const type exchange(const type& exch) volatile
|
||||
force_inline const type exchange(const type& exch) volatile
|
||||
{
|
||||
return from_subtype(InterlockedExchange(&sub_data, to_subtype(exch)));
|
||||
return from_subtype(sync_lock_test_and_set(&sub_data, to_subtype(exch)));
|
||||
}
|
||||
|
||||
// read data without memory barrier
|
||||
__forceinline const type read_relaxed() const volatile
|
||||
force_inline const type read_relaxed() const volatile
|
||||
{
|
||||
const subtype value = const_cast<const subtype&>(sub_data);
|
||||
return from_subtype(value);
|
||||
}
|
||||
|
||||
// write data without memory barrier
|
||||
__forceinline void write_relaxed(const type& value) volatile
|
||||
force_inline void write_relaxed(const type& value) volatile
|
||||
{
|
||||
const_cast<subtype&>(sub_data) = to_subtype(value);
|
||||
}
|
||||
|
||||
// perform atomic operation on data
|
||||
template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
|
||||
template<typename FT> force_inline void atomic_op(const FT atomic_proc) volatile
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
const subtype old = const_cast<const subtype&>(sub_data);
|
||||
subtype _new = old;
|
||||
atomic_proc(to_type(_new)); // function should accept reference to T type
|
||||
if (InterlockedCompareExchangeTest(&sub_data, _new, old)) return;
|
||||
if (sync_bool_compare_and_swap(&sub_data, old, _new)) return;
|
||||
}
|
||||
}
|
||||
|
||||
// perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
|
||||
template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
|
||||
template<typename RT, typename FT> force_inline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
|
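The Interlocked* to sync_* change above swaps the Windows-only intrinsics for neutrally named helpers that are presumably declared in a platform header elsewhere in the tree, using the GCC builtin argument order (pointer, expected, desired). A hedged sketch of what such helpers could look like on GCC/Clang; the wrapper names only mirror the calls above, their real definitions are not part of this diff:

    #include <cstdint>
    #include <cstdio>

    // hypothetical GCC/Clang-side definitions mapped onto the __sync builtins
    // (note the (ptr, expected, desired) order, unlike InterlockedCompareExchange)
    template<typename T> inline T sync_val_compare_and_swap(volatile T* dst, T cmp, T exch)
    {
        return __sync_val_compare_and_swap(dst, cmp, exch); // returns the previous value
    }

    template<typename T> inline bool sync_bool_compare_and_swap(volatile T* dst, T cmp, T exch)
    {
        return __sync_bool_compare_and_swap(dst, cmp, exch); // true if the swap happened
    }

    template<typename T> inline T sync_lock_test_and_set(volatile T* dst, T value)
    {
        return __sync_lock_test_and_set(dst, value); // atomic exchange (acquire barrier on most targets)
    }

    int main()
    {
        volatile std::uint32_t word = 1;

        const std::uint32_t seen = sync_val_compare_and_swap(&word, std::uint32_t{1}, std::uint32_t{2});
        const bool swapped = sync_bool_compare_and_swap(&word, std::uint32_t{2}, std::uint32_t{3});

        std::printf("%u %d %u\n", seen, swapped, word); // prints 1 1 3
    }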
@ -125,155 +120,114 @@ public:
|
|||
subtype _new = old;
|
||||
auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
|
||||
if (res != proceed_value) return res;
|
||||
if (InterlockedCompareExchangeTest(&sub_data, _new, old)) return proceed_value;
|
||||
if (sync_bool_compare_and_swap(&sub_data, old, _new)) return proceed_value;
|
||||
}
|
||||
}
|
||||
|
||||
// perform atomic operation on data with additional memory barrier
|
||||
template<typename FT> __forceinline void atomic_op_sync(const FT atomic_proc) volatile
|
||||
template<typename FT> force_inline void atomic_op_sync(const FT atomic_proc) volatile
|
||||
{
|
||||
subtype old = InterlockedCompareExchange(&sub_data, 0, 0);
|
||||
const subtype zero = {};
|
||||
subtype old = sync_val_compare_and_swap(&sub_data, zero, zero);
|
||||
while (true)
|
||||
{
|
||||
subtype _new = old;
|
||||
atomic_proc(to_type(_new)); // function should accept reference to T type
|
||||
const subtype val = InterlockedCompareExchange(&sub_data, _new, old);
|
||||
const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
|
||||
if (val == old) return;
|
||||
old = val;
|
||||
}
|
||||
}
|
||||
|
||||
// perform atomic operation on data with additional memory barrier and special exit condition (if intermediate result != proceed_value)
|
||||
template<typename RT, typename FT> __forceinline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
|
||||
template<typename RT, typename FT> force_inline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
|
||||
{
|
||||
subtype old = InterlockedCompareExchange(&sub_data, 0, 0);
|
||||
const subtype zero = {};
|
||||
subtype old = sync_val_compare_and_swap(&sub_data, zero, zero);
|
||||
while (true)
|
||||
{
|
||||
subtype _new = old;
|
||||
auto res = static_cast<RT>(atomic_proc(to_type(_new))); // function should accept reference to T type and return some value
|
||||
if (res != proceed_value) return res;
|
||||
const subtype val = InterlockedCompareExchange(&sub_data, _new, old);
|
||||
const subtype val = sync_val_compare_and_swap(&sub_data, old, _new);
|
||||
if (val == old) return proceed_value;
|
||||
old = val;
|
||||
}
|
||||
}
|
||||
|
||||
// atomic bitwise OR, returns previous data
|
||||
__forceinline const type _or(const type& right) volatile
|
||||
force_inline const type _or(const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedOr(&sub_data, to_subtype(right)));
|
||||
return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)));
|
||||
}
|
||||
|
||||
// atomic bitwise AND, returns previous data
|
||||
__forceinline const type _and(const type& right) volatile
|
||||
force_inline const type _and(const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedAnd(&sub_data, to_subtype(right)));
|
||||
return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)));
|
||||
}
|
||||
|
||||
// atomic bitwise AND NOT (inverts right argument), returns previous data
|
||||
__forceinline const type _and_not(const type& right) volatile
|
||||
force_inline const type _and_not(const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedAnd(&sub_data, ~to_subtype(right)));
|
||||
return from_subtype(sync_fetch_and_and(&sub_data, ~to_subtype(right)));
|
||||
}
|
||||
|
||||
// atomic bitwise XOR, returns previous data
|
||||
__forceinline const type _xor(const type& right) volatile
|
||||
force_inline const type _xor(const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedXor(&sub_data, to_subtype(right)));
|
||||
return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)));
|
||||
}
|
||||
|
||||
__forceinline const type operator |= (const type& right) volatile
|
||||
force_inline const type operator |= (const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedOr(&sub_data, to_subtype(right)) | to_subtype(right));
|
||||
return from_subtype(sync_fetch_and_or(&sub_data, to_subtype(right)) | to_subtype(right));
|
||||
}
|
||||
|
||||
__forceinline const type operator &= (const type& right) volatile
|
||||
force_inline const type operator &= (const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedAnd(&sub_data, to_subtype(right)) & to_subtype(right));
|
||||
return from_subtype(sync_fetch_and_and(&sub_data, to_subtype(right)) & to_subtype(right));
|
||||
}
|
||||
|
||||
__forceinline const type operator ^= (const type& right) volatile
|
||||
force_inline const type operator ^= (const type& right) volatile
|
||||
{
|
||||
return from_subtype(InterlockedXor(&sub_data, to_subtype(right)) ^ to_subtype(right));
|
||||
return from_subtype(sync_fetch_and_xor(&sub_data, to_subtype(right)) ^ to_subtype(right));
|
||||
}
|
||||
};
|
||||
|
||||
// Helper definitions
|
||||
template<typename T, typename T2 = T> using if_arithmetic_t = const typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, T>::type;
|
||||
|
||||
template<typename T, typename T2 = T> using if_arithmetic_le_t = const typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, le_t<T>>::type;
|
||||
template<typename T, typename T2 = T> using if_arithmetic_be_t = const typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, be_t<T>>::type;
|
||||
template<typename T, typename T2 = T> using if_arithmetic_atomic_t = typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, _atomic_base<T>&>::type;
|
||||
template<typename T, typename T2 = T> using if_arithmetic_atomic_be_t = typename std::enable_if<std::is_arithmetic<T>::value && std::is_arithmetic<T2>::value, _atomic_base<be_t<T>>&>::type;
|
||||
|
||||
template<typename T> inline static if_arithmetic_t<T> operator ++(_atomic_base<T>& left)
|
||||
template<typename T> inline static if_arithmetic_le_t<T> operator ++(_atomic_base<le_t<T>>& left)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result](T& value)
|
||||
{
|
||||
result = ++value;
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1) + 1);
|
||||
}
|
||||
|
||||
template<typename T> inline static if_arithmetic_t<T> operator --(_atomic_base<T>& left)
|
||||
template<typename T> inline static if_arithmetic_le_t<T> operator --(_atomic_base<le_t<T>>& left)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result](T& value)
|
||||
{
|
||||
result = --value;
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1) - 1);
|
||||
}
|
||||
|
||||
template<typename T> inline static if_arithmetic_t<T> operator ++(_atomic_base<T>& left, int)
|
||||
template<typename T> inline static if_arithmetic_le_t<T> operator ++(_atomic_base<le_t<T>>& left, int)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result](T& value)
|
||||
{
|
||||
result = value++;
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_add(&left.sub_data, 1));
|
||||
}
|
||||
|
||||
template<typename T> inline static if_arithmetic_t<T> operator --(_atomic_base<T>& left, int)
|
||||
template<typename T> inline static if_arithmetic_le_t<T> operator --(_atomic_base<le_t<T>>& left, int)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result](T& value)
|
||||
{
|
||||
result = value--;
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_sub(&left.sub_data, 1));
|
||||
}
|
||||
|
||||
template<typename T, typename T2> inline static if_arithmetic_t<T, T2> operator +=(_atomic_base<T>& left, T2 right)
|
||||
template<typename T, typename T2> inline static if_arithmetic_le_t<T, T2> operator +=(_atomic_base<le_t<T>>& left, T2 right)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result, right](T& value)
|
||||
{
|
||||
result = (value += right);
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_add(&left.sub_data, right) + right);
|
||||
}
|
||||
|
||||
template<typename T, typename T2> inline static if_arithmetic_t<T, T2> operator -=(_atomic_base<T>& left, T2 right)
|
||||
template<typename T, typename T2> inline static if_arithmetic_le_t<T, T2> operator -=(_atomic_base<le_t<T>>& left, T2 right)
|
||||
{
|
||||
T result;
|
||||
|
||||
left.atomic_op([&result, right](T& value)
|
||||
{
|
||||
result = (value -= right);
|
||||
});
|
||||
|
||||
return result;
|
||||
return left.from_subtype(sync_fetch_and_sub(&left.sub_data, right) - right);
|
||||
}
|
||||
|
||||
template<typename T> inline static if_arithmetic_be_t<T> operator ++(_atomic_base<be_t<T>>& left)
|
||||
|
@ -348,18 +302,8 @@ template<typename T, typename T2> inline static if_arithmetic_be_t<T, T2> operat
|
|||
return result;
|
||||
}
|
||||
|
||||
template<typename T> using atomic_le_t = _atomic_base<T>;
|
||||
template<typename T> using atomic = _atomic_base<T>; // Atomic Type with native endianness (for emulator memory)
|
||||
|
||||
template<typename T> using atomic_be_t = _atomic_base<typename to_be_t<T>::type>;
|
||||
template<typename T> using atomic_be_t = _atomic_base<typename to_be_t<T>::type>; // Atomic BE Type (for PS3 virtual memory)
|
||||
|
||||
namespace ps3
|
||||
{
|
||||
template<typename T> using atomic_t = atomic_be_t<T>;
|
||||
}
|
||||
|
||||
namespace psv
|
||||
{
|
||||
template<typename T> using atomic_t = atomic_le_t<T>;
|
||||
}
|
||||
|
||||
using namespace ps3;
|
||||
template<typename T> using atomic_le_t = _atomic_base<typename to_le_t<T>::type>; // Atomic LE Type (for PSV virtual memory)
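// Illustrative sketch (not part of the commit): pick the alias that matches the memory
// being accessed; the ps3/psv namespaces above then expose the matching one as atomic_t.
static void atomic_alias_example()
{
	atomic<u32> host_state;       // emulator-side state kept in native endianness
	atomic_be_t<u32> guest_flags; // value stored big-endian inside PS3 virtual memory
	psv::atomic_t<u32> psv_state; // resolves to the LE alias used for PSV virtual memory

	host_state.write_relaxed(0);
	host_state._or(0x1);          // atomic read-modify-write on the native representation

	(void)guest_flags;
	(void)psv_state;
}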
|
||||
|
|
|
@ -1,196 +0,0 @@
|
|||
#pragma once
|
||||
#include "atomic.h"
|
||||
|
||||
// run endless loop for debugging
|
||||
__forceinline static void deadlock()
|
||||
{
|
||||
while (true)
|
||||
{
|
||||
std::this_thread::yield();
|
||||
}
|
||||
}
|
||||
|
||||
template<typename T>
|
||||
class ref_t;
|
||||
|
||||
template<typename T>
|
||||
class refcounter_t // non-relocatable "smart" pointer with a reference counter
|
||||
{
|
||||
public:
|
||||
typedef T type, * p_type;
|
||||
typedef refcounter_t<T> rc_type;
|
||||
|
||||
// counter > 0, ptr != nullptr : object exists and shared
|
||||
// counter > 0, ptr == nullptr : object exists and shared, but not owned by refcounter_t
|
||||
// counter == 0, ptr != nullptr : object exists and not shared
|
||||
// counter == 0, ptr == nullptr : object doesn't exist
|
||||
// counter < 0 : bad state, used to provoke error for debugging
|
||||
|
||||
struct sync_var_t
|
||||
{
|
||||
s64 counter;
|
||||
p_type ptr;
|
||||
};
|
||||
|
||||
private:
|
||||
atomic_le_t<sync_var_t> m_var;
|
||||
|
||||
friend class ref_t<T>;
|
||||
|
||||
// try to share the object (increment the counter); returns nullptr if it doesn't exist or cannot be shared
|
||||
__forceinline p_type ref_inc()
|
||||
{
|
||||
p_type out_ptr;
|
||||
|
||||
m_var.atomic_op([&out_ptr](sync_var_t& v)
|
||||
{
|
||||
assert(v.counter >= 0);
|
||||
|
||||
if ((out_ptr = v.ptr))
|
||||
{
|
||||
v.counter++;
|
||||
}
|
||||
});
|
||||
|
||||
return out_ptr;
|
||||
}
|
||||
|
||||
// try to release a previously shared object (decrement the counter); returns true if it should be deleted
|
||||
__forceinline bool ref_dec()
|
||||
{
|
||||
bool do_delete;
|
||||
|
||||
m_var.atomic_op([&do_delete](sync_var_t& v)
|
||||
{
|
||||
assert(v.counter > 0);
|
||||
|
||||
do_delete = !--v.counter && !v.ptr;
|
||||
});
|
||||
|
||||
return do_delete;
|
||||
}
|
||||
|
||||
public:
|
||||
refcounter_t()
|
||||
{
|
||||
// initialize ref counter
|
||||
m_var.write_relaxed({ 0, nullptr });
|
||||
}
|
||||
|
||||
~refcounter_t()
|
||||
{
|
||||
// set bad state
|
||||
auto ref = m_var.exchange({ -1, nullptr });
|
||||
|
||||
// finalize
|
||||
if (ref.counter)
|
||||
{
|
||||
deadlock();
|
||||
}
|
||||
else if (ref.ptr)
|
||||
{
|
||||
delete ref.ptr;
|
||||
}
|
||||
}
|
||||
|
||||
refcounter_t(const rc_type& right) = delete;
|
||||
refcounter_t(rc_type&& right_rv) = delete;
|
||||
|
||||
rc_type& operator =(const rc_type& right) = delete;
|
||||
rc_type& operator =(rc_type&& right_rv) = delete;
|
||||
|
||||
public:
|
||||
// try to set a new object (if one doesn't already exist)
|
||||
bool try_set(p_type ptr)
|
||||
{
|
||||
return m_var.compare_and_swap_test({ 0, nullptr }, { 0, ptr });
|
||||
}
|
||||
|
||||
// try to remove the object (if it exists)
|
||||
bool try_remove()
|
||||
{
|
||||
bool out_res;
|
||||
p_type out_ptr;
|
||||
|
||||
m_var.atomic_op([&out_res, &out_ptr](sync_var_t& v)
|
||||
{
|
||||
out_res = (out_ptr = v.ptr);
|
||||
|
||||
if (v.counter)
|
||||
{
|
||||
out_ptr = nullptr;
|
||||
}
|
||||
|
||||
v.ptr = nullptr;
|
||||
});
|
||||
|
||||
if (out_ptr)
|
||||
{
|
||||
delete out_ptr;
|
||||
}
|
||||
|
||||
return out_res;
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class ref_t
|
||||
{
|
||||
public:
|
||||
typedef T type, * p_type;
|
||||
typedef refcounter_t<T> * rc_type;
|
||||
|
||||
private:
|
||||
rc_type m_rc;
|
||||
p_type m_ptr;
|
||||
|
||||
public:
|
||||
ref_t()
|
||||
: m_rc(nullptr)
|
||||
, m_ptr(nullptr)
|
||||
{
|
||||
}
|
||||
|
||||
ref_t(rc_type rc)
|
||||
: m_rc(rc)
|
||||
, m_ptr(rc->ref_inc())
|
||||
{
|
||||
}
|
||||
|
||||
~ref_t()
|
||||
{
|
||||
if (m_ptr && m_rc->ref_dec())
|
||||
{
|
||||
delete m_ptr;
|
||||
}
|
||||
}
|
||||
|
||||
ref_t(const ref_t& right) = delete;
|
||||
|
||||
ref_t(ref_t&& right_rv)
|
||||
: m_rc(right_rv.m_rc)
|
||||
, m_ptr(right_rv.m_ptr)
|
||||
{
|
||||
right_rv.m_rc = nullptr;
|
||||
right_rv.m_ptr = nullptr;
|
||||
}
|
||||
|
||||
ref_t& operator =(const ref_t& right) = delete;
|
||||
ref_t& operator =(ref_t&& right_rv) = delete;
|
||||
|
||||
public:
|
||||
T& operator *() const
|
||||
{
|
||||
return *m_ptr;
|
||||
}
|
||||
|
||||
T* operator ->() const
|
||||
{
|
||||
return m_ptr;
|
||||
}
|
||||
|
||||
explicit operator bool() const
|
||||
{
|
||||
return m_ptr;
|
||||
}
|
||||
};
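// Illustrative sketch (not part of the commit): the intended usage pattern of
// refcounter_t / ref_t. "my_object_t" and the file-scope counter are placeholders.
struct my_object_t { int value = 0; };

static refcounter_t<my_object_t> g_object;

static void refcounter_example()
{
	const auto ptr = new my_object_t;

	if (!g_object.try_set(ptr))
	{
		delete ptr; // another thread already published an object
	}

	{
		ref_t<my_object_t> ref(&g_object); // shares the object, bumping the counter

		if (ref)
		{
			ref->value++;
		}
	} // counter dropped here; deletion happens only if the object was already removed

	g_object.try_remove(); // deletion is deferred while other ref_t instances exist
}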
|
|
@ -75,7 +75,7 @@ namespace vm
|
|||
void* g_base_addr = (atexit(finalize), initialize());
|
||||
void* g_priv_addr;
|
||||
|
||||
std::array<atomic_le_t<u8>, 0x100000000ull / 4096> g_page_info = {}; // information about every page
|
||||
std::array<atomic<u8>, 0x100000000ull / 4096> g_page_info = {}; // information about every page
|
||||
|
||||
class reservation_mutex_t
|
||||
{
|
||||
|
@ -91,7 +91,7 @@ namespace vm
|
|||
|
||||
bool do_notify;
|
||||
|
||||
__noinline void lock()
|
||||
never_inline void lock()
|
||||
{
|
||||
NamedThreadBase* owner = GetCurrentNamedThread();
|
||||
NamedThreadBase* old = nullptr;
|
||||
|
@ -113,7 +113,7 @@ namespace vm
|
|||
do_notify = true;
|
||||
}
|
||||
|
||||
__noinline void unlock()
|
||||
never_inline void unlock()
|
||||
{
|
||||
NamedThreadBase* owner = GetCurrentNamedThread();
|
||||
|
||||
|
|
|
@ -90,14 +90,14 @@ namespace vm
|
|||
|
||||
u32 get_addr(const void* real_pointer);
|
||||
|
||||
__noinline void error(const u64 addr, const char* func);
|
||||
never_inline void error(const u64 addr, const char* func);
|
||||
|
||||
template<typename T>
|
||||
struct cast_ptr
|
||||
{
|
||||
static_assert(std::is_same<T, u32>::value, "Unsupported vm::cast() type");
|
||||
|
||||
__forceinline static u32 cast(const T& addr, const char* func)
|
||||
force_inline static u32 cast(const T& addr, const char* func)
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
@ -106,7 +106,7 @@ namespace vm
|
|||
template<>
|
||||
struct cast_ptr<u32>
|
||||
{
|
||||
__forceinline static u32 cast(const u32 addr, const char* func)
|
||||
force_inline static u32 cast(const u32 addr, const char* func)
|
||||
{
|
||||
return addr;
|
||||
}
|
||||
|
@ -115,7 +115,7 @@ namespace vm
|
|||
template<>
|
||||
struct cast_ptr<u64>
|
||||
{
|
||||
__forceinline static u32 cast(const u64 addr, const char* func)
|
||||
force_inline static u32 cast(const u64 addr, const char* func)
|
||||
{
|
||||
const u32 res = static_cast<u32>(addr);
|
||||
if (res != addr)
|
||||
|
@ -130,14 +130,14 @@ namespace vm
|
|||
template<typename T, typename T2>
|
||||
struct cast_ptr<be_t<T, T2>>
|
||||
{
|
||||
__forceinline static u32 cast(const be_t<T, T2>& addr, const char* func)
|
||||
force_inline static u32 cast(const be_t<T, T2>& addr, const char* func)
|
||||
{
|
||||
return cast_ptr<T>::cast(addr.value(), func);
|
||||
}
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
__forceinline static u32 cast(const T& addr, const char* func = "vm::cast")
|
||||
force_inline static u32 cast(const T& addr, const char* func = "vm::cast")
|
||||
{
|
||||
return cast_ptr<T>::cast(addr, func);
|
||||
}
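// Illustrative sketch (not part of the commit): vm::cast() funnels the different address
// representations through the cast_ptr<> specializations above, so guest memory is always
// addressed with a plain u32. The address values are arbitrary examples.
static void cast_example()
{
	const u64 wide_addr = 0x10000;

	const u32 a1 = vm::cast(u32{ 0x10000 }); // u32 passes through unchanged
	const u32 a2 = vm::cast(wide_addr);      // u64 is narrowed after verifying that it fits;
	                                         // be_t<> addresses are unwrapped first, then dispatched
	(void)a1;
	(void)a2;
}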
|
||||
|
@ -298,10 +298,16 @@ namespace vm
|
|||
u32 alloc_offset;
|
||||
|
||||
template<typename T = char>
|
||||
ptr<T> alloc(u32 count) const
|
||||
ptr<T> alloc(u32 count = 1) const
|
||||
{
|
||||
return ptr<T>::make(allocator(count * sizeof(T)));
|
||||
}
|
||||
|
||||
template<typename T = char>
|
||||
ptr<T> fixed_alloc(u32 addr, u32 count = 1) const
|
||||
{
|
||||
return ptr<T>::make(fixed_allocator(addr, count * sizeof(T)));
|
||||
}
|
||||
};
|
||||
|
||||
extern location_info g_locations[memory_location_count];
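// Illustrative sketch (not part of the commit): allocating guest memory through one of the
// g_locations entries; the entry is passed in by reference here to keep the example generic,
// and the fixed address is an arbitrary placeholder.
static void alloc_example(const vm::location_info& loc)
{
	auto str = loc.alloc<char>(256);                  // 256 bytes of guest memory
	auto val = loc.alloc<u32>();                      // a single u32 (count now defaults to 1)
	auto fix = loc.fixed_alloc<char>(0x10100000, 16); // 16 bytes at a fixed guest address

	(void)str;
	(void)val;
	(void)fix;
}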
|
||||
|
|
|
@ -6,11 +6,10 @@ struct ARMv7Context;
|
|||
namespace vm
|
||||
{
|
||||
template<typename T, int lvl = 1, typename AT = u32>
|
||||
class _ptr_base
|
||||
struct _ptr_base
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
typedef typename std::remove_cv<T>::type type;
|
||||
static const u32 address_size = sizeof(AT);
|
||||
|
||||
|
@ -57,24 +56,26 @@ namespace vm
|
|||
_ptr_base operator - (typename remove_be_t<AT>::type count) const { return make(m_addr - count * address_size); }
|
||||
_ptr_base operator - (typename to_be_t<AT>::type count) const { return make(m_addr - count * address_size); }
|
||||
|
||||
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
__forceinline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
__forceinline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
__forceinline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
__forceinline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
__forceinline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
__forceinline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
__forceinline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
force_inline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
force_inline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
force_inline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
force_inline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
force_inline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
force_inline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
force_inline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
force_inline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
explicit operator bool() const { return m_addr != 0; }
|
||||
|
||||
__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator *() const
|
||||
force_inline _ptr_base<T, lvl - 1, typename std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>::type> operator *() const
|
||||
{
|
||||
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(vm::cast(m_addr));
|
||||
AT addr = convert_le_be<AT>(read64(convert_le_be<u32>(m_addr)));
|
||||
return (_ptr_base<T, lvl - 1, typename std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>::type>&)addr;
|
||||
}
|
||||
|
||||
__forceinline _ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>& operator [](AT index) const
|
||||
force_inline _ptr_base<T, lvl - 1, typename std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>::type> operator [](AT index) const
|
||||
{
|
||||
return vm::get_ref<_ptr_base<T, lvl - 1, std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>>>(vm::cast(m_addr + sizeof(AT)* index));
|
||||
AT addr = convert_le_be<AT>(read64(convert_le_be<u32>(m_addr + 8 * index)));
|
||||
return (_ptr_base<T, lvl - 1, typename std::conditional<is_be_t<T>::value, typename to_be_t<AT>::type, AT>::type>&)addr;
|
||||
}
|
||||
|
||||
template<typename AT2>
|
||||
|
@ -89,35 +90,35 @@ namespace vm
|
|||
return m_addr;
|
||||
}
|
||||
|
||||
void set(const AT value)
|
||||
template<typename U>
|
||||
void set(U&& value)
|
||||
{
|
||||
m_addr = value;
|
||||
m_addr = convert_le_be<AT>(value);
|
||||
}
|
||||
|
||||
static _ptr_base make(const AT& addr)
|
||||
{
|
||||
return reinterpret_cast<_ptr_base&>(addr);
|
||||
return reinterpret_cast<const _ptr_base&>(addr);
|
||||
}
|
||||
|
||||
_ptr_base& operator = (const _ptr_base& right) = default;
|
||||
};
|
||||
|
||||
template<typename T, typename AT>
|
||||
class _ptr_base<T, 1, AT>
|
||||
struct _ptr_base<T, 1, AT>
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
static_assert(!std::is_pointer<T>::value, "vm::_ptr_base<> error: invalid type (pointer)");
|
||||
static_assert(!std::is_reference<T>::value, "vm::_ptr_base<> error: invalid type (reference)");
|
||||
typedef typename std::remove_cv<T>::type type;
|
||||
|
||||
__forceinline static const u32 data_size()
|
||||
force_inline static const u32 data_size()
|
||||
{
|
||||
return sizeof(T);
|
||||
return convert_le_be<AT>(sizeof(T));
|
||||
}
|
||||
|
||||
__forceinline T* const operator -> () const
|
||||
force_inline T* const operator -> () const
|
||||
{
|
||||
return vm::get_ptr<T>(vm::cast(m_addr));
|
||||
}
|
||||
|
@ -160,34 +161,34 @@ namespace vm
|
|||
return *this;
|
||||
}
|
||||
|
||||
_ptr_base operator + (typename remove_be_t<AT>::type count) const { return make(m_addr + count * data_size()); }
|
||||
_ptr_base operator + (typename to_be_t<AT>::type count) const { return make(m_addr + count * data_size()); }
|
||||
_ptr_base operator - (typename remove_be_t<AT>::type count) const { return make(m_addr - count * data_size()); }
|
||||
_ptr_base operator - (typename to_be_t<AT>::type count) const { return make(m_addr - count * data_size()); }
|
||||
_ptr_base operator + (typename remove_be_t<AT>::type count) const { return make(convert_le_be<AT>(convert_le_be<decltype(count)>(m_addr) + count * convert_le_be<decltype(count)>(data_size()))); }
|
||||
_ptr_base operator + (typename to_be_t<AT>::type count) const { return make(convert_le_be<AT>(convert_le_be<decltype(count)>(m_addr) + count * convert_le_be<decltype(count)>(data_size()))); }
|
||||
_ptr_base operator - (typename remove_be_t<AT>::type count) const { return make(convert_le_be<AT>(convert_le_be<decltype(count)>(m_addr) - count * convert_le_be<decltype(count)>(data_size()))); }
|
||||
_ptr_base operator - (typename to_be_t<AT>::type count) const { return make(convert_le_be<AT>(convert_le_be<decltype(count)>(m_addr) - count * convert_le_be<decltype(count)>(data_size()))); }
|
||||
|
||||
__forceinline T& operator *() const
|
||||
force_inline T& operator *() const
|
||||
{
|
||||
return vm::get_ref<T>(vm::cast(m_addr));
|
||||
}
|
||||
|
||||
__forceinline T& operator [](typename remove_be_t<AT>::type index) const
|
||||
force_inline T& operator [](typename remove_be_t<AT>::type index) const
|
||||
{
|
||||
return vm::get_ref<T>(vm::cast(m_addr + data_size() * index));
|
||||
}
|
||||
|
||||
__forceinline T& operator [](typename to_be_t<AT>::forced_type index) const
|
||||
force_inline T& operator [](typename to_be_t<AT>::forced_type index) const
|
||||
{
|
||||
return vm::get_ref<T>(vm::cast(m_addr + data_size() * index));
|
||||
}
|
||||
|
||||
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
__forceinline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
__forceinline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
__forceinline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
__forceinline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
__forceinline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
__forceinline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
__forceinline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
force_inline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
force_inline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
force_inline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
force_inline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
force_inline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
force_inline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
force_inline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
force_inline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
explicit operator bool() const { return m_addr != 0; }
|
||||
explicit operator T*() const { return get_ptr(); }
|
||||
|
||||
|
@ -228,19 +229,19 @@ namespace vm
|
|||
};
|
||||
|
||||
template<typename AT>
|
||||
class _ptr_base<void, 1, AT>
|
||||
struct _ptr_base<void, 1, AT>
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
AT addr() const
|
||||
{
|
||||
return m_addr;
|
||||
}
|
||||
|
||||
void set(const AT value)
|
||||
template<typename U>
|
||||
void set(U&& value)
|
||||
{
|
||||
m_addr = value;
|
||||
m_addr = convert_le_be<AT>(value);
|
||||
}
|
||||
|
||||
void* get_ptr() const
|
||||
|
@ -258,14 +259,14 @@ namespace vm
|
|||
return get_ptr();
|
||||
}
|
||||
|
||||
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
__forceinline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
__forceinline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
__forceinline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
__forceinline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
__forceinline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
__forceinline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
__forceinline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
force_inline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
force_inline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
force_inline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
force_inline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
force_inline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
force_inline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
force_inline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
force_inline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
explicit operator bool() const { return m_addr != 0; }
|
||||
|
||||
template<typename AT2>
|
||||
|
@ -291,19 +292,19 @@ namespace vm
|
|||
};
|
||||
|
||||
template<typename AT>
|
||||
class _ptr_base<const void, 1, AT>
|
||||
struct _ptr_base<const void, 1, AT>
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
AT addr() const
|
||||
{
|
||||
return m_addr;
|
||||
}
|
||||
|
||||
void set(const AT value)
|
||||
template<typename U>
|
||||
void set(U&& value)
|
||||
{
|
||||
m_addr = value;
|
||||
m_addr = convert_le_be<AT>(value);
|
||||
}
|
||||
|
||||
const void* get_ptr() const
|
||||
|
@ -321,14 +322,14 @@ namespace vm
|
|||
return get_ptr();
|
||||
}
|
||||
|
||||
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
__forceinline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
__forceinline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
__forceinline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
__forceinline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
__forceinline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
__forceinline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
__forceinline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
force_inline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
force_inline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
force_inline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
force_inline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
force_inline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
force_inline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
force_inline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
force_inline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
explicit operator bool() const { return m_addr != 0; }
|
||||
|
||||
template<typename AT2>
|
||||
|
@ -347,37 +348,40 @@ namespace vm
|
|||
};
|
||||
|
||||
template<typename AT, typename RT, typename ...T>
|
||||
class _ptr_base<RT(T...), 1, AT>
|
||||
struct _ptr_base<RT(T...), 1, AT>
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
typedef RT(type)(T...);
|
||||
|
||||
RT operator()(PPUThread& CPU, T... args) const; // defined in CB_FUNC.h, call using specified PPU thread context
|
||||
// defined in CB_FUNC.h, call using specified PPU thread context
|
||||
RT operator()(PPUThread& CPU, T... args) const;
|
||||
|
||||
RT operator()(ARMv7Context& context, T... args) const; // defined in ARMv7Callback.h, passing context is mandatory
|
||||
// defined in ARMv7Callback.h, passing context is mandatory
|
||||
RT operator()(ARMv7Context& context, T... args) const;
|
||||
|
||||
RT operator()(T... args) const; // defined in CB_FUNC.h, call using current PPU thread context
|
||||
// defined in CB_FUNC.h, call using current PPU thread context
|
||||
RT operator()(T... args) const;
|
||||
|
||||
AT addr() const
|
||||
{
|
||||
return m_addr;
|
||||
}
|
||||
|
||||
void set(const AT value)
|
||||
template<typename U>
|
||||
void set(U&& value)
|
||||
{
|
||||
m_addr = value;
|
||||
m_addr = convert_le_be<AT>(value);
|
||||
}
|
||||
|
||||
__forceinline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
__forceinline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
__forceinline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
__forceinline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
__forceinline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
__forceinline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
__forceinline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
__forceinline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
force_inline bool operator <(const _ptr_base& right) const { return m_addr < right.m_addr; }
|
||||
force_inline bool operator <=(const _ptr_base& right) const { return m_addr <= right.m_addr; }
|
||||
force_inline bool operator >(const _ptr_base& right) const { return m_addr > right.m_addr; }
|
||||
force_inline bool operator >=(const _ptr_base& right) const { return m_addr >= right.m_addr; }
|
||||
force_inline bool operator ==(const _ptr_base& right) const { return m_addr == right.m_addr; }
|
||||
force_inline bool operator !=(const _ptr_base& right) const { return m_addr != right.m_addr; }
|
||||
force_inline bool operator ==(const nullptr_t& right) const { return m_addr == 0; }
|
||||
force_inline bool operator !=(const nullptr_t& right) const { return m_addr != 0; }
|
||||
explicit operator bool() const { return m_addr != 0; }
|
||||
|
||||
template<typename AT2>
|
||||
|
@ -402,39 +406,47 @@ namespace vm
|
|||
};
|
||||
|
||||
template<typename AT, typename RT, typename ...T>
|
||||
class _ptr_base<RT(*)(T...), 1, AT>
|
||||
struct _ptr_base<RT(*)(T...), 1, AT>
|
||||
{
|
||||
AT m_addr;
|
||||
|
||||
public:
|
||||
static_assert(!sizeof(AT), "vm::_ptr_base<> error: use RT(T...) format for functions instead of RT(*)(T...)");
|
||||
};
|
||||
|
||||
// Native endianness pointer to LE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptrl = _ptr_base<typename to_le_t<T>::type, lvl, AT>;
|
||||
|
||||
// Native endianness pointer to BE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptrb = _ptr_base<typename to_be_t<T>::type, lvl, AT>;
|
||||
|
||||
// BE pointer to LE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using bptrl = _ptr_base<T, lvl, typename to_be_t<AT>::type>;
|
||||
template<typename T, int lvl = 1, typename AT = u32> using bptrl = _ptr_base<typename to_le_t<T>::type, lvl, typename to_be_t<AT>::type>;
|
||||
|
||||
// BE pointer to BE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using bptrb = _ptr_base<typename to_be_t<T>::type, lvl, typename to_be_t<AT>::type>;
|
||||
|
||||
// LE pointer to BE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using lptrb = _ptr_base<typename to_be_t<T>::type, lvl, AT>;
|
||||
|
||||
// LE pointer to LE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using lptrl = _ptr_base<T, lvl, AT>;
|
||||
template<typename T, int lvl = 1, typename AT = u32> using lptrl = _ptr_base<typename to_le_t<T>::type, lvl, typename to_le_t<AT>::type>;
|
||||
|
||||
// LE pointer to BE data
|
||||
template<typename T, int lvl = 1, typename AT = u32> using lptrb = _ptr_base<typename to_be_t<T>::type, lvl, typename to_le_t<AT>::type>;
|
||||
|
||||
namespace ps3
|
||||
{
|
||||
// default pointer for HLE functions (LE pointer to BE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptr = lptrb<T, lvl, AT>;
|
||||
// default pointer for PS3 HLE functions (Native endianness pointer to BE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptr = ptrb<T, lvl, AT>;
|
||||
|
||||
// default pointer for HLE structures (BE pointer to BE data)
|
||||
// default pointer for PS3 HLE structures (BE pointer to BE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using bptr = bptrb<T, lvl, AT>;
|
||||
}
|
||||
|
||||
namespace psv
|
||||
{
|
||||
// default pointer for HLE functions & structures (LE pointer to LE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptr = lptrl<T, lvl, AT>;
|
||||
// default pointer for PSV HLE functions (Native endianness pointer to LE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using ptr = ptrl<T, lvl, AT>;
|
||||
|
||||
// default pointer for PSV HLE structures (LE pointer to LE data)
|
||||
template<typename T, int lvl = 1, typename AT = u32> using lptr = lptrl<T, lvl, AT>;
|
||||
}
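// Illustrative sketch (not part of the commit): combining the aliases above. HLE prototypes
// take native-endian pointers, while structures stored in guest memory keep guest-endian
// pointers so that nested addresses preserve the guest layout.
static void ptr_alias_example(vm::ps3::ptr<u32> arg) // HLE argument: native pointer to BE data
{
	*arg = 42; // the pointed-to be_t<u32> is assumed to convert/byte-swap on assignment

	vm::ps3::bptr<u32> stored; // BE pointer to BE data, as embedded in guest structures
	stored.set(arg.addr());    // set() converts the address to the guest representation

	(void)stored;
}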
|
||||
|
||||
// PS3 emulation is the main target for now, so make it the default
|
||||
|
@ -462,7 +474,7 @@ namespace fmt
|
|||
{
|
||||
typedef typename unveil<AT>::result_type result_type;
|
||||
|
||||
__forceinline static result_type get_value(const vm::_ptr_base<T, lvl, AT>& arg)
|
||||
force_inline static result_type get_value(const vm::_ptr_base<T, lvl, AT>& arg)
|
||||
{
|
||||
return unveil<AT>::get_value(arg.addr());
|
||||
}
|
||||
|
@ -477,12 +489,12 @@ struct cast_ppu_gpr;
|
|||
template<typename T, int lvl, typename AT>
|
||||
struct cast_ppu_gpr<vm::_ptr_base<T, lvl, AT>, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
|
||||
force_inline static u64 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
|
||||
{
|
||||
return cast_ppu_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
|
||||
}
|
||||
|
||||
__forceinline static vm::_ptr_base<T, lvl, AT> from_gpr(const u64 reg)
|
||||
force_inline static vm::_ptr_base<T, lvl, AT> from_gpr(const u64 reg)
|
||||
{
|
||||
return vm::_ptr_base<T, lvl, AT>::make(cast_ppu_gpr<AT, std::is_enum<AT>::value>::from_gpr(reg));
|
||||
}
|
||||
|
@ -496,12 +508,12 @@ struct cast_armv7_gpr;
|
|||
template<typename T, int lvl, typename AT>
|
||||
struct cast_armv7_gpr<vm::_ptr_base<T, lvl, AT>, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
|
||||
force_inline static u32 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
|
||||
{
|
||||
return cast_armv7_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
|
||||
}
|
||||
|
||||
__forceinline static vm::_ptr_base<T, lvl, AT> from_gpr(const u32 reg)
|
||||
force_inline static vm::_ptr_base<T, lvl, AT> from_gpr(const u32 reg)
|
||||
{
|
||||
return vm::_ptr_base<T, lvl, AT>::make(cast_armv7_gpr<AT, std::is_enum<AT>::value>::from_gpr(reg));
|
||||
}
|
||||
|
|
|
@ -60,31 +60,40 @@ namespace vm
|
|||
}
|
||||
};
|
||||
|
||||
//BE reference to LE data
|
||||
template<typename T, typename AT = u32> using brefl = _ref_base<T, typename to_be_t<AT>::type>;
|
||||
// Native endianness reference to LE data
|
||||
template<typename T, typename AT = u32> using refl = _ref_base<typename to_le_t<T>::type, AT>;
|
||||
|
||||
//BE reference to BE data
|
||||
// Native endianness reference to BE data
|
||||
template<typename T, typename AT = u32> using refb = _ref_base<typename to_be_t<T>::type, AT>;
|
||||
|
||||
// BE reference to LE data
|
||||
template<typename T, typename AT = u32> using brefl = _ref_base<typename to_le_t<T>::type, typename to_be_t<AT>::type>;
|
||||
|
||||
// BE reference to BE data
|
||||
template<typename T, typename AT = u32> using brefb = _ref_base<typename to_be_t<T>::type, typename to_be_t<AT>::type>;
|
||||
|
||||
//LE reference to BE data
|
||||
template<typename T, typename AT = u32> using lrefb = _ref_base<typename to_be_t<T>::type, AT>;
|
||||
// LE reference to LE data
|
||||
template<typename T, typename AT = u32> using lrefl = _ref_base<typename to_le_t<T>::type, typename to_le_t<AT>::type>;
|
||||
|
||||
//LE reference to LE data
|
||||
template<typename T, typename AT = u32> using lrefl = _ref_base<T, AT>;
|
||||
// LE reference to BE data
|
||||
template<typename T, typename AT = u32> using lrefb = _ref_base<typename to_be_t<T>::type, typename to_le_t<AT>::type>;
|
||||
|
||||
namespace ps3
|
||||
{
|
||||
//default reference for HLE functions (LE reference to BE data)
|
||||
template<typename T, typename AT = u32> using ref = lrefb<T, AT>;
|
||||
// default reference for PS3 HLE functions (Native endianness reference to BE data)
|
||||
template<typename T, typename AT = u32> using ref = refb<T, AT>;
|
||||
|
||||
//default reference for HLE structures (BE reference to BE data)
|
||||
// default reference for PS3 HLE structures (BE reference to BE data)
|
||||
template<typename T, typename AT = u32> using bref = brefb<T, AT>;
|
||||
}
|
||||
|
||||
namespace psv
|
||||
{
|
||||
//default reference for HLE functions & structures (LE reference to LE data)
|
||||
template<typename T, typename AT = u32> using ref = lrefl<T, AT>;
|
||||
// default reference for PSV HLE functions (Native endianness reference to LE data)
|
||||
template<typename T, typename AT = u32> using ref = refl<T, AT>;
|
||||
|
||||
// default reference for PSV HLE structures (LE reference to LE data)
|
||||
template<typename T, typename AT = u32> using lref = lrefl<T, AT>;
|
||||
}
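// Illustrative note (not part of the commit): the reference aliases mirror the pointer
// aliases, e.g. an HLE prototype would take a native-endian reference to BE data:
//
//     s32 sys_example(vm::ps3::ref<u32> value);   // hypothetical function
//
// while a structure living in guest memory would use vm::ps3::bref<u32> so that the stored
// address keeps its big-endian layout.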
|
||||
|
||||
// PS3 emulation is the main target for now, so make it the default
|
||||
|
@ -100,7 +109,7 @@ namespace fmt
|
|||
{
|
||||
typedef typename unveil<AT>::result_type result_type;
|
||||
|
||||
__forceinline static result_type get_value(const vm::_ref_base<T, AT>& arg)
|
||||
force_inline static result_type get_value(const vm::_ref_base<T, AT>& arg)
|
||||
{
|
||||
return unveil<AT>::get_value(arg.addr());
|
||||
}
|
||||
|
@ -115,12 +124,12 @@ struct cast_ppu_gpr;
|
|||
template<typename T, typename AT>
|
||||
struct cast_ppu_gpr<vm::_ref_base<T, AT>, false>
|
||||
{
|
||||
__forceinline static u64 to_gpr(const vm::_ref_base<T, AT>& value)
|
||||
force_inline static u64 to_gpr(const vm::_ref_base<T, AT>& value)
|
||||
{
|
||||
return cast_ppu_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
|
||||
}
|
||||
|
||||
__forceinline static vm::_ref_base<T, AT> from_gpr(const u64 reg)
|
||||
force_inline static vm::_ref_base<T, AT> from_gpr(const u64 reg)
|
||||
{
|
||||
return vm::_ref_base<T, AT>::make(cast_ppu_gpr<AT, std::is_enum<AT>::value>::from_gpr(reg));
|
||||
}
|
||||
|
@ -134,12 +143,12 @@ struct cast_armv7_gpr;
|
|||
template<typename T, typename AT>
|
||||
struct cast_armv7_gpr<vm::_ref_base<T, AT>, false>
|
||||
{
|
||||
__forceinline static u32 to_gpr(const vm::_ref_base<T, AT>& value)
|
||||
force_inline static u32 to_gpr(const vm::_ref_base<T, AT>& value)
|
||||
{
|
||||
return cast_armv7_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
|
||||
}
|
||||
|
||||
__forceinline static vm::_ref_base<T, AT> from_gpr(const u32 reg)
|
||||
force_inline static vm::_ref_base<T, AT> from_gpr(const u32 reg)
|
||||
{
|
||||
return vm::_ref_base<T, AT>::make(cast_armv7_gpr<AT, std::is_enum<AT>::value>::from_gpr(reg));
|
||||
}
|
||||
|
|
|
@ -425,7 +425,7 @@ namespace vm
|
|||
return m_addr;
|
||||
}
|
||||
|
||||
__forceinline uint count() const
|
||||
force_inline uint count() const
|
||||
{
|
||||
return _count;
|
||||
}
|
||||
|
|
|
@ -201,9 +201,9 @@ enum
|
|||
|
||||
struct CellGcmControl
|
||||
{
|
||||
atomic_t<u32> put;
|
||||
atomic_t<u32> get;
|
||||
atomic_t<u32> ref;
|
||||
atomic_be_t<u32> put;
|
||||
atomic_be_t<u32> get;
|
||||
atomic_be_t<u32> ref;
|
||||
};
|
||||
|
||||
struct CellGcmConfig
|
||||
|
|
|
@ -24,7 +24,7 @@ namespace cb_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid callback argument type for ARG_GENERAL");
|
||||
|
||||
__forceinline static void set_value(PPUThread& CPU, const T& arg)
|
||||
force_inline static void set_value(PPUThread& CPU, const T& arg)
|
||||
{
|
||||
CPU.GPR[g_count + 2] = cast_to_ppu_gpr<T>(arg);
|
||||
}
|
||||
|
@ -35,7 +35,7 @@ namespace cb_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid callback argument type for ARG_FLOAT");
|
||||
|
||||
__forceinline static void set_value(PPUThread& CPU, const T& arg)
|
||||
force_inline static void set_value(PPUThread& CPU, const T& arg)
|
||||
{
|
||||
CPU.FPR[f_count] = static_cast<T>(arg);
|
||||
}
|
||||
|
@ -46,7 +46,7 @@ namespace cb_detail
|
|||
{
|
||||
static_assert(std::is_same<T, u128>::value, "Invalid callback argument type for ARG_VECTOR");
|
||||
|
||||
__forceinline static void set_value(PPUThread& CPU, const T& arg)
|
||||
force_inline static void set_value(PPUThread& CPU, const T& arg)
|
||||
{
|
||||
CPU.VPR[v_count + 1] = arg;
|
||||
}
|
||||
|
@ -59,7 +59,7 @@ namespace cb_detail
|
|||
static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
|
||||
static_assert(sizeof(T) <= 8, "Invalid callback argument type for ARG_STACK");
|
||||
|
||||
__forceinline static void set_value(PPUThread& CPU, const T& arg)
|
||||
force_inline static void set_value(PPUThread& CPU, const T& arg)
|
||||
{
|
||||
const int stack_pos = (g_count - 9) * 8 - FIXED_STACK_FRAME_SIZE;
|
||||
static_assert(stack_pos < 0, "TODO: Increase fixed stack frame size (arg count limit broken)");
|
||||
|
@ -68,14 +68,14 @@ namespace cb_detail
|
|||
};
|
||||
|
||||
template<int g_count, int f_count, int v_count>
|
||||
__forceinline static bool _bind_func_args(PPUThread& CPU)
|
||||
force_inline static bool _bind_func_args(PPUThread& CPU)
|
||||
{
|
||||
// terminator
|
||||
return false;
|
||||
}
|
||||
|
||||
template<int g_count, int f_count, int v_count, typename T1, typename... T>
|
||||
__forceinline static bool _bind_func_args(PPUThread& CPU, T1 arg1, T... args)
|
||||
force_inline static bool _bind_func_args(PPUThread& CPU, T1 arg1, T... args)
|
||||
{
|
||||
static_assert(!std::is_pointer<T1>::value, "Invalid callback argument type (pointer)");
|
||||
static_assert(!std::is_reference<T1>::value, "Invalid callback argument type (reference)");
|
||||
|
@ -99,7 +99,7 @@ namespace cb_detail
|
|||
static_assert(type == ARG_GENERAL, "Wrong use of _func_res template");
|
||||
static_assert(sizeof(T) <= 8, "Invalid callback result type for ARG_GENERAL");
|
||||
|
||||
__forceinline static T get_value(const PPUThread& CPU)
|
||||
force_inline static T get_value(const PPUThread& CPU)
|
||||
{
|
||||
return cast_from_ppu_gpr<T>(CPU.GPR[3]);
|
||||
}
|
||||
|
@ -110,7 +110,7 @@ namespace cb_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid callback result type for ARG_FLOAT");
|
||||
|
||||
__forceinline static T get_value(const PPUThread& CPU)
|
||||
force_inline static T get_value(const PPUThread& CPU)
|
||||
{
|
||||
return static_cast<T>(CPU.FPR[1]);
|
||||
}
|
||||
|
@ -121,7 +121,7 @@ namespace cb_detail
|
|||
{
|
||||
static_assert(std::is_same<T, u128>::value, "Invalid callback result type for ARG_VECTOR");
|
||||
|
||||
__forceinline static T get_value(const PPUThread& CPU)
|
||||
force_inline static T get_value(const PPUThread& CPU)
|
||||
{
|
||||
return CPU.VPR[2];
|
||||
}
|
||||
|
@ -130,7 +130,7 @@ namespace cb_detail
|
|||
template<typename RT, typename... T>
|
||||
struct _func_caller
|
||||
{
|
||||
__forceinline static RT call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
force_inline static RT call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
{
|
||||
_func_caller<void, T...>::call(CPU, pc, rtoc, args...);
|
||||
|
||||
|
@ -147,7 +147,7 @@ namespace cb_detail
|
|||
template<typename... T>
|
||||
struct _func_caller<void, T...>
|
||||
{
|
||||
__forceinline static void call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
force_inline static void call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
{
|
||||
const bool stack = _bind_func_args<0, 0, 0, T...>(CPU, args...);
|
||||
if (stack) CPU.GPR[1] -= FIXED_STACK_FRAME_SIZE;
|
||||
|
@ -162,7 +162,7 @@ namespace cb_detail
|
|||
namespace vm
|
||||
{
|
||||
template<typename AT, typename RT, typename... T>
|
||||
__forceinline RT _ptr_base<RT(T...), 1, AT>::operator()(PPUThread& CPU, T... args) const
|
||||
force_inline RT _ptr_base<RT(T...), 1, AT>::operator()(PPUThread& CPU, T... args) const
|
||||
{
|
||||
const auto data = vm::get_ptr<be_t<u32>>(vm::cast(m_addr));
|
||||
const u32 pc = data[0];
|
||||
|
@ -172,14 +172,14 @@ namespace vm
|
|||
}
|
||||
|
||||
template<typename AT, typename RT, typename... T>
|
||||
__forceinline RT _ptr_base<RT(T...), 1, AT>::operator()(T... args) const
|
||||
force_inline RT _ptr_base<RT(T...), 1, AT>::operator()(T... args) const
|
||||
{
|
||||
return operator()(GetCurrentPPUThread(), args...);
|
||||
}
|
||||
}
|
||||
|
||||
template<typename RT, typename... T>
|
||||
__forceinline RT cb_call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
force_inline RT cb_call(PPUThread& CPU, u32 pc, u32 rtoc, T... args)
|
||||
{
|
||||
return cb_detail::_func_caller<RT, T...>::call(CPU, pc, rtoc, args...);
|
||||
}
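// Illustrative sketch (not part of the commit): invoking a guest callback through a vm
// function pointer; the callback signature and argument values are made-up examples.
static s32 callback_example(PPUThread& CPU, vm::ptr<s32(u32, u32)> cb)
{
	// _bind_func_args() places the arguments into GPR/FPR/VPR (or the fixed stack frame),
	// then the call runs on the given PPU thread at the function descriptor's address
	return cb(CPU, 0x1234, 0x5678);
}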
|
||||
|
|
|
@ -17,8 +17,7 @@ class LogBase
|
|||
|
||||
void LogOutput(LogType type, const std::string& text) const;
|
||||
|
||||
template<typename... Targs>
|
||||
__noinline void LogPrepare(LogType type, const char* fmt, Targs... args) const
|
||||
template<typename... Args> never_inline void LogPrepare(LogType type, const char* fmt, Args... args) const
|
||||
{
|
||||
LogOutput(type, fmt::Format(fmt, args...));
|
||||
}
|
||||
|
@ -36,14 +35,12 @@ public:
|
|||
|
||||
virtual const std::string& GetName() const = 0;
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Notice(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Notice(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogNotice, fmt, fmt::do_unveil(args)...);
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Log(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Log(const char* fmt, Args... args) const
|
||||
{
|
||||
if (CheckLogging())
|
||||
{
|
||||
|
@ -51,32 +48,27 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Success(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Success(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogSuccess, fmt, fmt::do_unveil(args)...);
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Warning(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Warning(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogWarning, fmt, fmt::do_unveil(args)...);
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Error(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Error(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogError, fmt, fmt::do_unveil(args)...);
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Fatal(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Fatal(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogFatal, fmt, fmt::do_unveil(args)...);
|
||||
}
|
||||
|
||||
template<typename... Targs>
|
||||
__forceinline void Todo(const char* fmt, Targs... args) const
|
||||
template<typename... Args> force_inline void Todo(const char* fmt, Args... args) const
|
||||
{
|
||||
LogPrepare(LogTodo, fmt, fmt::do_unveil(args)...);
|
||||
}
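// Illustrative note (not part of the commit): a Module instance would log through these
// helpers, e.g. (names and values are placeholders):
//
//     cellExample.Warning("cellExampleFunc(arg=0x%x)", arg); // always prepared and emitted
//     cellExample.Log("verbose detail: %d", value);          // emitted only if CheckLogging() allows it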
|
||||
|
|
|
@ -15,9 +15,19 @@ std::vector<StaticFunc> g_ppu_func_subs;
|
|||
|
||||
u32 add_ppu_func(ModuleFunc func)
|
||||
{
|
||||
if (g_ppu_func_list.empty())
|
||||
{
|
||||
// prevent relocations as the array grows; sizeof(ModuleFunc) * 0x8000 is about 1 MB of memory
|
||||
g_ppu_func_list.reserve(0x8000);
|
||||
}
|
||||
|
||||
for (auto& f : g_ppu_func_list)
|
||||
{
|
||||
assert(f.id != func.id);
|
||||
if (f.id == func.id)
|
||||
{
|
||||
// TODO: handle the case where NIDs overlap or the same function is added twice
|
||||
assert(!"add_ppu_func(): NID already exists");
|
||||
}
|
||||
}
|
||||
|
||||
g_ppu_func_list.push_back(func);
|
||||
|
@ -93,15 +103,56 @@ void execute_ppu_func_by_index(PPUThread& CPU, u32 index)
|
|||
{
|
||||
if (auto func = get_ppu_func_by_index(index))
|
||||
{
|
||||
auto old_last_syscall = CPU.m_last_syscall;
|
||||
CPU.m_last_syscall = func->id;
|
||||
|
||||
// save RTOC if necessary
|
||||
if (index & EIF_SAVE_RTOC)
|
||||
{
|
||||
// save RTOC if necessary
|
||||
vm::write64(vm::cast(CPU.GPR[1] + 0x28), CPU.GPR[2]);
|
||||
}
|
||||
|
||||
// save old syscall/NID value
|
||||
auto old_last_syscall = CPU.m_last_syscall;
|
||||
|
||||
// branch directly to the LLE function
|
||||
if (index & EIF_USE_BRANCH)
|
||||
{
|
||||
// for example, FastCall2 can't work with functions that perform a user-level context switch
|
||||
|
||||
if (old_last_syscall)
|
||||
{
|
||||
throw "Unfortunately, this function cannot be called from the callback.";
|
||||
}
|
||||
|
||||
if (!func->lle_func)
|
||||
{
|
||||
throw "Wrong usage: LLE function not set.";
|
||||
}
|
||||
|
||||
if (func->flags & MFF_FORCED_HLE)
|
||||
{
|
||||
throw "Wrong usage: Forced HLE enabled.";
|
||||
}
|
||||
|
||||
if (Ini.HLELogging.GetValue())
|
||||
{
|
||||
LOG_NOTICE(HLE, "Branch to LLE function: %s", SysCalls::GetFuncName(func->id));
|
||||
}
|
||||
|
||||
if (index & EIF_PERFORM_BLR)
|
||||
{
|
||||
throw "TODO: Branch with link";
|
||||
// CPU.LR = CPU.PC + 4;
|
||||
}
|
||||
|
||||
const auto data = vm::get_ptr<be_t<u32>>(func->lle_func.addr());
|
||||
CPU.SetBranch(data[0]);
|
||||
CPU.GPR[2] = data[1]; // set rtoc
|
||||
|
||||
return;
|
||||
}
|
||||
|
||||
// change current syscall/NID value
|
||||
CPU.m_last_syscall = func->id;
|
||||
|
||||
if (func->lle_func && !(func->flags & MFF_FORCED_HLE))
|
||||
{
|
||||
// call LLE function if available
|
||||
|
@ -427,6 +478,15 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
|
||||
using namespace PPU_instr;
|
||||
|
||||
if (index >= g_ppu_func_list.size())
|
||||
{
|
||||
return false;
|
||||
}
|
||||
|
||||
const u32 imm = (g_ppu_func_list[index].flags & MFF_NO_RETURN) && !(g_ppu_func_list[index].flags & MFF_FORCED_HLE)
|
||||
? index | EIF_USE_BRANCH
|
||||
: index | EIF_PERFORM_BLR;
|
||||
|
||||
// check different patterns:
|
||||
|
||||
if (vm::check_addr(addr, 32) &&
|
||||
|
@ -439,7 +499,7 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
data[6] == MTCTR(r0) &&
|
||||
data[7] == BCTR())
|
||||
{
|
||||
vm::write32(addr, HACK(index | EIF_SAVE_RTOC | EIF_PERFORM_BLR));
|
||||
vm::write32(addr, HACK(imm | EIF_SAVE_RTOC));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -467,7 +527,7 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
sub[0xd] == MTLR(r0) &&
|
||||
sub[0xe] == BLR())
|
||||
{
|
||||
vm::write32(addr, HACK(index | EIF_PERFORM_BLR));
|
||||
vm::write32(addr, HACK(imm));
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
@ -490,7 +550,7 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
data[0xe] == MTLR(r0) &&
|
||||
data[0xf] == BLR())
|
||||
{
|
||||
vm::write32(addr, HACK(index | EIF_PERFORM_BLR));
|
||||
vm::write32(addr, HACK(imm));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -511,7 +571,7 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
data[0xd] == MTLR(r0) &&
|
||||
data[0xe] == BLR())
|
||||
{
|
||||
vm::write32(addr, HACK(index | EIF_PERFORM_BLR));
|
||||
vm::write32(addr, HACK(imm));
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -531,10 +591,11 @@ bool patch_ppu_import(u32 addr, u32 index)
|
|||
data[0xc] == LD(r2, r1, 0x28) &&
|
||||
data[0xd] == BLR())
|
||||
{
|
||||
vm::write32(addr, HACK(index | EIF_PERFORM_BLR));
|
||||
vm::write32(addr, HACK(imm));
|
||||
return true;
|
||||
}
|
||||
|
||||
//vm::write32(addr, HACK(imm));
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -9,6 +9,7 @@ class Module;
|
|||
enum : u32
|
||||
{
|
||||
MFF_FORCED_HLE = (1 << 0), // always call HLE function
|
||||
MFF_NO_RETURN = (1 << 1), // uses EIF_USE_BRANCH flag with LLE, ignored with MFF_FORCED_HLE
|
||||
};
|
||||
|
||||
// flags passed with index
|
||||
|
@ -16,8 +17,9 @@ enum : u32
|
|||
{
|
||||
EIF_SAVE_RTOC = (1 << 25), // save RTOC in [SP+0x28] before calling HLE/LLE function
|
||||
EIF_PERFORM_BLR = (1 << 24), // do BLR after calling HLE/LLE function
|
||||
EIF_USE_BRANCH = (1 << 23), // do only branch, LLE must be set, last_syscall must be zero
|
||||
|
||||
EIF_FLAGS = 0x3000000, // all flags
|
||||
EIF_FLAGS = 0x3800000, // all flags
|
||||
};
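// Illustrative note (not part of the commit): the flag bits live above the 23-bit index
// range, so EIF_FLAGS must cover all three of them:
//
//     (1 << 25) | (1 << 24) | (1 << 23) == 0x2000000 | 0x1000000 | 0x800000 == 0x3800000
//
// which is why the mask grows from 0x3000000 to 0x3800000 when EIF_USE_BRANCH is added.
static_assert((EIF_SAVE_RTOC | EIF_PERFORM_BLR | EIF_USE_BRANCH) == EIF_FLAGS, "EIF_FLAGS must cover every flag bit");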
|
||||
|
||||
struct ModuleFunc
|
||||
|
@ -121,6 +123,7 @@ bool patch_ppu_import(u32 addr, u32 index);
|
|||
|
||||
#define REG_FUNC(module, name) add_ppu_func(ModuleFunc(get_function_id(#name), 0, &module, #name, bind_func(name)))
|
||||
#define REG_FUNC_FH(module, name) add_ppu_func(ModuleFunc(get_function_id(#name), MFF_FORCED_HLE, &module, #name, bind_func(name)))
|
||||
#define REG_FUNC_NR(module, name) add_ppu_func(ModuleFunc(get_function_id(#name), MFF_NO_RETURN, &module, #name, bind_func(name)))
|
||||
|
||||
#define REG_UNNAMED(module, nid) add_ppu_func(ModuleFunc(0x##nid, 0, &module, "_nid_"#nid, bind_func(_nid_##nid)))
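// Illustrative note (not part of the commit): how a module would use these macros
// (module and function names below are placeholders):
//
//     REG_FUNC(cellExample, cellExampleFunc);    // normal registration (HLE, or LLE when patched)
//     REG_FUNC_FH(cellExample, cellExampleFunc); // always force the HLE implementation
//     REG_FUNC_NR(cellExample, cellExampleFunc); // no-return function: branch directly to LLE
//     REG_UNNAMED(cellExample, 0123abcd);        // register by raw NID when the name is unknown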
|
||||
|
||||
|
|
|
@ -216,12 +216,10 @@ next:
|
|||
}
|
||||
}
|
||||
|
||||
u32 adecOpen(AudioDecoder* adec_ptr)
|
||||
void adecOpen(u32 adec_id) // TODO: call from the constructor
|
||||
{
|
||||
std::shared_ptr<AudioDecoder> sptr(adec_ptr);
|
||||
AudioDecoder& adec = *adec_ptr;
|
||||
|
||||
u32 adec_id = Emu.GetIdManager().GetNewID(sptr);
|
||||
const auto sptr = Emu.GetIdManager().get<AudioDecoder>(adec_id);
|
||||
AudioDecoder& adec = *sptr;
|
||||
|
||||
adec.id = adec_id;
|
||||
|
||||
|
@ -234,9 +232,9 @@ u32 adecOpen(AudioDecoder* adec_ptr)
|
|||
adec.adecCb->InitRegs();
|
||||
adec.adecCb->DoRun();
|
||||
|
||||
thread_t t(fmt::format("AudioDecoder[0x%x] Thread", adec_id), [adec_ptr, sptr]()
|
||||
thread_t t(fmt::format("AudioDecoder[0x%x] Thread", adec_id), [sptr]()
|
||||
{
|
||||
AudioDecoder& adec = *adec_ptr;
|
||||
AudioDecoder& adec = *sptr;
|
||||
AdecTask& task = adec.task;
|
||||
|
||||
while (true)
|
||||
|
@ -478,8 +476,6 @@ u32 adecOpen(AudioDecoder* adec_ptr)
|
|||
|
||||
adec.is_finished = true;
|
||||
});
|
||||
|
||||
return adec_id;
|
||||
}
|
||||
|
||||
bool adecCheckType(AudioCodecType type)
|
||||
|
@ -536,7 +532,7 @@ s32 cellAdecOpen(vm::ptr<CellAdecType> type, vm::ptr<CellAdecResource> res, vm::
|
|||
return CELL_ADEC_ERROR_ARG;
|
||||
}
|
||||
|
||||
*handle = adecOpen(new AudioDecoder(type->audioCodecType, res->startAddr, res->totalMemSize, cb->cbFunc, cb->cbArg));
|
||||
adecOpen(*handle = Emu.GetIdManager().make<AudioDecoder>(type->audioCodecType, res->startAddr, res->totalMemSize, cb->cbFunc, cb->cbArg));
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -550,7 +546,7 @@ s32 cellAdecOpenEx(vm::ptr<CellAdecType> type, vm::ptr<CellAdecResourceEx> res,
|
|||
return CELL_ADEC_ERROR_ARG;
|
||||
}
|
||||
|
||||
*handle = adecOpen(new AudioDecoder(type->audioCodecType, res->startAddr, res->totalMemSize, cb->cbFunc, cb->cbArg));
|
||||
adecOpen(*handle = Emu.GetIdManager().make<AudioDecoder>(type->audioCodecType, res->startAddr, res->totalMemSize, cb->cbFunc, cb->cbArg));
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -564,7 +560,7 @@ s32 cellAdecClose(u32 handle)
|
|||
{
|
||||
cellAdec.Warning("cellAdecClose(handle=0x%x)", handle);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
@ -585,7 +581,7 @@ s32 cellAdecClose(u32 handle)
|
|||
}
|
||||
|
||||
if (adec->adecCb) Emu.GetCPU().RemoveThread(adec->adecCb->GetId());
|
||||
Emu.GetIdManager().RemoveID<AudioDecoder>(handle);
|
||||
Emu.GetIdManager().remove<AudioDecoder>(handle);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -593,7 +589,7 @@ s32 cellAdecStartSeq(u32 handle, u32 param)
|
|||
{
|
||||
cellAdec.Warning("cellAdecStartSeq(handle=0x%x, param=*0x%x)", handle, param);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
@ -646,7 +642,7 @@ s32 cellAdecEndSeq(u32 handle)
|
|||
{
|
||||
cellAdec.Warning("cellAdecEndSeq(handle=0x%x)", handle);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
@ -661,7 +657,7 @@ s32 cellAdecDecodeAu(u32 handle, vm::ptr<CellAdecAuInfo> auInfo)
|
|||
{
|
||||
cellAdec.Log("cellAdecDecodeAu(handle=0x%x, auInfo=*0x%x)", handle, auInfo);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
@ -684,7 +680,7 @@ s32 cellAdecGetPcm(u32 handle, vm::ptr<float> outBuffer)
|
|||
{
|
||||
cellAdec.Log("cellAdecGetPcm(handle=0x%x, outBuffer=*0x%x)", handle, outBuffer);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
@ -800,7 +796,7 @@ s32 cellAdecGetPcmItem(u32 handle, vm::ptr<vm::bptr<CellAdecPcmItem>> pcmItem)
|
|||
{
|
||||
cellAdec.Log("cellAdecGetPcmItem(handle=0x%x, pcmItem=**0x%x)", handle, pcmItem);
|
||||
|
||||
const auto adec = Emu.GetIdManager().GetIDData<AudioDecoder>(handle);
|
||||
const auto adec = Emu.GetIdManager().get<AudioDecoder>(handle);
|
||||
|
||||
if (!adec)
|
||||
{
|
||||
|
|
|
@ -763,12 +763,12 @@ s32 cellAudioCreateNotifyEventQueue(vm::ptr<u32> id, vm::ptr<u64> key)
|
|||
{
|
||||
const u64 key_value = 0x80004d494f323221ull + k;
|
||||
|
||||
std::shared_ptr<event_queue_t> queue(new event_queue_t(SYS_SYNC_FIFO, SYS_PPU_QUEUE, 0, key_value, 32));
|
||||
auto queue = std::make_shared<lv2_event_queue_t>(SYS_SYNC_FIFO, SYS_PPU_QUEUE, 0, key_value, 32);
|
||||
|
||||
// register key if not used yet
|
||||
if (Emu.GetEventManager().RegisterKey(queue, key_value))
|
||||
if (Emu.GetEventManager().RegisterKey(queue))
|
||||
{
|
||||
*id = Emu.GetIdManager().GetNewID(queue, TYPE_EVENT_QUEUE);
|
||||
*id = Emu.GetIdManager().add(std::move(queue));
|
||||
*key = key_value;
|
||||
|
||||
return CELL_OK;
|
||||
|
|
|
@@ -98,7 +98,7 @@ enum AudioPortState : u32
 struct AudioPortConfig
 {
 	std::mutex mutex;
-	atomic_le_t<AudioPortState> state;
+	atomic<AudioPortState> state;
 
 	u32 channel;
 	u32 block;
@@ -116,13 +116,13 @@ struct AudioPortConfig
 	};
 
 	float level;
-	atomic_le_t<level_set_t> level_set;
+	atomic<level_set_t> level_set;
 };
 
 struct AudioConfig //custom structure
 {
 	std::mutex mutex;
-	atomic_le_t<AudioState> state;
+	atomic<AudioState> state;
 	thread_t audio_thread;
 
 	AudioPortConfig ports[AUDIO_PORT_COUNT];
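With the port and global audio state kept in an atomic wrapper, state transitions can be made lock-free between the audio thread and the HLE calls. A small sketch of that kind of transition, using placeholder enumerators rather than the real AudioPortState values:

#include <atomic>

enum class port_state : unsigned { closed, opened, started };

// try to move a port from `opened` to `started`; fails if another thread won the race
bool try_start(std::atomic<port_state>& state)
{
	port_state expected = port_state::opened;
	return state.compare_exchange_strong(expected, port_state::started);
}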
@@ -298,12 +298,10 @@ void dmuxQueryEsAttr(u32 info /* may be 0 */, vm::ptr<const CellCodecEsFilterId>
 	cellDmux.Warning("*** filter(0x%x, 0x%x, 0x%x, 0x%x)", esFilterId->filterIdMajor, esFilterId->filterIdMinor, esFilterId->supplementalInfo1, esFilterId->supplementalInfo2);
 }
 
-u32 dmuxOpen(Demuxer* dmux_ptr)
+void dmuxOpen(u32 dmux_id) // TODO: call from the constructor
 {
-	std::shared_ptr<Demuxer> sptr(dmux_ptr);
-	Demuxer& dmux = *dmux_ptr;
-
-	u32 dmux_id = Emu.GetIdManager().GetNewID(sptr);
+	const auto sptr = Emu.GetIdManager().get<Demuxer>(dmux_id);
+	Demuxer& dmux = *sptr;
 
 	dmux.id = dmux_id;
 
@@ -316,9 +314,9 @@ u32 dmuxOpen(Demuxer* dmux_ptr)
 	dmux.dmuxCb->InitRegs();
 	dmux.dmuxCb->DoRun();
 
-	thread_t t(fmt::format("Demuxer[0x%x] Thread", dmux_id), [dmux_ptr, sptr]()
+	thread_t t(fmt::format("Demuxer[0x%x] Thread", dmux_id), [sptr]()
 	{
-		Demuxer& dmux = *dmux_ptr;
+		Demuxer& dmux = *sptr;
 
 		DemuxerTask task;
 		DemuxerStream stream = {};
@@ -711,7 +709,7 @@ u32 dmuxOpen(Demuxer* dmux_ptr)
 				}
 			}
 			es.dmux = nullptr;
-			Emu.GetIdManager().RemoveID<ElementaryStream>(task.es.es);
+			Emu.GetIdManager().remove<ElementaryStream>(task.es.es);
 			break;
 		}
 
@@ -772,8 +770,6 @@ u32 dmuxOpen(Demuxer* dmux_ptr)
 
 		dmux.is_finished = true;
 	});
-
-	return dmux_id;
 }
 
 s32 cellDmuxQueryAttr(vm::ptr<const CellDmuxType> type, vm::ptr<CellDmuxAttr> attr)
@@ -813,7 +809,7 @@ s32 cellDmuxOpen(vm::ptr<const CellDmuxType> type, vm::ptr<const CellDmuxResourc
 
 	// TODO: check demuxerResource and demuxerCb arguments
 
-	*handle = dmuxOpen(new Demuxer(res->memAddr, res->memSize, cb->cbMsgFunc, cb->cbArg));
+	dmuxOpen(*handle = Emu.GetIdManager().make<Demuxer>(res->memAddr, res->memSize, cb->cbMsgFunc, cb->cbArg));
 
 	return CELL_OK;
 }
@@ -829,7 +825,7 @@ s32 cellDmuxOpenEx(vm::ptr<const CellDmuxType> type, vm::ptr<const CellDmuxResou
 
 	// TODO: check demuxerResourceEx and demuxerCb arguments
 
-	*handle = dmuxOpen(new Demuxer(resEx->memAddr, resEx->memSize, cb->cbMsgFunc, cb->cbArg));
+	dmuxOpen(*handle = Emu.GetIdManager().make<Demuxer>(resEx->memAddr, resEx->memSize, cb->cbMsgFunc, cb->cbArg));
 
 	return CELL_OK;
 }
@@ -850,7 +846,7 @@ s32 cellDmuxOpen2(vm::ptr<const CellDmuxType2> type2, vm::ptr<const CellDmuxReso
 
 	// TODO: check demuxerType2, demuxerResource2 and demuxerCb arguments
 
-	*handle = dmuxOpen(new Demuxer(res2->memAddr, res2->memSize, cb->cbMsgFunc, cb->cbArg));
+	dmuxOpen(*handle = Emu.GetIdManager().make<Demuxer>(res2->memAddr, res2->memSize, cb->cbMsgFunc, cb->cbArg));
 
 	return CELL_OK;
 }
@@ -859,7 +855,7 @@ s32 cellDmuxClose(u32 handle)
 {
 	cellDmux.Warning("cellDmuxClose(handle=0x%x)", handle);
 
-	const auto dmux = Emu.GetIdManager().GetIDData<Demuxer>(handle);
+	const auto dmux = Emu.GetIdManager().get<Demuxer>(handle);
 
 	if (!dmux)
 	{
@@ -881,7 +877,7 @@ s32 cellDmuxClose(u32 handle)
 	}
 
 	if (dmux->dmuxCb) Emu.GetCPU().RemoveThread(dmux->dmuxCb->GetId());
-	Emu.GetIdManager().RemoveID<Demuxer>(handle);
+	Emu.GetIdManager().remove<Demuxer>(handle);
 	return CELL_OK;
 }
 
@@ -889,7 +885,7 @@ s32 cellDmuxSetStream(u32 handle, u32 streamAddress, u32 streamSize, bool discon
 {
 	cellDmux.Log("cellDmuxSetStream(handle=0x%x, streamAddress=0x%x, streamSize=%d, discontinuity=%d, userData=0x%llx)", handle, streamAddress, streamSize, discontinuity, userData);
 
-	const auto dmux = Emu.GetIdManager().GetIDData<Demuxer>(handle);
+	const auto dmux = Emu.GetIdManager().get<Demuxer>(handle);
 
 	if (!dmux)
 	{
@@ -917,7 +913,7 @@ s32 cellDmuxResetStream(u32 handle)
 {
 	cellDmux.Warning("cellDmuxResetStream(handle=0x%x)", handle);
 
-	const auto dmux = Emu.GetIdManager().GetIDData<Demuxer>(handle);
+	const auto dmux = Emu.GetIdManager().get<Demuxer>(handle);
 
 	if (!dmux)
 	{
@@ -932,7 +928,7 @@ s32 cellDmuxResetStreamAndWaitDone(u32 handle)
 {
 	cellDmux.Warning("cellDmuxResetStreamAndWaitDone(handle=0x%x)", handle);
 
-	const auto dmux = Emu.GetIdManager().GetIDData<Demuxer>(handle);
+	const auto dmux = Emu.GetIdManager().get<Demuxer>(handle);
 
 	if (!dmux)
 	{
@@ -993,7 +989,7 @@ s32 cellDmuxEnableEs(u32 handle, vm::ptr<const CellCodecEsFilterId> esFilterId,
 {
 	cellDmux.Warning("cellDmuxEnableEs(handle=0x%x, esFilterId=*0x%x, esResourceInfo=*0x%x, esCb=*0x%x, esSpecificInfo=*0x%x, esHandle=*0x%x)", handle, esFilterId, esResourceInfo, esCb, esSpecificInfo, esHandle);
 
-	const auto dmux = Emu.GetIdManager().GetIDData<Demuxer>(handle);
+	const auto dmux = Emu.GetIdManager().get<Demuxer>(handle);
 
 	if (!dmux)
 	{
@@ -1002,11 +998,11 @@ s32 cellDmuxEnableEs(u32 handle, vm::ptr<const CellCodecEsFilterId> esFilterId,
 
 	// TODO: check esFilterId, esResourceInfo, esCb and esSpecificInfo correctly
 
-	std::shared_ptr<ElementaryStream> es(new ElementaryStream(dmux.get(), esResourceInfo->memAddr, esResourceInfo->memSize,
+	auto es = std::make_shared<ElementaryStream>(dmux.get(), esResourceInfo->memAddr, esResourceInfo->memSize,
 		esFilterId->filterIdMajor, esFilterId->filterIdMinor, esFilterId->supplementalInfo1, esFilterId->supplementalInfo2,
-		esCb->cbEsMsgFunc, esCb->cbArg, esSpecificInfo));
+		esCb->cbEsMsgFunc, esCb->cbArg, esSpecificInfo);
 
-	u32 id = Emu.GetIdManager().GetNewID(es);
+	u32 id = Emu.GetIdManager().add(es);
 	es->id = id;
 	*esHandle = id;
 
@@ -1025,7 +1021,7 @@ s32 cellDmuxDisableEs(u32 esHandle)
 {
 	cellDmux.Warning("cellDmuxDisableEs(esHandle=0x%x)", esHandle);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1044,7 +1040,7 @@ s32 cellDmuxResetEs(u32 esHandle)
 {
 	cellDmux.Log("cellDmuxResetEs(esHandle=0x%x)", esHandle);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1063,7 +1059,7 @@ s32 cellDmuxGetAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpecificInfo
 {
 	cellDmux.Log("cellDmuxGetAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1086,7 +1082,7 @@ s32 cellDmuxPeekAu(u32 esHandle, vm::ptr<u32> auInfo, vm::ptr<u32> auSpecificInf
 {
 	cellDmux.Log("cellDmuxPeekAu(esHandle=0x%x, auInfo=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfo, auSpecificInfo);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1109,7 +1105,7 @@ s32 cellDmuxGetAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auSpecific
 {
 	cellDmux.Log("cellDmuxGetAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1132,7 +1128,7 @@ s32 cellDmuxPeekAuEx(u32 esHandle, vm::ptr<u32> auInfoEx, vm::ptr<u32> auSpecifi
 {
 	cellDmux.Log("cellDmuxPeekAuEx(esHandle=0x%x, auInfoEx=**0x%x, auSpecificInfo=**0x%x)", esHandle, auInfoEx, auSpecificInfo);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1155,7 +1151,7 @@ s32 cellDmuxReleaseAu(u32 esHandle)
 {
 	cellDmux.Log("cellDmuxReleaseAu(esHandle=0x%x)", esHandle);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
@@ -1173,7 +1169,7 @@ s32 cellDmuxFlushEs(u32 esHandle)
 {
 	cellDmux.Warning("cellDmuxFlushEs(esHandle=0x%x)", esHandle);
 
-	const auto es = Emu.GetIdManager().GetIDData<ElementaryStream>(esHandle);
+	const auto es = Emu.GetIdManager().get<ElementaryStream>(esHandle);
 
 	if (!es)
 	{
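dmuxOpen now takes an ID instead of a raw pointer: the caller registers the Demuxer with make<Demuxer>(...), writes the returned ID into *handle, and dmuxOpen fetches the same shared_ptr back by ID before spawning the worker thread. A simplified stand-in of that two-step flow, with invented helper names rather than the real RPCS3 API:

#include <cstdint>
#include <map>
#include <memory>

struct demuxer_stub { std::uint32_t id = 0; };

static std::map<std::uint32_t, std::shared_ptr<demuxer_stub>> g_objects;
static std::uint32_t g_next_id = 1;

// register a new object, return its ID (the make<Demuxer>(...) step)
std::uint32_t make_demuxer()
{
	const std::uint32_t id = g_next_id++;
	g_objects[id] = std::make_shared<demuxer_stub>();
	return id;
}

// fetch the registered object by ID and finish setting it up (the dmuxOpen(id) step)
void open_demuxer(std::uint32_t id)
{
	const auto sptr = g_objects.at(id);
	sptr->id = id; // a worker thread would capture sptr by value to keep it alive
}

void cell_style_open(std::uint32_t* handle)
{
	open_demuxer(*handle = make_demuxer());
}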
@@ -293,57 +293,57 @@ int cellFiberPpuUtilWorkerControlInitializeWithAttribute()
 
 Module cellFiber("cellFiber", []()
 {
-	REG_FUNC(cellFiber, _cellFiberPpuInitialize);
+	REG_FUNC_NR(cellFiber, _cellFiberPpuInitialize);
 
-	REG_FUNC(cellFiber, _cellFiberPpuSchedulerAttributeInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuInitializeScheduler);
-	REG_FUNC(cellFiber, cellFiberPpuFinalizeScheduler);
-	REG_FUNC(cellFiber, cellFiberPpuRunFibers);
-	REG_FUNC(cellFiber, cellFiberPpuCheckFlags);
-	REG_FUNC(cellFiber, cellFiberPpuHasRunnableFiber);
+	REG_FUNC_NR(cellFiber, _cellFiberPpuSchedulerAttributeInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuInitializeScheduler);
+	REG_FUNC_NR(cellFiber, cellFiberPpuFinalizeScheduler);
+	REG_FUNC_NR(cellFiber, cellFiberPpuRunFibers);
+	REG_FUNC_NR(cellFiber, cellFiberPpuCheckFlags);
+	REG_FUNC_NR(cellFiber, cellFiberPpuHasRunnableFiber);
 
-	REG_FUNC(cellFiber, _cellFiberPpuAttributeInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuCreateFiber);
-	REG_FUNC(cellFiber, cellFiberPpuExit);
-	REG_FUNC(cellFiber, cellFiberPpuYield);
-	REG_FUNC(cellFiber, cellFiberPpuJoinFiber);
-	REG_FUNC(cellFiber, cellFiberPpuSelf);
-	REG_FUNC(cellFiber, cellFiberPpuSendSignal);
-	REG_FUNC(cellFiber, cellFiberPpuWaitSignal);
-	REG_FUNC(cellFiber, cellFiberPpuWaitFlag);
-	REG_FUNC(cellFiber, cellFiberPpuGetScheduler);
-	REG_FUNC(cellFiber, cellFiberPpuSetPriority);
-	REG_FUNC(cellFiber, cellFiberPpuCheckStackLimit);
+	REG_FUNC_NR(cellFiber, _cellFiberPpuAttributeInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuCreateFiber);
+	REG_FUNC_NR(cellFiber, cellFiberPpuExit);
+	REG_FUNC_NR(cellFiber, cellFiberPpuYield);
+	REG_FUNC_NR(cellFiber, cellFiberPpuJoinFiber);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSelf);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSendSignal);
+	REG_FUNC_NR(cellFiber, cellFiberPpuWaitSignal);
+	REG_FUNC_NR(cellFiber, cellFiberPpuWaitFlag);
+	REG_FUNC_NR(cellFiber, cellFiberPpuGetScheduler);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSetPriority);
+	REG_FUNC_NR(cellFiber, cellFiberPpuCheckStackLimit);
 
-	REG_FUNC(cellFiber, _cellFiberPpuContextAttributeInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuContextInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuContextFinalize);
-	REG_FUNC(cellFiber, cellFiberPpuContextRun);
-	REG_FUNC(cellFiber, cellFiberPpuContextSwitch);
-	REG_FUNC(cellFiber, cellFiberPpuContextSelf);
-	REG_FUNC(cellFiber, cellFiberPpuContextReturnToThread);
-	REG_FUNC(cellFiber, cellFiberPpuContextCheckStackLimit);
+	REG_FUNC_NR(cellFiber, _cellFiberPpuContextAttributeInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextFinalize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextRun);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextSwitch);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextSelf);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextReturnToThread);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextCheckStackLimit);
 
-	REG_FUNC(cellFiber, cellFiberPpuContextRunScheduler);
-	REG_FUNC(cellFiber, cellFiberPpuContextEnterScheduler);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextRunScheduler);
+	REG_FUNC_NR(cellFiber, cellFiberPpuContextEnterScheduler);
 
-	REG_FUNC(cellFiber, cellFiberPpuSchedulerTraceInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuSchedulerTraceFinalize);
-	REG_FUNC(cellFiber, cellFiberPpuSchedulerTraceStart);
-	REG_FUNC(cellFiber, cellFiberPpuSchedulerTraceStop);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSchedulerTraceInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSchedulerTraceFinalize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSchedulerTraceStart);
+	REG_FUNC_NR(cellFiber, cellFiberPpuSchedulerTraceStop);
 
-	REG_FUNC(cellFiber, _cellFiberPpuUtilWorkerControlAttributeInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlRunFibers);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlInitialize);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlSetPollingMode);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlJoinFiber);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlDisconnectEventQueue);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlSendSignal);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlConnectEventQueueToSpurs);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlFinalize);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlWakeup);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlCreateFiber);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlShutdown);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlCheckFlags);
-	REG_FUNC(cellFiber, cellFiberPpuUtilWorkerControlInitializeWithAttribute);
+	REG_FUNC_NR(cellFiber, _cellFiberPpuUtilWorkerControlAttributeInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlRunFibers);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlInitialize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlSetPollingMode);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlJoinFiber);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlDisconnectEventQueue);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlSendSignal);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlConnectEventQueueToSpurs);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlFinalize);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlWakeup);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlCreateFiber);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlShutdown);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlCheckFlags);
+	REG_FUNC_NR(cellFiber, cellFiberPpuUtilWorkerControlInitializeWithAttribute);
 });
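The registration block above switches every cellFiber entry point from REG_FUNC to REG_FUNC_NR; the exact difference between the two macros is defined elsewhere in the tree and is not visible in this diff. Purely as a hypothetical sketch of how such a registration macro can map a function identifier to a stub by name (the registry type and macro here are invented for illustration):

#include <functional>
#include <map>
#include <string>

struct module_stub
{
	std::map<std::string, std::function<void()>> funcs;

	void reg(const std::string& name, std::function<void()> f)
	{
		funcs.emplace(name, std::move(f));
	}
};

// stringify the identifier so the registry key matches the C function name
#define REG_STUB(mod, func) (mod).reg(#func, [] { func(); })

void cellFiberPpuYield_stub() { /* placeholder body */ }

int main()
{
	module_stub cellFiber;
	REG_STUB(cellFiber, cellFiberPpuYield_stub);
	return cellFiber.funcs.count("cellFiberPpuYield_stub") ? 0 : 1;
}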
@@ -210,7 +210,7 @@ s32 cellFsGetDirectoryEntries(u32 fd, vm::ptr<CellFsDirectoryEntry> entries, u32
 {
 	cellFs.Warning("cellFsGetDirectoryEntries(fd=0x%x, entries=*0x%x, entries_size=0x%x, data_count=*0x%x)", fd, entries, entries_size, data_count);
 
-	const auto directory = Emu.GetIdManager().GetIDData<vfsDirBase>(fd);
+	const auto directory = Emu.GetIdManager().get<lv2_dir_t>(fd);
 
 	if (!directory)
 	{
@@ -255,7 +255,7 @@ s32 cellFsReadWithOffset(u32 fd, u64 offset, vm::ptr<void> buf, u64 buffer_size,
 
 	// TODO: use single sys_fs_fcntl syscall
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file || file->flags & CELL_FS_O_WRONLY)
 	{
@@ -286,7 +286,7 @@ s32 cellFsWriteWithOffset(u32 fd, u64 offset, vm::ptr<const void> buf, u64 data_
 
 	// TODO: use single sys_fs_fcntl syscall
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file || !(file->flags & CELL_FS_O_ACCMODE))
 	{
@@ -330,7 +330,7 @@ s32 cellFsStReadInit(u32 fd, vm::ptr<const CellFsRingBuffer> ringbuf)
 		return CELL_FS_EINVAL;
 	}
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -368,7 +368,7 @@ s32 cellFsStReadFinish(u32 fd)
 {
 	cellFs.Warning("cellFsStReadFinish(fd=0x%x)", fd);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -391,7 +391,7 @@ s32 cellFsStReadGetRingBuf(u32 fd, vm::ptr<CellFsRingBuffer> ringbuf)
 {
 	cellFs.Warning("cellFsStReadGetRingBuf(fd=0x%x, ringbuf=*0x%x)", fd, ringbuf);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -415,7 +415,7 @@ s32 cellFsStReadGetStatus(u32 fd, vm::ptr<u64> status)
 {
 	cellFs.Warning("cellFsStReadGetRingBuf(fd=0x%x, status=*0x%x)", fd, status);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -449,7 +449,7 @@ s32 cellFsStReadGetRegid(u32 fd, vm::ptr<u64> regid)
 {
 	cellFs.Warning("cellFsStReadGetRingBuf(fd=0x%x, regid=*0x%x)", fd, regid);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -470,7 +470,7 @@ s32 cellFsStReadStart(u32 fd, u64 offset, u64 size)
 {
 	cellFs.Warning("cellFsStReadStart(fd=0x%x, offset=0x%llx, size=0x%llx)", fd, offset, size);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -552,7 +552,7 @@ s32 cellFsStReadStop(u32 fd)
 {
 	cellFs.Warning("cellFsStReadStop(fd=0x%x)", fd);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -583,7 +583,7 @@ s32 cellFsStRead(u32 fd, vm::ptr<u8> buf, u64 size, vm::ptr<u64> rsize)
 {
 	cellFs.Warning("cellFsStRead(fd=0x%x, buf=*0x%x, size=0x%llx, rsize=*0x%x)", fd, buf, size, rsize);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -617,7 +617,7 @@ s32 cellFsStReadGetCurrentAddr(u32 fd, vm::ptr<u32> addr, vm::ptr<u64> size)
 {
 	cellFs.Warning("cellFsStReadGetCurrentAddr(fd=0x%x, addr=*0x%x, size=*0x%x)", fd, addr, size);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -650,7 +650,7 @@ s32 cellFsStReadPutCurrentAddr(u32 fd, vm::ptr<u8> addr, u64 size)
 {
 	cellFs.Warning("cellFsStReadPutCurrentAddr(fd=0x%x, addr=*0x%x, size=0x%llx)", fd, addr, size);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -677,7 +677,7 @@ s32 cellFsStReadWait(u32 fd, u64 size)
 {
 	cellFs.Warning("cellFsStReadWait(fd=0x%x, size=0x%llx)", fd, size);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -711,7 +711,7 @@ s32 cellFsStReadWaitCallback(u32 fd, u64 size, fs_st_cb_t func)
 {
 	cellFs.Warning("cellFsStReadWaitCallback(fd=0x%x, size=0x%llx, func=*0x%x)", fd, size, func);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
@@ -879,7 +879,7 @@ void fsAio(vm::ptr<CellFsAio> aio, bool write, s32 xid, fs_aio_cb_t func)
 	s32 error = CELL_OK;
 	u64 result = 0;
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(aio->fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(aio->fd);
 
 	if (!file || (!write && file->flags & CELL_FS_O_WRONLY) || (write && !(file->flags & CELL_FS_O_ACCMODE)))
 	{
@@ -931,11 +931,6 @@ s32 cellFsAioRead(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
 {
 	cellFs.Warning("cellFsAioRead(aio=*0x%x, id=*0x%x, func=*0x%x)", aio, id, func);
 
-	if (!Emu.GetIdManager().CheckID<fs_file_t>(aio->fd))
-	{
-		return CELL_FS_EBADF;
-	}
-
 	// TODO: detect mount point and send AIO request to the AIO thread of this mount point
 
 	const s32 xid = (*id = ++g_fs_aio_id);
@@ -949,11 +944,6 @@ s32 cellFsAioWrite(vm::ptr<CellFsAio> aio, vm::ptr<s32> id, fs_aio_cb_t func)
 {
 	cellFs.Warning("cellFsAioWrite(aio=*0x%x, id=*0x%x, func=*0x%x)", aio, id, func);
 
-	if (!Emu.GetIdManager().CheckID<fs_file_t>(aio->fd))
-	{
-		return CELL_FS_EBADF;
-	}
-
 	// TODO: detect mount point and send AIO request to the AIO thread of this mount point
 
 	const s32 xid = (*id = ++g_fs_aio_id);
@@ -983,7 +973,7 @@ s32 cellFsSetIoBufferFromDefaultContainer(u32 fd, u32 buffer_size, u32 page_type
 {
 	cellFs.Todo("cellFsSetIoBufferFromDefaultContainer(fd=0x%x, buffer_size=%d, page_type=%d)", fd, buffer_size, page_type);
 
-	const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+	const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 
 	if (!file)
 	{
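The read and write paths above gate on the descriptor's open flags: reads are rejected on write-only descriptors and writes require some write access. A small sketch of those checks, with POSIX-style flag values assumed for illustration rather than copied from the RPCS3 headers:

#include <cstdint>

constexpr std::uint32_t FS_O_WRONLY  = 0x000001; // assumed value
constexpr std::uint32_t FS_O_ACCMODE = 0x000003; // assumed value

// a read is rejected when the descriptor was opened write-only
bool can_read(std::uint32_t flags)
{
	return (flags & FS_O_WRONLY) == 0;
}

// a write needs the descriptor to carry some write access bit
bool can_write(std::uint32_t flags)
{
	return (flags & FS_O_ACCMODE) != 0;
}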
@@ -34,7 +34,7 @@ s32 cellGifDecOpen(u32 mainHandle, vm::ptr<u32> subHandle, vm::ptr<CellGifDecSrc
 {
 	cellGifDec.Warning("cellGifDecOpen(mainHandle=0x%x, subHandle=*0x%x, src=*0x%x, openInfo=*0x%x)", mainHandle, subHandle, src, openInfo);
 
-	std::shared_ptr<CellGifDecSubHandle> current_subHandle(new CellGifDecSubHandle);
+	auto current_subHandle = std::make_shared<CellGifDecSubHandle>();
 	current_subHandle->fd = 0;
 	current_subHandle->src = *src;
 
@@ -48,16 +48,16 @@ s32 cellGifDecOpen(u32 mainHandle, vm::ptr<u32> subHandle, vm::ptr<CellGifDecSrc
 	{
 		// Get file descriptor and size
 		std::shared_ptr<vfsStream> file_s(Emu.GetVFS().OpenFile(src->fileName.get_ptr(), vfsRead));
-		std::shared_ptr<fs_file_t> file(new fs_file_t(file_s, 0, 0));
-		if (!file) return CELL_GIFDEC_ERROR_OPEN_FILE;
-		current_subHandle->fd = Emu.GetIdManager().GetNewID(file, TYPE_FS_FILE);
-		current_subHandle->fileSize = file->file->GetSize();
+		if (!file_s) return CELL_GIFDEC_ERROR_OPEN_FILE;
+
+		current_subHandle->fd = Emu.GetIdManager().make<lv2_file_t>(file_s, 0, 0);
+		current_subHandle->fileSize = file_s->GetSize();
 		break;
 	}
 	}
 
 	// From now, every u32 subHandle argument is a pointer to a CellGifDecSubHandle struct.
-	*subHandle = Emu.GetIdManager().GetNewID(current_subHandle);
+	*subHandle = Emu.GetIdManager().add(std::move(current_subHandle));
 
 	return CELL_OK;
 }
@@ -66,7 +66,7 @@ s32 cellGifDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellGifDecInfo>
 {
 	cellGifDec.Warning("cellGifDecReadHeader(mainHandle=0x%x, subHandle=0x%x, info=*0x%x)", mainHandle, subHandle, info);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellGifDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellGifDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -88,7 +88,7 @@ s32 cellGifDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellGifDecInfo>
 
 	case se32(CELL_GIFDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 		file->file->Seek(0);
 		file->file->Read(buffer.begin(), buffer.size());
 		break;
@@ -120,7 +120,7 @@ s32 cellGifDecSetParameter(u32 mainHandle, u32 subHandle, vm::ptr<const CellGifD
 {
 	cellGifDec.Warning("cellGifDecSetParameter(mainHandle=0x%x, subHandle=0x%x, inParam=*0x%x, outParam=*0x%x)", mainHandle, subHandle, inParam, outParam);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellGifDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellGifDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -154,7 +154,7 @@ s32 cellGifDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data, vm::pt
 
 	dataOutInfo->status = CELL_GIFDEC_DEC_STATUS_STOP;
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellGifDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellGifDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -176,7 +176,7 @@ s32 cellGifDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data, vm::pt
 
 	case se32(CELL_GIFDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 		file->file->Seek(0);
 		file->file->Read(gif.ptr(), gif.size());
 		break;
@@ -273,15 +273,15 @@ s32 cellGifDecClose(u32 mainHandle, u32 subHandle)
 {
 	cellGifDec.Warning("cellGifDecClose(mainHandle=0x%x, subHandle=0x%x)", mainHandle, subHandle);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellGifDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellGifDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
 		return CELL_GIFDEC_ERROR_FATAL;
 	}
 
-	Emu.GetIdManager().RemoveID<fs_file_t>(subHandle_data->fd);
-	Emu.GetIdManager().RemoveID<CellGifDecSubHandle>(subHandle);
+	Emu.GetIdManager().remove<lv2_file_t>(subHandle_data->fd);
+	Emu.GetIdManager().remove<CellGifDecSubHandle>(subHandle);
 
 	return CELL_OK;
 }
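Besides the type rename, the open path above tightens its error check: the old code tested a shared_ptr it had just constructed with new, which is never null, so a failed OpenFile slipped through; the new code tests the stream returned by OpenFile itself. Illustrated with stand-in types:

#include <memory>

struct stream_stub {};
struct file_wrapper_stub { std::shared_ptr<stream_stub> s; };

// returns true only when the underlying stream really opened
bool open_ok(std::shared_ptr<stream_stub> file_s)
{
	// old shape: the wrapper itself is never null, so this check always passed
	std::shared_ptr<file_wrapper_stub> file(new file_wrapper_stub{ file_s });
	(void)file;

	// new shape: test what OpenFile actually returned
	return static_cast<bool>(file_s);
}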
@@ -39,7 +39,7 @@ s32 cellJpgDecOpen(u32 mainHandle, vm::ptr<u32> subHandle, vm::ptr<CellJpgDecSrc
 {
 	cellJpgDec.Warning("cellJpgDecOpen(mainHandle=0x%x, subHandle=*0x%x, src=*0x%x, openInfo=*0x%x)", mainHandle, subHandle, src, openInfo);
 
-	std::shared_ptr<CellJpgDecSubHandle> current_subHandle(new CellJpgDecSubHandle);
+	auto current_subHandle = std::make_shared<CellJpgDecSubHandle>();
 
 	current_subHandle->fd = 0;
 	current_subHandle->src = *src;
@@ -54,16 +54,16 @@ s32 cellJpgDecOpen(u32 mainHandle, vm::ptr<u32> subHandle, vm::ptr<CellJpgDecSrc
 	{
 		// Get file descriptor and size
 		std::shared_ptr<vfsStream> file_s(Emu.GetVFS().OpenFile(src->fileName.get_ptr(), vfsRead));
-		std::shared_ptr<fs_file_t> file(new fs_file_t(file_s, 0, 0));
-		if (!file) return CELL_JPGDEC_ERROR_OPEN_FILE;
-		current_subHandle->fd = Emu.GetIdManager().GetNewID(file, TYPE_FS_FILE);
-		current_subHandle->fileSize = file->file->GetSize();
+		if (!file_s) return CELL_JPGDEC_ERROR_OPEN_FILE;
+
+		current_subHandle->fd = Emu.GetIdManager().make<lv2_file_t>(file_s, 0, 0);
+		current_subHandle->fileSize = file_s->GetSize();
 		break;
 	}
 	}
 
 	// From now, every u32 subHandle argument is a pointer to a CellJpgDecSubHandle struct.
-	*subHandle = Emu.GetIdManager().GetNewID(current_subHandle);
+	*subHandle = Emu.GetIdManager().add(std::move(current_subHandle));
 
 	return CELL_OK;
 }
@@ -72,15 +72,15 @@ s32 cellJpgDecClose(u32 mainHandle, u32 subHandle)
 {
 	cellJpgDec.Warning("cellJpgDecOpen(mainHandle=0x%x, subHandle=0x%x)", mainHandle, subHandle);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellJpgDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellJpgDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
 		return CELL_JPGDEC_ERROR_FATAL;
 	}
 
-	Emu.GetIdManager().RemoveID<fs_file_t>(subHandle_data->fd);
-	Emu.GetIdManager().RemoveID<CellJpgDecSubHandle>(subHandle);
+	Emu.GetIdManager().remove<lv2_file_t>(subHandle_data->fd);
+	Emu.GetIdManager().remove<CellJpgDecSubHandle>(subHandle);
 
 	return CELL_OK;
 }
@@ -89,7 +89,7 @@ s32 cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDecInfo>
 {
 	cellJpgDec.Log("cellJpgDecReadHeader(mainHandle=0x%x, subHandle=0x%x, info=*0x%x)", mainHandle, subHandle, info);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellJpgDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellJpgDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -111,7 +111,7 @@ s32 cellJpgDecReadHeader(u32 mainHandle, u32 subHandle, vm::ptr<CellJpgDecInfo>
 
 	case se32(CELL_JPGDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 		file->file->Seek(0);
 		file->file->Read(buffer.ptr(), buffer.size());
 		break;
@@ -163,7 +163,7 @@ s32 cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data, vm::pt
 
 	dataOutInfo->status = CELL_JPGDEC_DEC_STATUS_STOP;
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellJpgDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellJpgDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -185,7 +185,7 @@ s32 cellJpgDecDecodeData(u32 mainHandle, u32 subHandle, vm::ptr<u8> data, vm::pt
 
 	case se32(CELL_JPGDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
 		file->file->Seek(0);
 		file->file->Read(jpg.ptr(), jpg.size());
 		break;
@@ -297,7 +297,7 @@ s32 cellJpgDecSetParameter(u32 mainHandle, u32 subHandle, vm::ptr<const CellJpgD
 {
 	cellJpgDec.Log("cellJpgDecSetParameter(mainHandle=0x%x, subHandle=0x%x, inParam=*0x%x, outParam=*0x%x)", mainHandle, subHandle, inParam, outParam);
 
-	const auto subHandle_data = Emu.GetIdManager().GetIDData<CellJpgDecSubHandle>(subHandle);
+	const auto subHandle_data = Emu.GetIdManager().get<CellJpgDecSubHandle>(subHandle);
 
 	if (!subHandle_data)
 	{
@@ -86,10 +86,10 @@ s32 pngDecOpen(
 	{
 		// Get file descriptor and size
 		std::shared_ptr<vfsStream> file_s(Emu.GetVFS().OpenFile(src->fileName.get_ptr(), vfsRead));
-		std::shared_ptr<fs_file_t> file(new fs_file_t(file_s, 0, 0));
-		if (!file) return CELL_PNGDEC_ERROR_OPEN_FILE;
-		stream->fd = Emu.GetIdManager().GetNewID(file, TYPE_FS_FILE);
-		stream->fileSize = file->file->GetSize();
+		if (!file_s) return CELL_PNGDEC_ERROR_OPEN_FILE;
+
+		stream->fd = Emu.GetIdManager().make<lv2_file_t>(file_s, 0, 0);
+		stream->fileSize = file_s->GetSize();
 		break;
 	}
 	}
@@ -115,7 +115,7 @@ s32 pngDecOpen(
 
 s32 pngDecClose(CellPngDecSubHandle stream)
 {
-	Emu.GetIdManager().RemoveID<fs_file_t>(stream->fd);
+	Emu.GetIdManager().remove<lv2_file_t>(stream->fd);
 
 	if (!Memory.Free(stream.addr()))
 	{
@@ -148,7 +148,7 @@ s32 pngReadHeader(
 		break;
 	case se32(CELL_PNGDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(stream->fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(stream->fd);
 		file->file->Seek(0);
 		file->file->Read(buffer.begin(), buffer.size());
 		break;
@@ -260,7 +260,7 @@ s32 pngDecodeData(
 
 	case se32(CELL_PNGDEC_FILE):
 	{
-		auto file = Emu.GetIdManager().GetIDData<fs_file_t>(stream->fd);
+		auto file = Emu.GetIdManager().get<lv2_file_t>(stream->fd);
 		file->file->Seek(0);
 		file->file->Read(png.ptr(), png.size());
 		break;
@@ -33,7 +33,7 @@ enum : u32
 	SAVEDATA_OP_FIXED_DELETE = 14,
 };
 
-__noinline s32 savedata_op(
+never_inline s32 savedata_op(
 	PPUThread& CPU,
 	u32 operation,
 	u32 version,
@@ -61,7 +61,8 @@ __noinline s32 savedata_op(
 		return CELL_SAVEDATA_ERROR_BUSY;
 	}
 
-	std::string base_dir = "/dev_hdd0/home/00000001/savedata/"; // TODO: Get the path of the current or specified user
+	// path of the specified user (00000001 by default)
+	const std::string base_dir = fmt::format("/dev_hdd0/home/%08d/savedata/", userId ? userId : 1u);
 
 	vm::stackvar<CellSaveDataCBResult> result(CPU);
 
@@ -498,7 +499,7 @@ __noinline s32 savedata_op(
 	fileGet->excSize = 0;
 	memset(fileGet->reserved, 0, sizeof(fileGet->reserved));
 
-	while (true)
+	while (funcFile)
 	{
 		funcFile(CPU, result, fileGet, fileSet);
 
@@ -785,10 +786,10 @@ s32 cellSaveDataUserListSave(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserListSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserListSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, setList, setBuf, funcList, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_LIST_SAVE, version, vm::null, 0, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserListLoad(
@@ -803,10 +804,10 @@ s32 cellSaveDataUserListLoad(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserListLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserListLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcList=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
		version, userId, setList, setBuf, funcList, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_LIST_LOAD, version, vm::null, 0, setList, setBuf, funcList, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserFixedSave(
@@ -821,10 +822,10 @@ s32 cellSaveDataUserFixedSave(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserFixedSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserFixedSave(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_FIXED_SAVE, version, vm::null, 0, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserFixedLoad(
@@ -839,10 +840,10 @@ s32 cellSaveDataUserFixedLoad(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserFixedLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserFixedLoad(version=%d, userId=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_FIXED_LOAD, version, vm::null, 0, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserAutoSave(
@@ -857,10 +858,10 @@ s32 cellSaveDataUserAutoSave(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserAutoSave(version=%d, userId=%d, dirName=*0x%x, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserAutoSave(version=%d, userId=%d, dirName=*0x%x, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_AUTO_SAVE, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserAutoLoad(
@@ -875,10 +876,10 @@ s32 cellSaveDataUserAutoLoad(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserAutoLoad(version=%d, userId=%d, dirName=*0x%x, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserAutoLoad(version=%d, userId=%d, dirName=*0x%x, errDialog=%d, setBuf=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, dirName, errDialog, setBuf, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_AUTO_LOAD, version, dirName, errDialog, vm::null, setBuf, vm::null, vm::null, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserListAutoSave(
@@ -894,10 +895,10 @@ s32 cellSaveDataUserListAutoSave(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserListAutoSave(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserListAutoSave(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_LIST_AUTO_SAVE, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserListAutoLoad(
@@ -913,10 +914,10 @@ s32 cellSaveDataUserListAutoLoad(
 	u32 container,
 	vm::ptr<void> userdata)
 {
-	cellSysutil.Todo("cellSaveDataUserListAutoLoad(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
+	cellSysutil.Error("cellSaveDataUserListAutoLoad(version=%d, userId=%d, errDialog=%d, setList=*0x%x, setBuf=*0x%x, funcFixed=*0x%x, funcStat=*0x%x, funcFile=*0x%x, container=0x%x, userdata=*0x%x)",
 		version, userId, errDialog, setList, setBuf, funcFixed, funcStat, funcFile, container, userdata);
 
-	return CELL_OK;
+	return savedata_op(CPU, SAVEDATA_OP_LIST_AUTO_LOAD, version, vm::null, errDialog, setList, setBuf, vm::null, funcFixed, funcStat, funcFile, container, 6, userdata, userId, vm::null);
 }
 
 s32 cellSaveDataUserFixedDelete(
@@ -937,7 +938,7 @@ s32 cellSaveDataUserFixedDelete(
 
 void cellSaveDataEnableOverlay(s32 enable)
 {
-	cellSysutil.Todo("cellSaveDataEnableOverlay(enable=%d)", enable);
+	cellSysutil.Error("cellSaveDataEnableOverlay(enable=%d)", enable);
 
 	return;
 }
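The per-user stubs above now forward to savedata_op with the matching SAVEDATA_OP_* constant instead of returning CELL_OK unconditionally, and the base directory is built from the caller's userId, falling back to user 1. A worked example of the zero-padded path:

#include <cstdio>
#include <string>

std::string savedata_base_dir(unsigned userId)
{
	char buf[64];
	// userId 0 falls back to 1; the width-8 zero pad yields "00000001"
	std::snprintf(buf, sizeof(buf), "/dev_hdd0/home/%08u/savedata/", userId ? userId : 1u);
	return buf; // userId = 42 gives "/dev_hdd0/home/00000042/savedata/"
}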
@@ -3,6 +3,8 @@
 #include "Emu/System.h"
 #include "Emu/SysCalls/Modules.h"
+#include "Emu/SysCalls/CB_FUNC.h"
+#include "Emu/IdManager.h"
 #include "Emu/Event.h"
 
 #include "Emu/CPU/CPUThreadManager.h"
 #include "Emu/Cell/SPUThread.h"
@@ -27,16 +29,20 @@ s32 _cellSpursSendSignal(vm::ptr<CellSpursTaskset> taskset, u32 taskID);
 
 s32 spursCreateLv2EventQueue(vm::ptr<CellSpurs> spurs, u32& queue_id, vm::ptr<u8> port, s32 size, u64 name_u64)
 {
-	queue_id = event_queue_create(SYS_SYNC_FIFO, SYS_PPU_QUEUE, name_u64, 0, size);
-	if (!queue_id)
+	auto queue = Emu.GetEventManager().MakeEventQueue(SYS_SYNC_FIFO, SYS_PPU_QUEUE, name_u64, 0, size);
+
+	if (!queue) // rough
 	{
-		return CELL_EAGAIN; // rough
+		return CELL_EAGAIN;
 	}
 
+	queue_id = Emu.GetIdManager().add(std::move(queue));
+
 	if (s32 res = spursAttachLv2EventQueue(spurs, queue_id, port, 1, true))
 	{
 		assert(!"spursAttachLv2EventQueue() failed");
 	}
 
 	return CELL_OK;
 }
 
@@ -105,7 +111,7 @@ s32 spursInit(
 	u32 sem;
 	for (u32 i = 0; i < 0x10; i++)
 	{
-		sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
+		sem = Emu.GetIdManager().make<lv2_sema_t>(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
 		assert(sem && ~sem); // should rollback if semaphore creation failed and return the error
 		spurs->m.wklF1[i].sem = sem;
 	}
@@ -113,12 +119,12 @@ s32 spursInit(
 	{
 		for (u32 i = 0; i < 0x10; i++)
 		{
-			sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
+			sem = Emu.GetIdManager().make<lv2_sema_t>(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuWkl");
 			assert(sem && ~sem);
 			spurs->m.wklF2[i].sem = sem;
 		}
 	}
-	sem = semaphore_create(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuPrv");
+	sem = Emu.GetIdManager().make<lv2_sema_t>(0, 1, SYS_SYNC_PRIORITY, *(u64*)"_spuPrv");
 	assert(sem && ~sem);
 	spurs->m.semPrv = sem;
 	spurs->m.unk11 = -1;
@@ -186,7 +192,7 @@ s32 spursInit(
 	}
 	spurs->m.queue = queue;
 
-	u32 port = event_port_create(0);
+	u32 port = Emu.GetIdManager().make<lv2_event_port_t>(SYS_EVENT_PORT_LOCAL, 0);
 	assert(port && ~port);
 	spurs->m.port = port;
 
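SPURS initialization now creates its semaphores and event port through the ID manager directly. The assert(sem && ~sem) idiom kept by these hunks screens the returned ID against the two conventional invalid values, 0 and all-ones:

#include <cassert>
#include <cstdint>

void check_id(std::uint32_t sem)
{
	// 0 fails the first operand; 0xFFFFFFFF fails the second, since ~0xFFFFFFFF == 0
	assert(sem && ~sem);
}

int main()
{
	check_id(42);       // passes
	// check_id(0);     // would trip the first operand
	// check_id(~0u);   // would trip the second operand
	return 0;
}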
@@ -304,7 +304,7 @@ struct CellSpursWorkloadFlag
 {
 	be_t<u64> unused0;
 	be_t<u32> unused1;
-	atomic_t<u32> flag;
+	atomic_be_t<u32> flag;
 };
 
 typedef void(CellSpursShutdownCompletionEventHook)(vm::ptr<CellSpurs>, u32 wid, vm::ptr<void> arg);
@@ -412,7 +412,7 @@ struct CellSpurs
 		vm::bptr<const void, 1, u64> addr; // Address of the executable
 		be_t<u64> arg; // spu argument
 		be_t<u32> size;
-		atomic_t<u8> uniqueId; // The unique id is the same for all workloads with the same addr
+		atomic_be_t<u8> uniqueId; // The unique id is the same for all workloads with the same addr
 		u8 pad[3];
 		u8 priority[8];
 	};
@@ -436,30 +436,30 @@ struct CellSpurs
 	// real data
 	struct
 	{
-		atomic_t<u8> wklReadyCount1[0x10]; // 0x00 Number of SPUs requested by each workload (0..15 wids).
-		atomic_t<u8> wklIdleSpuCountOrReadyCount2[0x10]; // 0x10 SPURS1: Number of idle SPUs requested by each workload (0..15 wids). SPURS2: Number of SPUs requested by each workload (16..31 wids).
+		atomic_be_t<u8> wklReadyCount1[0x10]; // 0x00 Number of SPUs requested by each workload (0..15 wids).
+		atomic_be_t<u8> wklIdleSpuCountOrReadyCount2[0x10]; // 0x10 SPURS1: Number of idle SPUs requested by each workload (0..15 wids). SPURS2: Number of SPUs requested by each workload (16..31 wids).
 		u8 wklCurrentContention[0x10]; // 0x20 Number of SPUs used by each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
 		u8 wklPendingContention[0x10]; // 0x30 Number of SPUs that are pending to context switch to the workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
 		u8 wklMinContention[0x10]; // 0x40 Min SPUs required for each workload. SPURS1: index = wid. SPURS2: Unused.
-		atomic_t<u8> wklMaxContention[0x10]; // 0x50 Max SPUs that may be allocated to each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
+		atomic_be_t<u8> wklMaxContention[0x10]; // 0x50 Max SPUs that may be allocated to each workload. SPURS1: index = wid. SPURS2: packed 4-bit data, index = wid % 16, internal index = wid / 16.
 		CellSpursWorkloadFlag wklFlag; // 0x60
-		atomic_t<u16> wklSignal1; // 0x70 (bitset for 0..15 wids)
-		atomic_t<u8> sysSrvMessage; // 0x72
+		atomic_be_t<u16> wklSignal1; // 0x70 (bitset for 0..15 wids)
+		atomic_be_t<u8> sysSrvMessage; // 0x72
 		u8 spuIdling; // 0x73
 		u8 flags1; // 0x74 Type is SpursFlags1
 		u8 sysSrvTraceControl; // 0x75
 		u8 nSpus; // 0x76
-		atomic_t<u8> wklFlagReceiver; // 0x77
-		atomic_t<u16> wklSignal2; // 0x78 (bitset for 16..32 wids)
+		atomic_be_t<u8> wklFlagReceiver; // 0x77
+		atomic_be_t<u16> wklSignal2; // 0x78 (bitset for 16..32 wids)
 		u8 x7A[6]; // 0x7A
-		atomic_t<u8> wklState1[0x10]; // 0x80 SPURS_WKL_STATE_*
+		atomic_be_t<u8> wklState1[0x10]; // 0x80 SPURS_WKL_STATE_*
 		u8 wklStatus1[0x10]; // 0x90
 		u8 wklEvent1[0x10]; // 0xA0
-		atomic_t<u32> wklMskA; // 0xB0 - System service - Available workloads (32*u1)
-		atomic_t<u32> wklMskB; // 0xB4 - System service - Available module id
+		atomic_be_t<u32> wklMskA; // 0xB0 - System service - Available workloads (32*u1)
+		atomic_be_t<u32> wklMskB; // 0xB4 - System service - Available module id
 		u32 xB8; // 0xB8
 		u8 sysSrvExitBarrier; // 0xBC
-		atomic_t<u8> sysSrvMsgUpdateWorkload; // 0xBD
+		atomic_be_t<u8> sysSrvMsgUpdateWorkload; // 0xBD
 		u8 xBE; // 0xBE
 		u8 sysSrvMsgTerminate; // 0xBF
 		u8 sysSrvWorkload[8]; // 0xC0
@@ -471,7 +471,7 @@ struct CellSpurs
 		u8 xCD; // 0xCD
 		u8 sysSrvMsgUpdateTrace; // 0xCE
 		u8 xCF; // 0xCF
-		atomic_t<u8> wklState2[0x10]; // 0xD0 SPURS_WKL_STATE_*
+		atomic_be_t<u8> wklState2[0x10]; // 0xD0 SPURS_WKL_STATE_*
 		u8 wklStatus2[0x10]; // 0xE0
 		u8 wklEvent2[0x10]; // 0xF0
 		_sub_str1 wklF1[0x10]; // 0x100
@@ -495,10 +495,10 @@ struct CellSpurs
 		u8 unknown3[0xD5C - 0xD54];
 		be_t<u32> queue; // 0xD5C - Event queue
 		be_t<u32> port; // 0xD60 - Event port
-		atomic_t<u8> xD64; // 0xD64 - SPURS handler dirty
-		atomic_t<u8> xD65; // 0xD65 - SPURS handler waiting
-		atomic_t<u8> xD66; // 0xD66 - SPURS handler exiting
-		atomic_t<u32> enableEH; // 0xD68
+		atomic_be_t<u8> xD64; // 0xD64 - SPURS handler dirty
+		atomic_be_t<u8> xD65; // 0xD65 - SPURS handler waiting
+		atomic_be_t<u8> xD66; // 0xD66 - SPURS handler exiting
+		atomic_be_t<u32> enableEH; // 0xD68
 		be_t<u32> exception; // 0xD6C
 		sys_spu_image spuImg; // 0xD70
 		be_t<u32> flags; // 0xD80
@@ -509,7 +509,7 @@ struct CellSpurs
 		be_t<u32> unk5; // 0xD9C
 		be_t<u32> revision; // 0xDA0
 		be_t<u32> sdkVersion; // 0xDA4
-		atomic_t<u64> spups; // 0xDA8 - SPU port bits
+		atomic_be_t<u64> spups; // 0xDA8 - SPU port bits
 		sys_lwmutex_t mutex; // 0xDB0
 		sys_lwcond_t cond; // 0xDC8
 		u8 unknown9[0xE00 - 0xDD0];
@@ -528,7 +528,7 @@ struct CellSpurs
 	} c;
 	};
 
-	__forceinline atomic_t<u8>& wklState(const u32 wid)
+	force_inline atomic_be_t<u8>& wklState(const u32 wid)
 	{
 		if (wid & 0x10)
 		{
@@ -540,12 +540,12 @@ struct CellSpurs
 		}
 	}
 
-	__forceinline vm::ptr<sys_lwmutex_t> get_lwmutex()
+	force_inline vm::ptr<sys_lwmutex_t> get_lwmutex()
 	{
 		return vm::ptr<sys_lwmutex_t>::make(vm::get_addr(&m.mutex));
 	}
 
-	__forceinline vm::ptr<sys_lwcond_t> get_lwcond()
+	force_inline vm::ptr<sys_lwcond_t> get_lwcond()
 	{
 		return vm::ptr<sys_lwcond_t>::make(vm::get_addr(&m.cond));
 	}
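The header swaps atomic_t for atomic_be_t on fields that live in guest memory, i.e. atomics whose storage stays in big-endian byte order while the host works on native values. A rough stand-in for the idea, assuming a little-endian host; this is not the real RPCS3 type:

#include <atomic>
#include <cstdint>

#if defined(_MSC_VER)
#include <cstdlib>
static inline std::uint32_t bswap32(std::uint32_t v) { return _byteswap_ulong(v); }
#else
static inline std::uint32_t bswap32(std::uint32_t v) { return __builtin_bswap32(v); }
#endif

struct atomic_be_u32
{
	std::atomic<std::uint32_t> raw{ 0 }; // bytes kept in guest (big-endian) order

	std::uint32_t load() const { return bswap32(raw.load()); }
	void store(std::uint32_t value) { raw.store(bswap32(value)); }

	// read-modify-write still works: swap in, operate natively, swap back out
	std::uint32_t fetch_or(std::uint32_t mask)
	{
		std::uint32_t old = raw.load(), desired;
		do { desired = bswap32(bswap32(old) | mask); }
		while (!raw.compare_exchange_weak(old, desired));
		return bswap32(old);
	}
};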
@@ -39,11 +39,11 @@ union CellSyncMutex
 
 	struct
 	{
-		atomic_t<u16> release_count;
-		atomic_t<u16> acquire_count;
+		atomic_be_t<u16> release_count;
+		atomic_be_t<u16> acquire_count;
 	};
 
-	atomic_t<sync_t> sync_var;
+	atomic_be_t<sync_t> sync_var;
 };
 
 static_assert(sizeof(CellSyncMutex) == 4, "CellSyncMutex: wrong size");
@@ -56,7 +56,7 @@ struct CellSyncBarrier
 		be_t<s16> m_count;
 	};
 
-	atomic_t<data_t> data;
+	atomic_be_t<data_t> data;
 };
 
 static_assert(sizeof(CellSyncBarrier) == 4, "CellSyncBarrier: wrong size");
@@ -69,7 +69,7 @@ struct CellSyncRwm
 		be_t<u16> m_writers;
 	};
 
-	atomic_t<data_t> data;
+	atomic_be_t<data_t> data;
 	be_t<u32> m_size;
 	vm::bptr<void, 1, u64> m_buffer;
 };
@@ -84,7 +84,7 @@ struct CellSyncQueue
 		be_t<u32> m_v2;
 	};
 
-	atomic_t<data_t> data;
+	atomic_be_t<data_t> data;
 	be_t<u32> m_size;
 	be_t<u32> m_depth;
 	vm::bptr<u8, 1, u64> m_buffer;
@@ -143,14 +143,14 @@ struct CellSyncLFQueue
 
 	union // 0x0
 	{
-		atomic_t<pop1_t> pop1;
-		atomic_t<pop3_t> pop3;
+		atomic_be_t<pop1_t> pop1;
+		atomic_be_t<pop3_t> pop3;
 	};
 
 	union // 0x8
 	{
-		atomic_t<push1_t> push1;
-		atomic_t<push3_t> push3;
+		atomic_be_t<push1_t> push1;
+		atomic_be_t<push3_t> push3;
 	};
 
 	be_t<u32> m_size; // 0x10
@@ -159,10 +159,10 @@ struct CellSyncLFQueue
 	u8 m_bs[4]; // 0x20
 	be_t<CellSyncQueueDirection> m_direction; // 0x24
 	be_t<u32> m_v1; // 0x28
-	atomic_t<u32> init; // 0x2C
-	atomic_t<push2_t> push2; // 0x30
+	atomic_be_t<u32> init; // 0x2C
+	atomic_be_t<push2_t> push2; // 0x30
 	be_t<u16> m_hs1[15]; // 0x32
-	atomic_t<pop2_t> pop2; // 0x50
+	atomic_be_t<pop2_t> pop2; // 0x50
 	be_t<u16> m_hs2[15]; // 0x52
 	vm::bptr<void, 1, u64> m_eaSignal; // 0x70
 	be_t<u32> m_v2; // 0x78
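CellSyncMutex packs a release counter and an acquire counter into one 32-bit word, which reads like a ticket lock: a waiter takes a ticket from acquire_count and spins until release_count reaches it. A host-side sketch of that protocol, ignoring guest memory and endianness, and not claiming to be the library's exact algorithm:

#include <atomic>
#include <cstdint>

struct ticket_mutex
{
	std::atomic<std::uint16_t> release_count{ 0 };
	std::atomic<std::uint16_t> acquire_count{ 0 };

	void lock()
	{
		const std::uint16_t ticket = acquire_count.fetch_add(1);
		while (release_count.load() != ticket)
		{
			// spin; a real implementation would yield to the scheduler here
		}
	}

	void unlock()
	{
		release_count.fetch_add(1);
	}
};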
@@ -207,12 +207,11 @@ u32 vdecQueryAttr(CellVdecCodecType type, u32 profile, u32 spec_addr /* may be 0
 	return CELL_OK;
 }
 
-u32 vdecOpen(VideoDecoder* vdec_ptr)
+void vdecOpen(u32 vdec_id) // TODO: call from the constructor
 {
-	std::shared_ptr<VideoDecoder> sptr(vdec_ptr);
-	VideoDecoder& vdec = *vdec_ptr;
-
-	u32 vdec_id = Emu.GetIdManager().GetNewID(sptr);
+	const auto sptr = Emu.GetIdManager().get<VideoDecoder>(vdec_id);
+	VideoDecoder& vdec = *sptr;
 
 	vdec.id = vdec_id;
 
@@ -225,9 +224,9 @@ u32 vdecOpen(VideoDecoder* vdec_ptr)
 	vdec.vdecCb->InitRegs();
 	vdec.vdecCb->DoRun();
 
-	thread_t t(fmt::format("VideoDecoder[0x%x] Thread", vdec_id), [vdec_ptr, sptr]()
+	thread_t t(fmt::format("VideoDecoder[0x%x] Thread", vdec_id), [sptr]()
 	{
-		VideoDecoder& vdec = *vdec_ptr;
+		VideoDecoder& vdec = *sptr;
 		VdecTask& task = vdec.task;
 
 		while (true)
@@ -549,8 +548,6 @@ u32 vdecOpen(VideoDecoder* vdec_ptr)
 
 		vdec.is_finished = true;
 	});
-
-	return vdec_id;
 }
 
 s32 cellVdecQueryAttr(vm::ptr<const CellVdecType> type, vm::ptr<CellVdecAttr> attr)
@@ -571,7 +568,7 @@ s32 cellVdecOpen(vm::ptr<const CellVdecType> type, vm::ptr<const CellVdecResourc
 {
 	cellVdec.Warning("cellVdecOpen(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);
 
-	*handle = vdecOpen(new VideoDecoder(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg));
+	vdecOpen(*handle = Emu.GetIdManager().make<VideoDecoder>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg));
 
 	return CELL_OK;
 }
@@ -580,7 +577,7 @@ s32 cellVdecOpenEx(vm::ptr<const CellVdecTypeEx> type, vm::ptr<const CellVdecRes
 {
 	cellVdec.Warning("cellVdecOpenEx(type=*0x%x, res=*0x%x, cb=*0x%x, handle=*0x%x)", type, res, cb, handle);
 
-	*handle = vdecOpen(new VideoDecoder(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg));
+	vdecOpen(*handle = Emu.GetIdManager().make<VideoDecoder>(type->codecType, type->profileLevel, res->memAddr, res->memSize, cb->cbFunc, cb->cbArg));
 
 	return CELL_OK;
 }
@@ -589,7 +586,7 @@ s32 cellVdecClose(u32 handle)
 {
 	cellVdec.Warning("cellVdecClose(handle=0x%x)", handle);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec)
 	{
@@ -610,7 +607,7 @@ s32 cellVdecClose(u32 handle)
 	}
 
 	if (vdec->vdecCb) Emu.GetCPU().RemoveThread(vdec->vdecCb->GetId());
-	Emu.GetIdManager().RemoveID<VideoDecoder>(handle);
+	Emu.GetIdManager().remove<VideoDecoder>(handle);
 	return CELL_OK;
 }
 
@@ -618,7 +615,7 @@ s32 cellVdecStartSeq(u32 handle)
 {
 	cellVdec.Log("cellVdecStartSeq(handle=0x%x)", handle);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec)
 	{
@@ -633,7 +630,7 @@ s32 cellVdecEndSeq(u32 handle)
 {
 	cellVdec.Warning("cellVdecEndSeq(handle=0x%x)", handle);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec)
 	{
@@ -648,7 +645,7 @@ s32 cellVdecDecodeAu(u32 handle, CellVdecDecodeMode mode, vm::ptr<const CellVdec
 {
 	cellVdec.Log("cellVdecDecodeAu(handle=0x%x, mode=%d, auInfo=*0x%x)", handle, mode, auInfo);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec || mode > CELL_VDEC_DEC_MODE_PB_SKIP)
 	{
@@ -678,7 +675,7 @@ s32 cellVdecGetPicture(u32 handle, vm::ptr<const CellVdecPicFormat> format, vm::
 {
 	cellVdec.Log("cellVdecGetPicture(handle=0x%x, format=*0x%x, outBuff=*0x%x)", handle, format, outBuff);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec || !format)
 	{
@@ -802,7 +799,7 @@ s32 cellVdecGetPicItem(u32 handle, vm::ptr<vm::bptr<CellVdecPicItem>> picItem)
 {
 	cellVdec.Log("cellVdecGetPicItem(handle=0x%x, picItem=**0x%x)", handle, picItem);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
 	if (!vdec)
 	{
@@ -947,7 +944,7 @@ s32 cellVdecSetFrameRate(u32 handle, CellVdecFrameRate frc)
 {
 	cellVdec.Log("cellVdecSetFrameRate(handle=0x%x, frc=0x%x)", handle, frc);
 
-	const auto vdec = Emu.GetIdManager().GetIDData<VideoDecoder>(handle);
+	const auto vdec = Emu.GetIdManager().get<VideoDecoder>(handle);
 
	if (!vdec)
 	{
@@ -27,22 +27,12 @@ s32 cellVpostQueryAttr(vm::ptr<const CellVpostCfgParam> cfgParam, vm::ptr<CellVp
    return CELL_OK;
}

-u32 vpostOpen(VpostInstance* data)
-{
-    std::shared_ptr<VpostInstance> data_ptr(data);
-    u32 id = Emu.GetIdManager().GetNewID(data_ptr);
-
-    cellVpost.Notice("*** Vpost instance created (to_rgba=%d): id = %d", data->to_rgba, id);
-
-    return id;
-}
-
s32 cellVpostOpen(vm::ptr<const CellVpostCfgParam> cfgParam, vm::ptr<const CellVpostResource> resource, vm::ptr<u32> handle)
{
    cellVpost.Warning("cellVpostOpen(cfgParam=*0x%x, resource=*0x%x, handle=*0x%x)", cfgParam, resource, handle);

    // TODO: check values
-    *handle = vpostOpen(new VpostInstance(cfgParam->outPicFmt == CELL_VPOST_PIC_FMT_OUT_RGBA_ILV));
+    *handle = Emu.GetIdManager().make<VpostInstance>(cfgParam->outPicFmt == CELL_VPOST_PIC_FMT_OUT_RGBA_ILV);
    return CELL_OK;
}

@@ -51,7 +41,7 @@ s32 cellVpostOpenEx(vm::ptr<const CellVpostCfgParam> cfgParam, vm::ptr<const Cel
    cellVpost.Warning("cellVpostOpenEx(cfgParam=*0x%x, resource=*0x%x, handle=*0x%x)", cfgParam, resource, handle);

    // TODO: check values
-    *handle = vpostOpen(new VpostInstance(cfgParam->outPicFmt == CELL_VPOST_PIC_FMT_OUT_RGBA_ILV));
+    *handle = Emu.GetIdManager().make<VpostInstance>(cfgParam->outPicFmt == CELL_VPOST_PIC_FMT_OUT_RGBA_ILV);
    return CELL_OK;
}

@@ -59,14 +49,14 @@ s32 cellVpostClose(u32 handle)
{
    cellVpost.Warning("cellVpostClose(handle=0x%x)", handle);

-    const auto vpost = Emu.GetIdManager().GetIDData<VpostInstance>(handle);
+    const auto vpost = Emu.GetIdManager().get<VpostInstance>(handle);

    if (!vpost)
    {
        return CELL_VPOST_ERROR_C_ARG_HDL_INVALID;
    }

-    Emu.GetIdManager().RemoveID<VpostInstance>(handle);
+    Emu.GetIdManager().remove<VpostInstance>(handle);
    return CELL_OK;
}

@@ -74,7 +64,7 @@ s32 cellVpostExec(u32 handle, vm::ptr<const u8> inPicBuff, vm::ptr<const CellVpo
{
    cellVpost.Log("cellVpostExec(handle=0x%x, inPicBuff=*0x%x, ctrlParam=*0x%x, outPicBuff=*0x%x, picInfo=*0x%x)", handle, inPicBuff, ctrlParam, outPicBuff, picInfo);

-    const auto vpost = Emu.GetIdManager().GetIDData<VpostInstance>(handle);
+    const auto vpost = Emu.GetIdManager().get<VpostInstance>(handle);

    if (!vpost)
    {

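The hunks above, and most of those that follow, apply one refactor: the old three-step ID-manager usage (construct a shared_ptr, GetNewID to register it, GetIDData/RemoveID to fetch and drop it) becomes single make/get/remove template calls. The sketch below is a minimal, self-contained illustration of that interface; the class name id_manager_sketch and its internals are assumptions for illustration only, not the emulator's actual IdManager.

// Simplified sketch of the id-manager pattern used above:
// make<T>(args...) constructs the object and returns its new id in one step,
// get<T>(id) returns a shared_ptr (null if the id is missing),
// remove<T>(id) drops the entry.
#include <cstdint>
#include <map>
#include <memory>
#include <utility>

class id_manager_sketch
{
    std::map<std::uint32_t, std::shared_ptr<void>> m_objects;
    std::uint32_t m_next_id = 1;

public:
    template<typename T, typename... Args>
    std::uint32_t make(Args&&... args)
    {
        const std::uint32_t id = m_next_id++;
        m_objects.emplace(id, std::make_shared<T>(std::forward<Args>(args)...));
        return id;
    }

    template<typename T>
    std::shared_ptr<T> get(std::uint32_t id) const
    {
        const auto found = m_objects.find(id);
        return found == m_objects.end() ? nullptr : std::static_pointer_cast<T>(found->second);
    }

    template<typename T>
    void remove(std::uint32_t id) // T kept only to mirror the interface above
    {
        m_objects.erase(id);
    }
};

// Usage mirroring the cellVpostOpen hunk above (VpostInstance assumed):
//   *handle = idm.make<VpostInstance>(to_rgba);
//   const auto vpost = idm.get<VpostInstance>(*handle);
//   idm.remove<VpostInstance>(*handle);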
@ -112,12 +112,10 @@ s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attri
|
|||
default: sysPrxForUser.Error("sys_lwmutex_create(): invalid protocol (0x%x)", protocol); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<lwmutex_t> lw(new lwmutex_t(protocol, attr->name_u64));
|
||||
|
||||
lwmutex->lock_var = { { lwmutex::free, lwmutex::zero } };
|
||||
lwmutex->attribute = attr->recursive | attr->protocol;
|
||||
lwmutex->recursive_count = 0;
|
||||
lwmutex->sleep_queue = Emu.GetIdManager().GetNewID(lw, TYPE_LWMUTEX);
|
||||
lwmutex->sleep_queue = Emu.GetIdManager().make<lv2_lwmutex_t>(protocol, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -362,9 +360,7 @@ s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmut
|
|||
{
|
||||
sysPrxForUser.Warning("sys_lwcond_create(lwcond=*0x%x, lwmutex=*0x%x, attr=*0x%x)", lwcond, lwmutex, attr);
|
||||
|
||||
std::shared_ptr<lwcond_t> cond(new lwcond_t(attr->name_u64));
|
||||
|
||||
lwcond->lwcond_queue = Emu.GetIdManager().GetNewID(cond, TYPE_LWCOND);
|
||||
lwcond->lwcond_queue = Emu.GetIdManager().make<lv2_lwcond_t>(attr->name_u64);
|
||||
lwcond->lwmutex = lwmutex;
|
||||
|
||||
return CELL_OK;
|
||||
|
@ -770,16 +766,14 @@ u32 _sys_heap_create_heap(vm::ptr<const char> name, u32 arg2, u32 arg3, u32 arg4
|
|||
{
|
||||
sysPrxForUser.Warning("_sys_heap_create_heap(name=*0x%x, arg2=0x%x, arg3=0x%x, arg4=0x%x)", name, arg2, arg3, arg4);
|
||||
|
||||
std::shared_ptr<HeapInfo> heap(new HeapInfo(name.get_ptr()));
|
||||
|
||||
return Emu.GetIdManager().GetNewID(heap);
|
||||
return Emu.GetIdManager().make<HeapInfo>(name.get_ptr());
|
||||
}
|
||||
|
||||
s32 _sys_heap_delete_heap(u32 heap)
|
||||
{
|
||||
sysPrxForUser.Warning("_sys_heap_delete_heap(heap=0x%x)", heap);
|
||||
|
||||
Emu.GetIdManager().RemoveID<HeapInfo>(heap);
|
||||
Emu.GetIdManager().remove<HeapInfo>(heap);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -1169,7 +1163,7 @@ s32 sys_process_get_paramsfo(vm::ptr<char> buffer)
|
|||
return _sys_process_get_paramsfo(buffer);
|
||||
}
|
||||
|
||||
void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock)
|
||||
void sys_spinlock_initialize(vm::ptr<atomic_be_t<u32>> lock)
|
||||
{
|
||||
sysPrxForUser.Log("sys_spinlock_initialize(lock=*0x%x)", lock);
|
||||
|
||||
|
@ -1177,7 +1171,7 @@ void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock)
|
|||
lock->exchange(be_t<u32>::make(0));
|
||||
}
|
||||
|
||||
void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock)
|
||||
void sys_spinlock_lock(vm::ptr<atomic_be_t<u32>> lock)
|
||||
{
|
||||
sysPrxForUser.Log("sys_spinlock_lock(lock=*0x%x)", lock);
|
||||
|
||||
|
@ -1194,7 +1188,7 @@ void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock)
|
|||
}
|
||||
}
|
||||
|
||||
s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock)
|
||||
s32 sys_spinlock_trylock(vm::ptr<atomic_be_t<u32>> lock)
|
||||
{
|
||||
sysPrxForUser.Log("sys_spinlock_trylock(lock=*0x%x)", lock);
|
||||
|
||||
|
@ -1207,7 +1201,7 @@ s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock)
|
||||
void sys_spinlock_unlock(vm::ptr<atomic_be_t<u32>> lock)
|
||||
{
|
||||
sysPrxForUser.Log("sys_spinlock_unlock(lock=*0x%x)", lock);
|
||||
|
||||
|
@ -1263,7 +1257,7 @@ void sys_ppu_thread_exit(PPUThread& CPU, u64 val)
|
|||
|
||||
std::mutex g_once_mutex;
|
||||
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void()> init)
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_be_t<u32>> once_ctrl, vm::ptr<void()> init)
|
||||
{
|
||||
sysPrxForUser.Warning("sys_ppu_thread_once(once_ctrl=*0x%x, init=*0x%x)", once_ctrl, init);
|
||||
|
||||
|
|
|
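The sys_spinlock_* and sys_ppu_thread_once hunks above retype the guest lock word from atomic_t<u32> to atomic_be_t<u32>, spelling out that the word being operated on is a big-endian value in PS3 memory. The fragment below is only a rough illustration of a big-endian atomic cell, assuming a little-endian host; be_atomic_u32_sketch and byteswap32 are invented names, not the emulator's types.

#include <atomic>
#include <cstdint>

// Swap the byte order of a 32-bit value (little-endian host assumed).
static std::uint32_t byteswap32(std::uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000ff00u) | ((v << 8) & 0x00ff0000u) | (v << 24);
}

struct be_atomic_u32_sketch
{
    std::atomic<std::uint32_t> storage{ 0 }; // holds the big-endian representation

    // Returns true if the cell contained 'expected' (native order) and was set to 'desired'.
    bool compare_and_swap(std::uint32_t expected, std::uint32_t desired)
    {
        std::uint32_t be_expected = byteswap32(expected);
        return storage.compare_exchange_strong(be_expected, byteswap32(desired));
    }

    void store(std::uint32_t value)
    {
        storage.store(byteswap32(value)); // e.g. unlocking: store(0)
    }
};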
@ -21,7 +21,7 @@ namespace ppu_func_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_GENERAL");
|
||||
|
||||
static __forceinline T get_arg(PPUThread& CPU)
|
||||
static force_inline T get_arg(PPUThread& CPU)
|
||||
{
|
||||
return cast_from_ppu_gpr<T>(CPU.GPR[g_count + 2]);
|
||||
}
|
||||
|
@ -32,7 +32,7 @@ namespace ppu_func_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_FLOAT");
|
||||
|
||||
static __forceinline T get_arg(PPUThread& CPU)
|
||||
static force_inline T get_arg(PPUThread& CPU)
|
||||
{
|
||||
return static_cast<T>(CPU.FPR[f_count]);
|
||||
}
|
||||
|
@ -43,7 +43,7 @@ namespace ppu_func_detail
|
|||
{
|
||||
static_assert(std::is_same<T, u128>::value, "Invalid function argument type for ARG_VECTOR");
|
||||
|
||||
static __forceinline T get_arg(PPUThread& CPU)
|
||||
static force_inline T get_arg(PPUThread& CPU)
|
||||
{
|
||||
return CPU.VPR[v_count + 1];
|
||||
}
|
||||
|
@ -56,7 +56,7 @@ namespace ppu_func_detail
|
|||
static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
|
||||
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_STACK");
|
||||
|
||||
static __forceinline T get_arg(PPUThread& CPU)
|
||||
static force_inline T get_arg(PPUThread& CPU)
|
||||
{
|
||||
// TODO: check stack argument displacement
|
||||
const u64 res = CPU.GetStackArg(8 + std::max(g_count - 8, 0) + std::max(f_count - 13, 0) + std::max(v_count - 12, 0));
|
||||
|
@ -70,7 +70,7 @@ namespace ppu_func_detail
|
|||
static_assert(type == ARG_GENERAL, "Wrong use of bind_result template");
|
||||
static_assert(sizeof(T) <= 8, "Invalid function result type for ARG_GENERAL");
|
||||
|
||||
static __forceinline void put_result(PPUThread& CPU, const T& result)
|
||||
static force_inline void put_result(PPUThread& CPU, const T& result)
|
||||
{
|
||||
CPU.GPR[3] = cast_to_ppu_gpr<T>(result);
|
||||
}
|
||||
|
@ -81,7 +81,7 @@ namespace ppu_func_detail
|
|||
{
|
||||
static_assert(sizeof(T) <= 8, "Invalid function result type for ARG_FLOAT");
|
||||
|
||||
static __forceinline void put_result(PPUThread& CPU, const T& result)
|
||||
static force_inline void put_result(PPUThread& CPU, const T& result)
|
||||
{
|
||||
CPU.FPR[1] = static_cast<T>(result);
|
||||
}
|
||||
|
@ -92,7 +92,7 @@ namespace ppu_func_detail
|
|||
{
|
||||
static_assert(std::is_same<T, u128>::value, "Invalid function result type for ARG_VECTOR");
|
||||
|
||||
static __forceinline void put_result(PPUThread& CPU, const T& result)
|
||||
static force_inline void put_result(PPUThread& CPU, const T& result)
|
||||
{
|
||||
CPU.VPR[2] = result;
|
||||
}
|
||||
|
@ -109,7 +109,7 @@ namespace ppu_func_detail
|
|||
template<typename T, u32 type_pack>
|
||||
struct bind_arg_packed
|
||||
{
|
||||
static __forceinline T get_arg(PPUThread& CPU)
|
||||
static force_inline T get_arg(PPUThread& CPU)
|
||||
{
|
||||
return bind_arg<T, type_pack, (type_pack >> 8), (type_pack >> 16), (type_pack >> 24)>::get_arg(CPU);
|
||||
}
|
||||
|
@ -118,7 +118,7 @@ namespace ppu_func_detail
|
|||
template <typename RT, typename F, typename Tuple, bool Done, int Total, int... N>
|
||||
struct call_impl
|
||||
{
|
||||
static __forceinline RT call(F f, Tuple && t)
|
||||
static force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
return call_impl<RT, F, Tuple, Total == 1 + sizeof...(N), Total, N..., sizeof...(N)>::call(f, std::forward<Tuple>(t));
|
||||
}
|
||||
|
@ -127,28 +127,28 @@ namespace ppu_func_detail
|
|||
template <typename RT, typename F, typename Tuple, int Total, int... N>
|
||||
struct call_impl<RT, F, Tuple, true, Total, N...>
|
||||
{
|
||||
static __forceinline RT call(F f, Tuple && t)
|
||||
static force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
return f(std::get<N>(std::forward<Tuple>(t))...);
|
||||
}
|
||||
};
|
||||
|
||||
template <typename RT, typename F, typename Tuple>
|
||||
__forceinline RT call(F f, Tuple && t)
|
||||
force_inline RT call(F f, Tuple && t)
|
||||
{
|
||||
typedef typename std::decay<Tuple>::type ttype;
|
||||
return ppu_func_detail::call_impl<RT, F, Tuple, 0 == std::tuple_size<ttype>::value, std::tuple_size<ttype>::value>::call(f, std::forward<Tuple>(t));
|
||||
}
|
||||
|
||||
template<u32 g_count, u32 f_count, u32 v_count>
|
||||
__forceinline std::tuple<> iterate(PPUThread& CPU)
|
||||
force_inline std::tuple<> iterate(PPUThread& CPU)
|
||||
{
|
||||
// terminator
|
||||
return std::tuple<>();
|
||||
}
|
||||
|
||||
template<u32 g_count, u32 f_count, u32 v_count, typename T, typename... A>
|
||||
__forceinline std::tuple<T, A...> iterate(PPUThread& CPU)
|
||||
force_inline std::tuple<T, A...> iterate(PPUThread& CPU)
|
||||
{
|
||||
static_assert(!std::is_pointer<T>::value, "Invalid function argument type (pointer)");
|
||||
static_assert(!std::is_reference<T>::value, "Invalid function argument type (reference)");
|
||||
|
@ -223,7 +223,7 @@ namespace ppu_func_detail
|
|||
};
|
||||
}
|
||||
|
||||
template<typename RT, typename... T> __forceinline void call_ppu_func(PPUThread& CPU, RT(*func)(T...))
|
||||
template<typename RT, typename... T> force_inline void call_ppu_func(PPUThread& CPU, RT(*func)(T...))
|
||||
{
|
||||
ppu_func_detail::func_binder<RT, T...>::do_call(CPU, func);
|
||||
}
|
||||
|
|
|
@@ -422,7 +422,7 @@ const ppu_func_caller sc_table[1024] =
    null_func, //462 (0x1CE) UNS
    null_func,//bind_func(sys_prx_load_module_by_fd) //463 (0x1CF)
    null_func,//bind_func(sys_prx_load_module_on_memcontainer_by_fd) //464 (0x1D0)
-    null_func,//bind_func(sys_prx_load_module_list) //465 (0x1D1)
+    bind_func(sys_prx_load_module_list), //465 (0x1D1)
    null_func,//bind_func(sys_prx_load_module_list_on_memcontainer) //466 (0x1D2)
    null_func,//bind_func(sys_prx_get_ppu_guid) //467 (0x1D3)
    null_func,//bind_func(sys_...) //468 (0x1D4) ROOT
@@ -437,14 +437,14 @@ const ppu_func_caller sc_table[1024] =

    null_func, null_func, null_func, //477-479 UNS

-    null_func,//bind_func(sys_prx_load_module), //480 (0x1E0)
-    null_func,//bind_func(sys_prx_start_module), //481 (0x1E1)
-    null_func,//bind_func(sys_prx_stop_module), //482 (0x1E2)
-    null_func,//bind_func(sys_prx_unload_module), //483 (0x1E3)
-    null_func,//bind_func(sys_prx_register_module), //484 (0x1E4)
+    bind_func(sys_prx_load_module), //480 (0x1E0)
+    bind_func(sys_prx_start_module), //481 (0x1E1)
+    bind_func(sys_prx_stop_module), //482 (0x1E2)
+    bind_func(sys_prx_unload_module), //483 (0x1E3)
+    bind_func(sys_prx_register_module), //484 (0x1E4)
    bind_func(sys_prx_query_module), //485 (0x1E5)
    bind_func(sys_prx_register_library), //486 (0x1E6)
-    null_func,//bind_func(sys_prx_unregister_library), //487 (0x1E7)
+    bind_func(sys_prx_unregister_library), //487 (0x1E7)
    bind_func(sys_prx_link_library), //488 (0x1E8)
    bind_func(sys_prx_unlink_library), //489 (0x1E9)
    bind_func(sys_prx_query_library), //490 (0x1EA)

@@ -125,7 +125,7 @@ u32 sleep_queue_t::signal(u32 protocol)
    if (m_waiting.size())
    {
        res = m_waiting[0];
-        if (!Emu.GetIdManager().CheckID<CPUThread>(res))
+        if (!Emu.GetIdManager().check_id<CPUThread>(res))
        {
            LOG_ERROR(HLE, "sleep_queue_t['%s']::signal(SYS_SYNC_FIFO) failed: invalid thread (%d)", m_name.c_str(), res);
            Emu.Pause();

@ -19,7 +19,7 @@ s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribu
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(mutex_id);
|
||||
const auto mutex = std::move(Emu.GetIdManager().get<lv2_mutex_t>(mutex_id));
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -37,9 +37,7 @@ s32 sys_cond_create(vm::ptr<u32> cond_id, u32 mutex_id, vm::ptr<sys_cond_attribu
|
|||
throw __FUNCTION__;
|
||||
}
|
||||
|
||||
std::shared_ptr<cond_t> cond(new cond_t(mutex, attr->name_u64));
|
||||
|
||||
*cond_id = Emu.GetIdManager().GetNewID(cond, TYPE_COND);
|
||||
*cond_id = Emu.GetIdManager().make<lv2_cond_t>(mutex, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -50,7 +48,7 @@ s32 sys_cond_destroy(u32 cond_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(cond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
|
@ -67,7 +65,7 @@ s32 sys_cond_destroy(u32 cond_id)
|
|||
throw __FUNCTION__;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<cond_t>(cond_id);
|
||||
Emu.GetIdManager().remove<lv2_cond_t>(cond_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -78,7 +76,7 @@ s32 sys_cond_signal(u32 cond_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(cond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
|
@ -101,7 +99,7 @@ s32 sys_cond_signal_all(u32 cond_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(cond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
|
@ -124,14 +122,14 @@ s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(cond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
if (!Emu.GetIdManager().CheckID<CPUThread>(thread_id))
|
||||
if (!Emu.GetIdManager().check_id<CPUThread>(thread_id))
|
||||
{
|
||||
return CELL_ESRCH;
|
||||
}
|
||||
|
@ -158,7 +156,7 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(cond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(cond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
|
|
|
@@ -1,6 +1,6 @@
#pragma once

-struct mutex_t;
+struct lv2_mutex_t;

struct sys_cond_attribute_t
{
@@ -15,10 +15,10 @@ struct sys_cond_attribute_t
    };
};

-struct cond_t
+struct lv2_cond_t
{
    const u64 name;
-    const std::shared_ptr<mutex_t> mutex; // associated mutex
+    const std::shared_ptr<lv2_mutex_t> mutex; // associated mutex

    std::atomic<u32> signaled;

@@ -26,15 +26,17 @@ struct cond_t
    std::condition_variable cv;
    std::unordered_set<u32> waiters;

-    cond_t(const std::shared_ptr<mutex_t>& mutex, u64 name)
+    lv2_cond_t(const std::shared_ptr<lv2_mutex_t>& mutex, u64 name)
        : mutex(mutex)
        , name(name)
        , signaled(0)
-        , waiters(0)
+        //, waiters(0)
    {
    }
};

+REG_ID_TYPE(lv2_cond_t, 0x86); // SYS_COND_OBJECT
+
class PPUThread;

// SysCalls
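The new REG_ID_TYPE(lv2_cond_t, 0x86) line ties the C++ type to the SYS_COND_OBJECT code defined later in this commit, so ID-manager queries can be routed per object type. One plausible shape for such a compile-time registration is sketched below; the id_type_code template and REG_ID_TYPE_SKETCH macro are illustrative assumptions, not the emulator's actual macro.

#include <cstdint>

// Primary template left undefined: using an unregistered type fails to compile.
template<typename T>
struct id_type_code;

// Registering a type specializes the trait with its PS3 object-type code.
#define REG_ID_TYPE_SKETCH(type, code) \
    template<> struct id_type_code<type> { static const std::uint32_t value = code; }

struct lv2_cond_t_example {};
REG_ID_TYPE_SKETCH(lv2_cond_t_example, 0x86); // SYS_COND_OBJECT

static_assert(id_type_code<lv2_cond_t_example>::value == 0x86, "type code registered");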
@ -13,15 +13,6 @@
|
|||
|
||||
SysCallBase sys_event("sys_event");
|
||||
|
||||
u32 event_queue_create(u32 protocol, s32 type, u64 name_u64, u64 event_queue_key, s32 size)
|
||||
{
|
||||
std::shared_ptr<event_queue_t> queue(new event_queue_t(protocol, type, name_u64, event_queue_key, size));
|
||||
|
||||
Emu.GetEventManager().RegisterKey(queue, event_queue_key);
|
||||
|
||||
return Emu.GetIdManager().GetNewID(queue, TYPE_EVENT_QUEUE);
|
||||
}
|
||||
|
||||
s32 sys_event_queue_create(vm::ptr<u32> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, s32 size)
|
||||
{
|
||||
sys_event.Warning("sys_event_queue_create(equeue_id=*0x%x, attr=*0x%x, event_queue_key=0x%llx, size=%d)", equeue_id, attr, event_queue_key, size);
|
||||
|
@ -49,14 +40,14 @@ s32 sys_event_queue_create(vm::ptr<u32> equeue_id, vm::ptr<sys_event_queue_attr>
|
|||
default: sys_event.Error("sys_event_queue_create(): unknown type (0x%x)", type); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<event_queue_t> queue(new event_queue_t(protocol, type, attr->name_u64, event_queue_key, size));
|
||||
auto queue = Emu.GetEventManager().MakeEventQueue(protocol, type, attr->name_u64, event_queue_key, size);
|
||||
|
||||
if (!Emu.GetEventManager().RegisterKey(queue, event_queue_key))
|
||||
if (!queue)
|
||||
{
|
||||
return CELL_EEXIST;
|
||||
}
|
||||
|
||||
*equeue_id = Emu.GetIdManager().GetNewID(queue, TYPE_EVENT_QUEUE);
|
||||
*equeue_id = Emu.GetIdManager().add(std::move(queue));
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -67,7 +58,7 @@ s32 sys_event_queue_destroy(u32 equeue_id, s32 mode)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(equeue_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
if (!queue)
|
||||
{
|
||||
|
@ -95,7 +86,7 @@ s32 sys_event_queue_destroy(u32 equeue_id, s32 mode)
|
|||
}
|
||||
|
||||
Emu.GetEventManager().UnregisterKey(queue->key);
|
||||
Emu.GetIdManager().RemoveID<event_queue_t>(equeue_id);
|
||||
Emu.GetIdManager().remove<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -106,7 +97,7 @@ s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_t> event_array,
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(equeue_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
if (!queue)
|
||||
{
|
||||
|
@ -146,7 +137,7 @@ s32 sys_event_queue_receive(PPUThread& CPU, u32 equeue_id, vm::ptr<sys_event_t>
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(equeue_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
if (!queue)
|
||||
{
|
||||
|
@ -203,7 +194,7 @@ s32 sys_event_queue_drain(u32 equeue_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(equeue_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
if (!queue)
|
||||
{
|
||||
|
@ -215,13 +206,6 @@ s32 sys_event_queue_drain(u32 equeue_id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
u32 event_port_create(u64 name)
|
||||
{
|
||||
std::shared_ptr<event_port_t> eport(new event_port_t(SYS_EVENT_PORT_LOCAL, name));
|
||||
|
||||
return Emu.GetIdManager().GetNewID(eport, TYPE_EVENT_PORT);
|
||||
}
|
||||
|
||||
s32 sys_event_port_create(vm::ptr<u32> eport_id, s32 port_type, u64 name)
|
||||
{
|
||||
sys_event.Warning("sys_event_port_create(eport_id=*0x%x, port_type=%d, name=0x%llx)", eport_id, port_type, name);
|
||||
|
@ -232,9 +216,7 @@ s32 sys_event_port_create(vm::ptr<u32> eport_id, s32 port_type, u64 name)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<event_port_t> eport(new event_port_t(port_type, name));
|
||||
|
||||
*eport_id = Emu.GetIdManager().GetNewID(eport, TYPE_EVENT_PORT);
|
||||
*eport_id = Emu.GetIdManager().make<lv2_event_port_t>(port_type, name);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -245,7 +227,7 @@ s32 sys_event_port_destroy(u32 eport_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto port = Emu.GetIdManager().GetIDData<event_port_t>(eport_id);
|
||||
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(eport_id);
|
||||
|
||||
if (!port)
|
||||
{
|
||||
|
@ -257,7 +239,7 @@ s32 sys_event_port_destroy(u32 eport_id)
|
|||
return CELL_EISCONN;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<event_port_t>(eport_id);
|
||||
Emu.GetIdManager().remove<lv2_event_port_t>(eport_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -268,8 +250,8 @@ s32 sys_event_port_connect_local(u32 eport_id, u32 equeue_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto port = Emu.GetIdManager().GetIDData<event_port_t>(eport_id);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(equeue_id);
|
||||
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(eport_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(equeue_id);
|
||||
|
||||
if (!port || !queue)
|
||||
{
|
||||
|
@ -297,7 +279,7 @@ s32 sys_event_port_disconnect(u32 eport_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto port = Emu.GetIdManager().GetIDData<event_port_t>(eport_id);
|
||||
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(eport_id);
|
||||
|
||||
if (!port)
|
||||
{
|
||||
|
@ -324,7 +306,7 @@ s32 sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto port = Emu.GetIdManager().GetIDData<event_port_t>(eport_id);
|
||||
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(eport_id);
|
||||
|
||||
if (!port)
|
||||
{
|
||||
|
|
|
@@ -69,7 +69,7 @@ struct event_t
    }
};

-struct event_queue_t
+struct lv2_event_queue_t
{
    const u32 protocol;
    const s32 type;
@@ -84,7 +84,7 @@ struct event_queue_t
    std::condition_variable cv;
    std::atomic<u32> waiters;

-    event_queue_t(u32 protocol, s32 type, u64 name, u64 key, s32 size)
+    lv2_event_queue_t(u32 protocol, s32 type, u64 name, u64 key, s32 size)
        : protocol(protocol)
        , type(type)
        , name(name)
@@ -108,24 +108,24 @@ struct event_queue_t
    }
};

-struct event_port_t
+REG_ID_TYPE(lv2_event_queue_t, 0x8D); // SYS_EVENT_QUEUE_OBJECT
+
+struct lv2_event_port_t
{
    const s32 type; // port type, must be SYS_EVENT_PORT_LOCAL
    const u64 name; // passed as event source (generated from id and process id if not set)
-    std::weak_ptr<event_queue_t> queue; // event queue this port is connected to
+    std::weak_ptr<lv2_event_queue_t> queue; // event queue this port is connected to

-    event_port_t(s32 type, u64 name)
+    lv2_event_port_t(s32 type, u64 name)
        : type(type)
        , name(name)
    {
    }
};

-class PPUThread;
+REG_ID_TYPE(lv2_event_port_t, 0x0E); // SYS_EVENT_PORT_OBJECT

-// Aux
-u32 event_port_create(u64 name);
-u32 event_queue_create(u32 protocol, s32 type, u64 name_u64, u64 event_queue_key, s32 size);
+class PPUThread;

// SysCalls
s32 sys_event_queue_create(vm::ptr<u32> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, s32 size);

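In the header above, the port keeps only a std::weak_ptr to its queue, so destroying the queue never leaves the port with a dangling pointer; send and disconnect paths have to lock() the weak pointer before touching the queue. A toy, self-contained demonstration of that relationship (simplified stand-in types, not the emulator's):

#include <cstdio>
#include <memory>

struct queue_sketch { int dummy = 0; };

struct port_sketch
{
    std::weak_ptr<queue_sketch> queue; // mirrors lv2_event_port_t::queue
};

int main()
{
    auto q = std::make_shared<queue_sketch>();
    port_sketch port;
    port.queue = q; // "connect" the port

    if (auto locked = port.queue.lock())
    {
        std::puts("port connected, queue reachable");
    }

    q.reset(); // queue destroyed elsewhere

    if (port.queue.expired())
    {
        std::puts("send would fail: queue no longer exists");
    }
}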
@ -47,9 +47,7 @@ s32 sys_event_flag_create(vm::ptr<u32> id, vm::ptr<sys_event_flag_attr> attr, u6
|
|||
default: sys_event_flag.Error("sys_event_flag_create(): unknown type (0x%x)", attr->type); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<event_flag_t> ef(new event_flag_t(init, protocol, type, attr->name_u64));
|
||||
|
||||
*id = Emu.GetIdManager().GetNewID(ef, TYPE_EVENT_FLAG);
|
||||
*id = Emu.GetIdManager().make<lv2_event_flag_t>(init, protocol, type, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -60,7 +58,7 @@ s32 sys_event_flag_destroy(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -72,7 +70,7 @@ s32 sys_event_flag_destroy(u32 id)
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<event_flag_t>(id);
|
||||
Emu.GetIdManager().remove<lv2_event_flag_t>(id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -105,7 +103,7 @@ s32 sys_event_flag_wait(u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 t
|
|||
default: return CELL_EINVAL;
|
||||
}
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -212,7 +210,7 @@ s32 sys_event_flag_trywait(u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result)
|
|||
default: return CELL_EINVAL;
|
||||
}
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -252,7 +250,7 @@ s32 sys_event_flag_set(u32 id, u64 bitptn)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -280,7 +278,7 @@ s32 sys_event_flag_clear(u32 id, u64 bitptn)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -308,7 +306,7 @@ s32 sys_event_flag_cancel(u32 id, vm::ptr<u32> num)
|
|||
*num = 0;
|
||||
}
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
@ -344,7 +342,7 @@ s32 sys_event_flag_get(u32 id, vm::ptr<u64> flags)
|
|||
return CELL_EFAULT;
|
||||
}
|
||||
|
||||
const auto ef = Emu.GetIdManager().GetIDData<event_flag_t>(id);
|
||||
const auto ef = Emu.GetIdManager().get<lv2_event_flag_t>(id);
|
||||
|
||||
if (!ef)
|
||||
{
|
||||
|
|
|
@@ -27,7 +27,7 @@ struct sys_event_flag_attr
    };
};

-struct event_flag_t
+struct lv2_event_flag_t
{
    const u32 protocol;
    const s32 type;
@@ -40,7 +40,7 @@ struct event_flag_t
    std::condition_variable cv;
    std::atomic<u32> waiters;

-    event_flag_t(u64 pattern, u32 protocol, s32 type, u64 name)
+    lv2_event_flag_t(u64 pattern, u32 protocol, s32 type, u64 name)
        : flags(pattern)
        , protocol(protocol)
        , type(type)
@@ -51,6 +51,8 @@ struct event_flag_t
    }
};

+REG_ID_TYPE(lv2_event_flag_t, 0x98); // SYS_EVENT_FLAG_OBJECT
+
s32 sys_event_flag_create(vm::ptr<u32> id, vm::ptr<sys_event_flag_attr> attr, u64 init);
s32 sys_event_flag_destroy(u32 id);
s32 sys_event_flag_wait(u32 id, u64 bitptn, u32 mode, vm::ptr<u64> result, u64 timeout);

@ -109,9 +109,7 @@ s32 sys_fs_open(vm::ptr<const char> path, s32 flags, vm::ptr<u32> fd, s32 mode,
|
|||
return CELL_FS_ENOENT;
|
||||
}
|
||||
|
||||
std::shared_ptr<fs_file_t> file_handler(new fs_file_t(file, mode, flags));
|
||||
|
||||
*fd = Emu.GetIdManager().GetNewID(file_handler, TYPE_FS_FILE);
|
||||
*fd = Emu.GetIdManager().make<lv2_file_t>(file, mode, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -120,7 +118,7 @@ s32 sys_fs_read(u32 fd, vm::ptr<void> buf, u64 nbytes, vm::ptr<u64> nread)
|
|||
{
|
||||
sys_fs.Log("sys_fs_read(fd=0x%x, buf=0x%x, nbytes=0x%llx, nread=0x%x)", fd, buf, nbytes, nread);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file || file->flags & CELL_FS_O_WRONLY)
|
||||
{
|
||||
|
@ -138,7 +136,7 @@ s32 sys_fs_write(u32 fd, vm::ptr<const void> buf, u64 nbytes, vm::ptr<u64> nwrit
|
|||
{
|
||||
sys_fs.Log("sys_fs_write(fd=0x%x, buf=*0x%x, nbytes=0x%llx, nwrite=*0x%x)", fd, buf, nbytes, nwrite);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file || !(file->flags & CELL_FS_O_ACCMODE))
|
||||
{
|
||||
|
@ -158,7 +156,7 @@ s32 sys_fs_close(u32 fd)
|
|||
{
|
||||
sys_fs.Log("sys_fs_close(fd=0x%x)", fd);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -167,7 +165,7 @@ s32 sys_fs_close(u32 fd)
|
|||
|
||||
// TODO: return CELL_FS_EBUSY if locked
|
||||
|
||||
Emu.GetIdManager().RemoveID<fs_file_t>(fd);
|
||||
Emu.GetIdManager().remove<lv2_file_t>(fd);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -177,7 +175,7 @@ s32 sys_fs_opendir(vm::ptr<const char> path, vm::ptr<u32> fd)
|
|||
sys_fs.Warning("sys_fs_opendir(path=*0x%x, fd=*0x%x)", path, fd);
|
||||
sys_fs.Warning("*** path = '%s'", path.get_ptr());
|
||||
|
||||
std::shared_ptr<vfsDirBase> directory(Emu.GetVFS().OpenDir(path.get_ptr()));
|
||||
std::shared_ptr<lv2_dir_t> directory(Emu.GetVFS().OpenDir(path.get_ptr()));
|
||||
|
||||
if (!directory || !directory->IsOpened())
|
||||
{
|
||||
|
@ -185,7 +183,7 @@ s32 sys_fs_opendir(vm::ptr<const char> path, vm::ptr<u32> fd)
|
|||
return CELL_FS_ENOENT;
|
||||
}
|
||||
|
||||
*fd = Emu.GetIdManager().GetNewID(directory, TYPE_FS_DIR);
|
||||
*fd = Emu.GetIdManager().add(std::move(directory));
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -194,7 +192,7 @@ s32 sys_fs_readdir(u32 fd, vm::ptr<CellFsDirent> dir, vm::ptr<u64> nread)
|
|||
{
|
||||
sys_fs.Warning("sys_fs_readdir(fd=0x%x, dir=*0x%x, nread=*0x%x)", fd, dir, nread);
|
||||
|
||||
const auto directory = Emu.GetIdManager().GetIDData<vfsDirBase>(fd);
|
||||
const auto directory = Emu.GetIdManager().get<lv2_dir_t>(fd);
|
||||
|
||||
if (!directory)
|
||||
{
|
||||
|
@ -222,14 +220,14 @@ s32 sys_fs_closedir(u32 fd)
|
|||
{
|
||||
sys_fs.Log("sys_fs_closedir(fd=0x%x)", fd);
|
||||
|
||||
const auto directory = Emu.GetIdManager().GetIDData<vfsDirBase>(fd);
|
||||
const auto directory = Emu.GetIdManager().get<lv2_dir_t>(fd);
|
||||
|
||||
if (!directory)
|
||||
{
|
||||
return CELL_FS_EBADF;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<vfsDirBase>(fd);
|
||||
Emu.GetIdManager().remove<lv2_dir_t>(fd);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -271,7 +269,7 @@ s32 sys_fs_fstat(u32 fd, vm::ptr<CellFsStat> sb)
|
|||
{
|
||||
sys_fs.Warning("sys_fs_fstat(fd=0x%x, sb=*0x%x)", fd, sb);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -420,7 +418,7 @@ s32 sys_fs_lseek(u32 fd, s64 offset, s32 whence, vm::ptr<u64> pos)
|
|||
return CELL_FS_EINVAL;
|
||||
}
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -438,7 +436,7 @@ s32 sys_fs_fget_block_size(u32 fd, vm::ptr<u64> sector_size, vm::ptr<u64> block_
|
|||
{
|
||||
sys_fs.Todo("sys_fs_fget_block_size(fd=0x%x, sector_size=*0x%x, block_size=*0x%x, arg4=*0x%x, arg5=*0x%x)", fd, sector_size, block_size, arg4, arg5);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file)
|
||||
{
|
||||
|
@ -486,7 +484,7 @@ s32 sys_fs_ftruncate(u32 fd, u64 size)
|
|||
{
|
||||
sys_fs.Warning("sys_fs_ftruncate(fd=0x%x, size=0x%llx)", fd, size);
|
||||
|
||||
const auto file = Emu.GetIdManager().GetIDData<fs_file_t>(fd);
|
||||
const auto file = Emu.GetIdManager().get<lv2_file_t>(fd);
|
||||
|
||||
if (!file || !(file->flags & CELL_FS_O_ACCMODE))
|
||||
{
|
||||
|
|
|
@ -145,6 +145,8 @@ struct CellFsUtimbuf
|
|||
|
||||
#pragma pack(pop)
|
||||
|
||||
struct vfsStream;
|
||||
|
||||
// Stream Support Status (st_status)
|
||||
enum : u32
|
||||
{
|
||||
|
@ -163,7 +165,7 @@ struct fs_st_cb_rec_t
|
|||
u32 pad;
|
||||
};
|
||||
|
||||
struct fs_file_t
|
||||
struct lv2_file_t
|
||||
{
|
||||
const std::shared_ptr<vfsStream> file;
|
||||
const s32 mode;
|
||||
|
@ -172,7 +174,7 @@ struct fs_file_t
|
|||
std::mutex mutex;
|
||||
std::condition_variable cv;
|
||||
|
||||
atomic_le_t<u32> st_status;
|
||||
atomic<u32> st_status;
|
||||
|
||||
u64 st_ringbuf_size;
|
||||
u64 st_block_size;
|
||||
|
@ -186,9 +188,9 @@ struct fs_file_t
|
|||
std::atomic<u64> st_total_read;
|
||||
std::atomic<u64> st_copied;
|
||||
|
||||
atomic_le_t<fs_st_cb_rec_t> st_callback;
|
||||
atomic<fs_st_cb_rec_t> st_callback;
|
||||
|
||||
fs_file_t(std::shared_ptr<vfsStream>& file, s32 mode, s32 flags)
|
||||
lv2_file_t(const std::shared_ptr<vfsStream>& file, s32 mode, s32 flags)
|
||||
: file(file)
|
||||
, mode(mode)
|
||||
, flags(flags)
|
||||
|
@ -198,6 +200,14 @@ struct fs_file_t
|
|||
}
|
||||
};
|
||||
|
||||
REG_ID_TYPE(lv2_file_t, 0x73); // SYS_FS_FD_OBJECT
|
||||
|
||||
class vfsDirBase;
|
||||
|
||||
using lv2_dir_t = vfsDirBase;
|
||||
|
||||
REG_ID_TYPE(lv2_dir_t, 0x73); // SYS_FS_FD_OBJECT
|
||||
|
||||
// SysCalls
|
||||
s32 sys_fs_test(u32 arg1, u32 arg2, vm::ptr<u32> arg3, u32 arg4, vm::ptr<char> arg5, u32 arg6);
|
||||
s32 sys_fs_open(vm::ptr<const char> path, s32 flags, vm::ptr<u32> fd, s32 mode, vm::ptr<const void> arg, u64 size);
|
||||
|
|
|
@@ -78,8 +78,6 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,
        return CELL_ESRCH;
    }

-    std::shared_ptr<interrupt_handler_t> handler(new interrupt_handler_t{ it });
-
    PPUThread& ppu = static_cast<PPUThread&>(*it);

    {
@@ -127,7 +125,7 @@ s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread,
        };
    }

-    *ih = Emu.GetIdManager().GetNewID(handler, TYPE_INTR_SERVICE_HANDLE);
+    *ih = Emu.GetIdManager().make<lv2_int_handler_t>(it);
    ppu.Exec();

    return CELL_OK;
@@ -137,7 +135,7 @@ s32 _sys_interrupt_thread_disestablish(u32 ih, vm::ptr<u64> r13)
{
    sys_interrupt.Todo("_sys_interrupt_thread_disestablish(ih=0x%x, r13=*0x%x)", ih, r13);

-    const auto handler = Emu.GetIdManager().GetIDData<interrupt_handler_t>(ih);
+    const auto handler = Emu.GetIdManager().get<lv2_int_handler_t>(ih);

    if (!handler)
    {

@@ -2,11 +2,18 @@

class PPUThread;

-struct interrupt_handler_t
+struct lv2_int_handler_t
{
    std::shared_ptr<CPUThread> handler;
+
+    lv2_int_handler_t(const std::shared_ptr<CPUThread>& handler)
+        : handler(handler)
+    {
+    }
};

+REG_ID_TYPE(lv2_int_handler_t, 0x0B); // SYS_INTR_SERVICE_HANDLE_OBJECT
+
// SysCalls
s32 sys_interrupt_tag_destroy(u32 intrtag);
s32 sys_interrupt_thread_establish(vm::ptr<u32> ih, u32 intrtag, u64 intrthread, u64 arg);

@ -15,19 +15,15 @@ SysCallBase sys_lwcond("sys_lwcond");
|
|||
|
||||
void lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name)
|
||||
{
|
||||
std::shared_ptr<lwcond_t> cond(new lwcond_t(name));
|
||||
|
||||
lwcond.lwmutex.set(vm::get_addr(&lwmutex));
|
||||
lwcond.lwcond_queue = Emu.GetIdManager().GetNewID(cond, TYPE_LWCOND);
|
||||
lwcond.lwcond_queue = Emu.GetIdManager().make<lv2_lwcond_t>(name);
|
||||
}
|
||||
|
||||
s32 _sys_lwcond_create(vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name, u32 arg5)
|
||||
{
|
||||
sys_lwcond.Warning("_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=0x%x, control=*0x%x, name=0x%llx, arg5=0x%x)", lwcond_id, lwmutex_id, control, name, arg5);
|
||||
|
||||
std::shared_ptr<lwcond_t> cond(new lwcond_t(name));
|
||||
|
||||
*lwcond_id = Emu.GetIdManager().GetNewID(cond, TYPE_LWCOND);
|
||||
*lwcond_id = Emu.GetIdManager().make<lv2_lwcond_t>(name);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -38,7 +34,7 @@ s32 _sys_lwcond_destroy(u32 lwcond_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<lwcond_t>(lwcond_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_lwcond_t>(lwcond_id);
|
||||
|
||||
if (!cond)
|
||||
{
|
||||
|
@ -50,7 +46,7 @@ s32 _sys_lwcond_destroy(u32 lwcond_id)
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<lwcond_t>(lwcond_id);
|
||||
Emu.GetIdManager().remove<lv2_lwcond_t>(lwcond_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -61,8 +57,8 @@ s32 _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mod
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!cond || (lwmutex_id && !mutex))
|
||||
{
|
||||
|
@ -122,8 +118,8 @@ s32 _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!cond || (lwmutex_id && !mutex))
|
||||
{
|
||||
|
@ -169,8 +165,8 @@ s32 _sys_lwcond_queue_wait(PPUThread& CPU, u32 lwcond_id, u32 lwmutex_id, u64 ti
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto cond = Emu.GetIdManager().GetIDData<lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_lwcond_t>(lwcond_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!cond || !mutex)
|
||||
{
|
||||
|
|
|
@@ -17,7 +17,7 @@ struct sys_lwcond_t
    be_t<u32> lwcond_queue; // lwcond pseudo-id
};

-struct lwcond_t
+struct lv2_lwcond_t
{
    const u64 name;

@@ -28,15 +28,17 @@ struct lwcond_t
    std::condition_variable cv;
    std::unordered_set<u32> waiters;

-    lwcond_t(u64 name)
+    lv2_lwcond_t(u64 name)
        : name(name)
        , signaled1(0)
        , signaled2(0)
-        , waiters(0)
+        //, waiters(0)
    {
    }
};

+REG_ID_TYPE(lv2_lwcond_t, 0x97); // SYS_LWCOND_OBJECT
+
// Aux
void lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name);

|
@ -14,12 +14,10 @@ SysCallBase sys_lwmutex("sys_lwmutex");
|
|||
|
||||
void lwmutex_create(sys_lwmutex_t& lwmutex, bool recursive, u32 protocol, u64 name)
|
||||
{
|
||||
std::shared_ptr<lwmutex_t> mutex(new lwmutex_t(protocol, name));
|
||||
|
||||
lwmutex.lock_var = { { lwmutex::free, lwmutex::zero } };
|
||||
lwmutex.attribute = protocol | (recursive ? SYS_SYNC_RECURSIVE : SYS_SYNC_NOT_RECURSIVE);
|
||||
lwmutex.recursive_count = 0;
|
||||
lwmutex.sleep_queue = Emu.GetIdManager().GetNewID(mutex, TYPE_LWMUTEX);
|
||||
lwmutex.sleep_queue = Emu.GetIdManager().make<lv2_lwmutex_t>(protocol, name);
|
||||
}
|
||||
|
||||
s32 _sys_lwmutex_create(vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmutex_t> control, u32 arg4, u64 name, u32 arg6)
|
||||
|
@ -39,9 +37,7 @@ s32 _sys_lwmutex_create(vm::ptr<u32> lwmutex_id, u32 protocol, vm::ptr<sys_lwmut
|
|||
sys_lwmutex.Error("_sys_lwmutex_create(): unknown parameters (arg4=0x%x, arg6=0x%x)", arg4, arg6);
|
||||
}
|
||||
|
||||
std::shared_ptr<lwmutex_t> mutex(new lwmutex_t(protocol, name));
|
||||
|
||||
*lwmutex_id = Emu.GetIdManager().GetNewID(mutex, TYPE_LWMUTEX);
|
||||
*lwmutex_id = Emu.GetIdManager().make<lv2_lwmutex_t>(protocol, name);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -52,7 +48,7 @@ s32 _sys_lwmutex_destroy(u32 lwmutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -64,7 +60,7 @@ s32 _sys_lwmutex_destroy(u32 lwmutex_id)
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<lwmutex_t>(lwmutex_id);
|
||||
Emu.GetIdManager().remove<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -77,7 +73,7 @@ s32 _sys_lwmutex_lock(u32 lwmutex_id, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -117,7 +113,7 @@ s32 _sys_lwmutex_trylock(u32 lwmutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -140,7 +136,7 @@ s32 _sys_lwmutex_unlock(u32 lwmutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<lwmutex_t>(lwmutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_lwmutex_t>(lwmutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
|
|
@ -47,15 +47,15 @@ struct sys_lwmutex_t
|
|||
|
||||
union
|
||||
{
|
||||
atomic_t<sync_var_t> lock_var;
|
||||
atomic_be_t<sync_var_t> lock_var;
|
||||
|
||||
struct
|
||||
{
|
||||
atomic_t<u32> owner;
|
||||
atomic_t<u32> waiter;
|
||||
atomic_be_t<u32> owner;
|
||||
atomic_be_t<u32> waiter;
|
||||
};
|
||||
|
||||
atomic_t<u64> all_info;
|
||||
atomic_be_t<u64> all_info;
|
||||
};
|
||||
|
||||
be_t<u32> attribute;
|
||||
|
@ -64,7 +64,7 @@ struct sys_lwmutex_t
|
|||
be_t<u32> pad;
|
||||
};
|
||||
|
||||
struct lwmutex_t
|
||||
struct lv2_lwmutex_t
|
||||
{
|
||||
const u32 protocol;
|
||||
const u64 name;
|
||||
|
@ -76,7 +76,7 @@ struct lwmutex_t
|
|||
std::condition_variable cv;
|
||||
std::atomic<u32> waiters;
|
||||
|
||||
lwmutex_t(u32 protocol, u64 name)
|
||||
lv2_lwmutex_t(u32 protocol, u64 name)
|
||||
: protocol(protocol)
|
||||
, name(name)
|
||||
, signaled(0)
|
||||
|
@ -85,6 +85,8 @@ struct lwmutex_t
|
|||
}
|
||||
};
|
||||
|
||||
REG_ID_TYPE(lv2_lwmutex_t, 0x95); // SYS_LWMUTEX_OBJECT
|
||||
|
||||
// Aux
|
||||
void lwmutex_create(sys_lwmutex_t& lwmutex, bool recursive, u32 protocol, u64 name);
|
||||
|
||||
|
|
|
@ -45,7 +45,7 @@ s32 sys_memory_allocate_from_container(u32 size, u32 cid, u32 flags, u32 alloc_a
|
|||
sys_memory.Log("sys_memory_allocate_from_container(size=0x%x, cid=0x%x, flags=0x%x)", size, cid, flags);
|
||||
|
||||
// Check if this container ID is valid.
|
||||
const auto ct = Emu.GetIdManager().GetIDData<MemoryContainerInfo>(cid);
|
||||
const auto ct = Emu.GetIdManager().get<MemoryContainerInfo>(cid);
|
||||
|
||||
if (!ct)
|
||||
{
|
||||
|
@ -124,11 +124,7 @@ s32 sys_memory_container_create(vm::ptr<u32> cid, u32 yield_size)
|
|||
return CELL_ENOMEM;
|
||||
|
||||
// Wrap the allocated memory in a memory container.
|
||||
std::shared_ptr<MemoryContainerInfo> ct(new MemoryContainerInfo(addr, yield_size));
|
||||
u32 id = Emu.GetIdManager().GetNewID(ct, TYPE_MEM);
|
||||
*cid = id;
|
||||
|
||||
sys_memory.Warning("*** memory_container created(addr=0x%llx): id = %d", addr, id);
|
||||
*cid = Emu.GetIdManager().make<MemoryContainerInfo>(addr, yield_size);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -138,7 +134,7 @@ s32 sys_memory_container_destroy(u32 cid)
|
|||
sys_memory.Warning("sys_memory_container_destroy(cid=0x%x)", cid);
|
||||
|
||||
// Check if this container ID is valid.
|
||||
const auto ct = Emu.GetIdManager().GetIDData<MemoryContainerInfo>(cid);
|
||||
const auto ct = Emu.GetIdManager().get<MemoryContainerInfo>(cid);
|
||||
|
||||
if (!ct)
|
||||
{
|
||||
|
@ -147,7 +143,7 @@ s32 sys_memory_container_destroy(u32 cid)
|
|||
|
||||
// Release the allocated memory and remove the ID.
|
||||
Memory.Free(ct->addr);
|
||||
Emu.GetIdManager().RemoveID<MemoryContainerInfo>(cid);
|
||||
Emu.GetIdManager().remove<MemoryContainerInfo>(cid);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -157,7 +153,7 @@ s32 sys_memory_container_get_size(vm::ptr<sys_memory_info_t> mem_info, u32 cid)
|
|||
sys_memory.Warning("sys_memory_container_get_size(mem_info_addr=0x%x, cid=0x%x)", mem_info.addr(), cid);
|
||||
|
||||
// Check if this container ID is valid.
|
||||
const auto ct = Emu.GetIdManager().GetIDData<MemoryContainerInfo>(cid);
|
||||
const auto ct = Emu.GetIdManager().get<MemoryContainerInfo>(cid);
|
||||
|
||||
if (!ct)
|
||||
{
|
||||
|
|
|
@ -77,8 +77,7 @@ s32 sys_mmapper_allocate_memory(u32 size, u64 flags, vm::ptr<u32> mem_id)
|
|||
}
|
||||
|
||||
// Generate a new mem ID.
|
||||
std::shared_ptr<mmapper_info> info(new mmapper_info(size, flags));
|
||||
*mem_id = Emu.GetIdManager().GetNewID(info, TYPE_MEM);
|
||||
*mem_id = Emu.GetIdManager().make<mmapper_info>(size, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -89,7 +88,7 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
|
|||
size, cid, flags, mem_id.addr());
|
||||
|
||||
// Check if this container ID is valid.
|
||||
const auto ct = Emu.GetIdManager().GetIDData<MemoryContainerInfo>(cid);
|
||||
const auto ct = Emu.GetIdManager().get<MemoryContainerInfo>(cid);
|
||||
|
||||
if (!ct)
|
||||
{
|
||||
|
@ -116,8 +115,7 @@ s32 sys_mmapper_allocate_memory_from_container(u32 size, u32 cid, u64 flags, vm:
|
|||
ct->size = size;
|
||||
|
||||
// Generate a new mem ID.
|
||||
std::shared_ptr<mmapper_info> info(new mmapper_info(ct->size, flags));
|
||||
*mem_id = Emu.GetIdManager().GetNewID(info, TYPE_MEM);
|
||||
*mem_id = Emu.GetIdManager().make<mmapper_info>(ct->size, flags);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -145,7 +143,7 @@ s32 sys_mmapper_free_memory(u32 mem_id)
|
|||
sys_mmapper.Warning("sys_mmapper_free_memory(mem_id=0x%x)", mem_id);
|
||||
|
||||
// Check if this mem ID is valid.
|
||||
const auto info = Emu.GetIdManager().GetIDData<mmapper_info>(mem_id);
|
||||
const auto info = Emu.GetIdManager().get<mmapper_info>(mem_id);
|
||||
|
||||
if (!info)
|
||||
{
|
||||
|
@ -153,7 +151,7 @@ s32 sys_mmapper_free_memory(u32 mem_id)
|
|||
}
|
||||
|
||||
// Release the allocated memory and remove the ID.
|
||||
Emu.GetIdManager().RemoveID<mmapper_info>(mem_id);
|
||||
Emu.GetIdManager().remove<mmapper_info>(mem_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -163,7 +161,7 @@ s32 sys_mmapper_map_memory(u32 start_addr, u32 mem_id, u64 flags)
|
|||
sys_mmapper.Warning("sys_mmapper_map_memory(start_addr=0x%x, mem_id=0x%x, flags=0x%llx)", start_addr, mem_id, flags);
|
||||
|
||||
// Check if this mem ID is valid.
|
||||
const auto info = Emu.GetIdManager().GetIDData<mmapper_info>(mem_id);
|
||||
const auto info = Emu.GetIdManager().get<mmapper_info>(mem_id);
|
||||
|
||||
if (!info)
|
||||
{
|
||||
|
@ -186,7 +184,7 @@ s32 sys_mmapper_search_and_map(u32 start_addr, u32 mem_id, u64 flags, u32 alloc_
|
|||
start_addr, mem_id, flags, alloc_addr);
|
||||
|
||||
// Check if this mem ID is valid.
|
||||
const auto info = Emu.GetIdManager().GetIDData<mmapper_info>(mem_id);
|
||||
const auto info = Emu.GetIdManager().get<mmapper_info>(mem_id);
|
||||
|
||||
if (!info)
|
||||
{
|
||||
|
|
|
@@ -19,6 +19,8 @@ struct mmapper_info
    }
};

+REG_ID_TYPE(mmapper_info, 0x08); // SYS_MEM_OBJECT
+
// SysCalls
s32 sys_mmapper_allocate_address(u32 size, u64 flags, u32 alignment, u32 alloc_addr);
s32 sys_mmapper_allocate_fixed_address();

@ -41,9 +41,7 @@ s32 sys_mutex_create(vm::ptr<u32> mutex_id, vm::ptr<sys_mutex_attribute_t> attr)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<mutex_t> mutex(new mutex_t(recursive, protocol, attr->name_u64));
|
||||
|
||||
*mutex_id = Emu.GetIdManager().GetNewID(mutex, TYPE_MUTEX);
|
||||
*mutex_id = Emu.GetIdManager().make<lv2_mutex_t>(recursive, protocol, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -54,7 +52,7 @@ s32 sys_mutex_destroy(u32 mutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(mutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(mutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -77,7 +75,7 @@ s32 sys_mutex_destroy(u32 mutex_id)
|
|||
return CELL_EPERM;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<mutex_t>(mutex_id);
|
||||
Emu.GetIdManager().remove<lv2_mutex_t>(mutex_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -90,7 +88,7 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(mutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(mutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -148,7 +146,7 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(mutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(mutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
@ -190,7 +188,7 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(mutex_id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(mutex_id);
|
||||
|
||||
if (!mutex)
|
||||
{
|
||||
|
|
|
@@ -17,7 +17,7 @@ struct sys_mutex_attribute_t
    };
};

-struct mutex_t
+struct lv2_mutex_t
{
    const bool recursive;
    const u32 protocol;
@@ -31,7 +31,7 @@ struct mutex_t
    std::condition_variable cv;
    std::atomic<u32> waiters;

-    mutex_t(bool recursive, u32 protocol, u64 name)
+    lv2_mutex_t(bool recursive, u32 protocol, u64 name)
        : recursive(recursive)
        , protocol(protocol)
        , name(name)
@@ -42,6 +42,8 @@ struct mutex_t
    }
};

+REG_ID_TYPE(lv2_mutex_t, 0x85); // SYS_MUTEX_OBJECT
+
class PPUThread;

// SysCalls

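lv2_mutex_t, like the other lv2_* structs in this commit, pairs a std::condition_variable with an atomic waiter count so the destroy path can return CELL_EBUSY while threads are still sleeping on the object. A generic sketch of that wait/signal/busy-check pattern, with invented names and none of the emulator's locking details:

#include <atomic>
#include <condition_variable>
#include <mutex>

struct waitable_sketch
{
    std::mutex mutex;
    std::condition_variable cv;
    std::atomic<unsigned> waiters{ 0 };
    bool signaled = false;

    void wait()
    {
        std::unique_lock<std::mutex> lock(mutex);
        ++waiters;
        cv.wait(lock, [this] { return signaled; });
        --waiters;
    }

    void signal()
    {
        {
            std::lock_guard<std::mutex> lock(mutex);
            signaled = true;
        }
        cv.notify_one();
    }

    bool can_destroy() const
    {
        return waiters == 0; // mirrors the CELL_EBUSY checks in the destroy syscalls
    }
};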
@ -206,81 +206,78 @@ void sys_game_process_exitspawn2(vm::ptr<const char> path, u32 argv_addr, u32 en
|
|||
|
||||
s32 sys_process_get_number_of_object(u32 object, vm::ptr<u32> nump)
|
||||
{
|
||||
sys_process.Todo("sys_process_get_number_of_object(object=%d, nump_addr=0x%x)",
|
||||
object, nump.addr());
|
||||
sys_process.Error("sys_process_get_number_of_object(object=0x%x, nump=*0x%x)", object, nump);
|
||||
|
||||
switch(object)
|
||||
{
|
||||
case SYS_MEM_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_MEM); break;
|
||||
case SYS_MUTEX_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_MUTEX); break;
|
||||
case SYS_COND_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_COND); break;
|
||||
case SYS_RWLOCK_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_RWLOCK); break;
|
||||
case SYS_INTR_TAG_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_INTR_TAG); break;
|
||||
case SYS_INTR_SERVICE_HANDLE_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_INTR_SERVICE_HANDLE); break;
|
||||
case SYS_EVENT_QUEUE_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_QUEUE); break;
|
||||
case SYS_EVENT_PORT_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_PORT); break;
|
||||
case SYS_TRACE_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_TRACE); break;
|
||||
case SYS_SPUIMAGE_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_SPUIMAGE); break;
|
||||
case SYS_PRX_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_PRX); break;
|
||||
case SYS_SPUPORT_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_SPUPORT); break;
|
||||
case SYS_LWMUTEX_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_LWMUTEX); break;
|
||||
case SYS_TIMER_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_TIMER); break;
|
||||
case SYS_SEMAPHORE_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_SEMAPHORE); break;
|
||||
case SYS_LWCOND_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_LWCOND); break;
|
||||
case SYS_EVENT_FLAG_OBJECT: *nump = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_FLAG); break;
|
||||
case SYS_MEM_OBJECT:
|
||||
case SYS_MUTEX_OBJECT:
|
||||
case SYS_COND_OBJECT:
|
||||
case SYS_RWLOCK_OBJECT:
|
||||
case SYS_INTR_TAG_OBJECT:
|
||||
case SYS_INTR_SERVICE_HANDLE_OBJECT:
|
||||
case SYS_EVENT_QUEUE_OBJECT:
|
||||
case SYS_EVENT_PORT_OBJECT:
|
||||
case SYS_TRACE_OBJECT:
|
||||
case SYS_SPUIMAGE_OBJECT:
|
||||
case SYS_PRX_OBJECT:
|
||||
case SYS_SPUPORT_OBJECT:
|
||||
case SYS_LWMUTEX_OBJECT:
|
||||
case SYS_TIMER_OBJECT:
|
||||
case SYS_SEMAPHORE_OBJECT:
|
||||
case SYS_FS_FD_OBJECT:
|
||||
*nump = Emu.GetIdManager().GetTypeCount(TYPE_FS_FILE) + Emu.GetIdManager().GetTypeCount(TYPE_FS_DIR);
|
||||
break;
|
||||
|
||||
default:
|
||||
return CELL_EINVAL;
|
||||
case SYS_LWCOND_OBJECT:
|
||||
case SYS_EVENT_FLAG_OBJECT:
|
||||
{
|
||||
*nump = Emu.GetIdManager().get_count_by_type(object);
|
||||
return CELL_OK;
|
||||
}
|
||||
}
|
||||
|
||||
return CELL_OK;
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
s32 sys_process_get_id(u32 object, vm::ptr<u32> buffer, u32 size, vm::ptr<u32> set_size)
{
sys_process.Todo("sys_process_get_id(object=%d, buffer_addr=0x%x, size=%d, set_size_addr=0x%x)",
object, buffer.addr(), size, set_size.addr());
sys_process.Error("sys_process_get_id(object=0x%x, buffer=*0x%x, size=%d, set_size=*0x%x)", object, buffer, size, set_size);

switch(object)
switch (object)
{
case SYS_MEM_OBJECT:
case SYS_MUTEX_OBJECT:
case SYS_COND_OBJECT:
case SYS_RWLOCK_OBJECT:
case SYS_INTR_TAG_OBJECT:
case SYS_INTR_SERVICE_HANDLE_OBJECT:
case SYS_EVENT_QUEUE_OBJECT:
case SYS_EVENT_PORT_OBJECT:
case SYS_TRACE_OBJECT:
case SYS_SPUIMAGE_OBJECT:
case SYS_PRX_OBJECT:
case SYS_SPUPORT_OBJECT:
case SYS_LWMUTEX_OBJECT:
case SYS_TIMER_OBJECT:
case SYS_SEMAPHORE_OBJECT:
case SYS_FS_FD_OBJECT:
case SYS_LWCOND_OBJECT:
case SYS_EVENT_FLAG_OBJECT:
{
const auto objects = Emu.GetIdManager().get_IDs_by_type(object);

#define ADD_OBJECTS(type) { \
u32 i=0; \
const auto objects = Emu.GetIdManager().GetTypeIDs(type); \
for(auto id=objects.begin(); i<size && id!=objects.end(); id++, i++) \
buffer[i] = *id; \
*set_size = i; \
u32 i = 0;

for (auto id = objects.begin(); i < size && id != objects.end(); id++, i++)
{
buffer[i] = *id;
}

*set_size = i;

return CELL_OK;
}
}

case SYS_MEM_OBJECT: ADD_OBJECTS(TYPE_MEM); break;
case SYS_MUTEX_OBJECT: ADD_OBJECTS(TYPE_MUTEX); break;
case SYS_COND_OBJECT: ADD_OBJECTS(TYPE_COND); break;
case SYS_RWLOCK_OBJECT: ADD_OBJECTS(TYPE_RWLOCK); break;
case SYS_INTR_TAG_OBJECT: ADD_OBJECTS(TYPE_INTR_TAG); break;
case SYS_INTR_SERVICE_HANDLE_OBJECT: ADD_OBJECTS(TYPE_INTR_SERVICE_HANDLE); break;
case SYS_EVENT_QUEUE_OBJECT: ADD_OBJECTS(TYPE_EVENT_QUEUE); break;
case SYS_EVENT_PORT_OBJECT: ADD_OBJECTS(TYPE_EVENT_PORT); break;
case SYS_TRACE_OBJECT: ADD_OBJECTS(TYPE_TRACE); break;
case SYS_SPUIMAGE_OBJECT: ADD_OBJECTS(TYPE_SPUIMAGE); break;
case SYS_PRX_OBJECT: ADD_OBJECTS(TYPE_PRX); break;
case SYS_SPUPORT_OBJECT: ADD_OBJECTS(TYPE_SPUPORT); break;
case SYS_LWMUTEX_OBJECT: ADD_OBJECTS(TYPE_LWMUTEX); break;
case SYS_TIMER_OBJECT: ADD_OBJECTS(TYPE_TIMER); break;
case SYS_SEMAPHORE_OBJECT: ADD_OBJECTS(TYPE_SEMAPHORE); break;
case SYS_FS_FD_OBJECT: ADD_OBJECTS(TYPE_FS_FILE);/*TODO:DIR*/ break;
case SYS_LWCOND_OBJECT: ADD_OBJECTS(TYPE_LWCOND); break;
case SYS_EVENT_FLAG_OBJECT: ADD_OBJECTS(TYPE_EVENT_FLAG); break;

#undef ADD_OBJECTS

default:
return CELL_EINVAL;
}

return CELL_OK;
return CELL_EINVAL;
}

s32 process_is_spu_lock_line_reservation_address(u32 addr, u64 flags)

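The per-type ADD_OBJECTS macro and TYPE_* constants give way to generic lookups keyed directly on the SYS_*_OBJECT value. A minimal sketch of the new flow, under the assumption (consistent with the loop above) that get_IDs_by_type returns an iterable range of u32 ids and that buffer, size and set_size come from the guest exactly as in sys_process_get_id:

// Count, then copy at most `size` ids of one object kind into the guest buffer.
u32 copied = 0;

for (const u32 id : Emu.GetIdManager().get_IDs_by_type(SYS_SEMAPHORE_OBJECT))
{
    if (copied >= size)
    {
        break;
    }

    buffer[copied++] = id; // buffer is the vm::ptr<u32> passed in by the guest
}

*set_size = copied;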
@ -1,26 +1,26 @@
#pragma once

//Process Local Object
enum
// Process Local Object Type
enum : u32
{
SYS_MEM_OBJECT = (0x08UL),
SYS_MUTEX_OBJECT = (0x85UL),
SYS_COND_OBJECT = (0x86UL),
SYS_RWLOCK_OBJECT = (0x88UL),
SYS_INTR_TAG_OBJECT = (0x0AUL),
SYS_INTR_SERVICE_HANDLE_OBJECT = (0x0BUL),
SYS_EVENT_QUEUE_OBJECT = (0x8DUL),
SYS_EVENT_PORT_OBJECT = (0x0EUL),
SYS_TRACE_OBJECT = (0x21UL),
SYS_SPUIMAGE_OBJECT = (0x22UL),
SYS_PRX_OBJECT = (0x23UL),
SYS_SPUPORT_OBJECT = (0x24UL),
SYS_LWMUTEX_OBJECT = (0x95UL),
SYS_TIMER_OBJECT = (0x11UL),
SYS_SEMAPHORE_OBJECT = (0x96UL),
SYS_FS_FD_OBJECT = (0x73UL),
SYS_LWCOND_OBJECT = (0x97UL),
SYS_EVENT_FLAG_OBJECT = (0x98UL),
SYS_MEM_OBJECT = 0x08,
SYS_MUTEX_OBJECT = 0x85,
SYS_COND_OBJECT = 0x86,
SYS_RWLOCK_OBJECT = 0x88,
SYS_INTR_TAG_OBJECT = 0x0A,
SYS_INTR_SERVICE_HANDLE_OBJECT = 0x0B,
SYS_EVENT_QUEUE_OBJECT = 0x8D,
SYS_EVENT_PORT_OBJECT = 0x0E,
SYS_TRACE_OBJECT = 0x21,
SYS_SPUIMAGE_OBJECT = 0x22,
SYS_PRX_OBJECT = 0x23,
SYS_SPUPORT_OBJECT = 0x24,
SYS_LWMUTEX_OBJECT = 0x95,
SYS_TIMER_OBJECT = 0x11,
SYS_SEMAPHORE_OBJECT = 0x96,
SYS_FS_FD_OBJECT = 0x73,
SYS_LWCOND_OBJECT = 0x97,
SYS_EVENT_FLAG_OBJECT = 0x98,
};

// Auxiliary functions

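With the enum now a plain u32, the same constants double as the type keys consumed by the reworked ID manager (see get_count_by_type / get_IDs_by_type above and in the KernelExplorer changes below). A hypothetical guard, only to illustrate how the values end up being used:

// Sketch: filter out object kinds that the process-id syscalls do not track.
inline bool is_process_local_object(u32 object)
{
    switch (object)
    {
    case SYS_MEM_OBJECT: case SYS_MUTEX_OBJECT: case SYS_COND_OBJECT:
    case SYS_RWLOCK_OBJECT: case SYS_INTR_TAG_OBJECT: case SYS_INTR_SERVICE_HANDLE_OBJECT:
    case SYS_EVENT_QUEUE_OBJECT: case SYS_EVENT_PORT_OBJECT: case SYS_TRACE_OBJECT:
    case SYS_SPUIMAGE_OBJECT: case SYS_PRX_OBJECT: case SYS_SPUPORT_OBJECT:
    case SYS_LWMUTEX_OBJECT: case SYS_TIMER_OBJECT: case SYS_SEMAPHORE_OBJECT:
    case SYS_FS_FD_OBJECT: case SYS_LWCOND_OBJECT: case SYS_EVENT_FLAG_OBJECT:
        return true;
    default:
        return false;
    }
}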
@ -3,45 +3,160 @@
|
|||
#include "Emu/System.h"
|
||||
#include "Emu/IdManager.h"
|
||||
#include "Emu/SysCalls/SysCalls.h"
|
||||
#include "Emu/SysCalls/CB_FUNC.h"
|
||||
#include "Emu/SysCalls/Modules.h"
|
||||
#include "Emu/SysCalls/ModuleManager.h"
|
||||
#include "Emu/Cell/PPUInstrTable.h"
|
||||
|
||||
#include "Emu/FS/VFS.h"
|
||||
#include "Emu/FS/vfsFile.h"
|
||||
#include "Crypto/unself.h"
|
||||
#include "Loader/ELF64.h"
|
||||
#include "sys_prx.h"
|
||||
|
||||
SysCallBase sys_prx("sys_prx");
|
||||
|
||||
extern void fill_ppu_exec_map(u32 addr, u32 size);
|
||||
|
||||
s32 prx_load_module(std::string path, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
|
||||
{
|
||||
sys_prx.Warning("prx_load_module(path='%s', flags=0x%llx, pOpt=*0x%x)", path.c_str(), flags, pOpt);
|
||||
|
||||
loader::handlers::elf64 loader;
|
||||
|
||||
vfsFile f(path);
|
||||
if (!f.IsOpened())
|
||||
return CELL_PRX_ERROR_UNKNOWN_MODULE;
|
||||
|
||||
if (loader.init(f) != loader::handler::error_code::ok || !loader.is_sprx())
|
||||
return CELL_PRX_ERROR_ILLEGAL_LIBRARY;
|
||||
|
||||
loader::handlers::elf64::sprx_info info;
|
||||
loader.load_sprx(info);
|
||||
|
||||
auto prx = std::make_shared<lv2_prx_t>();
|
||||
|
||||
auto meta = info.modules[""];
|
||||
prx->start.set(meta.exports[0xBC9A0086]);
|
||||
prx->stop.set(meta.exports[0xAB779874]);
|
||||
prx->exit.set(meta.exports[0x3AB9A95E]);
|
||||
|
||||
for (auto &module_ : info.modules)
|
||||
{
|
||||
if (module_.first == "")
|
||||
continue;
|
||||
|
||||
Module* module = Emu.GetModuleManager().GetModuleByName(module_.first.c_str());
|
||||
|
||||
if (!module)
|
||||
{
|
||||
sys_prx.Error("Unknown module '%s'", module_.first.c_str());
|
||||
}
|
||||
|
||||
for (auto& f : module_.second.exports)
|
||||
{
|
||||
const u32 nid = f.first;
|
||||
const u32 addr = f.second;
|
||||
|
||||
u32 index;
|
||||
|
||||
auto func = get_ppu_func_by_nid(nid, &index);
|
||||
|
||||
if (!func)
|
||||
{
|
||||
index = add_ppu_func(ModuleFunc(nid, 0, module, nullptr, nullptr, vm::ptr<void()>::make(addr)));
|
||||
}
|
||||
else
|
||||
{
|
||||
func->lle_func.set(addr);
|
||||
|
||||
if (func->flags & MFF_FORCED_HLE)
|
||||
{
|
||||
u32 i_addr = 0;
|
||||
|
||||
if (!vm::check_addr(addr, 8) || !vm::check_addr(i_addr = vm::read32(addr), 4))
|
||||
{
|
||||
sys_prx.Error("Failed to inject code for exported function '%s' (opd=0x%x, 0x%x)", SysCalls::GetFuncName(nid), addr, i_addr);
|
||||
}
|
||||
else
|
||||
{
|
||||
vm::write32(i_addr, PPU_instr::HACK(index | EIF_PERFORM_BLR));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto &import : module_.second.imports)
|
||||
{
|
||||
u32 nid = import.first;
|
||||
u32 addr = import.second;
|
||||
|
||||
u32 index;
|
||||
|
||||
auto func = get_ppu_func_by_nid(nid, &index);
|
||||
|
||||
if (!func)
|
||||
{
|
||||
sys_prx.Error("Unimplemented function '%s' in '%s' module (0x%x)", SysCalls::GetFuncName(nid), module_.first);
|
||||
|
||||
index = add_ppu_func(ModuleFunc(nid, 0, module, nullptr, nullptr));
|
||||
}
|
||||
else
|
||||
{
|
||||
const bool is_lle = func->lle_func && !(func->flags & MFF_FORCED_HLE);
|
||||
|
||||
sys_prx.Error("Imported %sfunction '%s' in '%s' module (0x%x)", (is_lle ? "LLE " : ""), SysCalls::GetFuncName(nid), module_.first, addr);
|
||||
}
|
||||
|
||||
if (!patch_ppu_import(addr, index))
|
||||
{
|
||||
sys_prx.Error("Failed to inject code at address 0x%x", addr);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for (auto& seg : info.segments)
|
||||
{
|
||||
const u32 addr = seg.begin.addr();
|
||||
const u32 size = align(seg.size, 4096);
|
||||
|
||||
if (vm::check_addr(addr, size))
|
||||
{
|
||||
fill_ppu_exec_map(addr, size);
|
||||
}
|
||||
else
|
||||
{
|
||||
sys_prx.Error("Failed to process executable area (addr=0x%x, size=0x%x)", addr, size);
|
||||
}
|
||||
}
|
||||
|
||||
return Emu.GetIdManager().add(std::move(prx));
|
||||
}
|
||||
|
||||
s32 sys_prx_load_module(vm::ptr<const char> path, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt)
|
||||
{
|
||||
sys_prx.Todo("sys_prx_load_module(path=\"%s\", flags=0x%llx, pOpt=0x%x)", path.get_ptr(), flags, pOpt.addr());
|
||||
sys_prx.Warning("sys_prx_load_module(path=*0x%x, flags=0x%llx, pOpt=*0x%x)", path, flags, pOpt);
|
||||
|
||||
std::string _path = path.get_ptr();
|
||||
// Check if the file is SPRX
|
||||
std::string local_path;
|
||||
Emu.GetVFS().GetDevice(_path, local_path);
|
||||
if (IsSelf(local_path)) {
|
||||
if (!DecryptSelf(local_path+".prx", local_path)) {
|
||||
return CELL_PRX_ERROR_ILLEGAL_LIBRARY;
|
||||
}
|
||||
_path += ".prx";
|
||||
return prx_load_module(path.get_ptr(), flags, pOpt);
|
||||
}
|
||||
|
||||
s32 sys_prx_load_module_list(s32 count, vm::ptr<const char, 2> path_list, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list)
|
||||
{
|
||||
sys_prx.Warning("sys_prx_load_module_list(count=%d, path_list=*0x%x, flags=0x%llx, pOpt=*0x%x, id_list=*0x%x)", count, path_list, flags, pOpt, id_list);
|
||||
|
||||
for (s32 i = 0; i < count; ++i)
|
||||
{
|
||||
auto path = path_list[i];
|
||||
std::string name = path.get_ptr();
|
||||
s32 result = prx_load_module(name, flags, pOpt);
|
||||
|
||||
if (result < 0)
|
||||
return result;
|
||||
|
||||
id_list[i] = result;
|
||||
}
|
||||
|
||||
vfsFile f(_path);
|
||||
if (!f.IsOpened()) {
|
||||
return CELL_PRX_ERROR_UNKNOWN_MODULE;
|
||||
}
|
||||
|
||||
// Create the PRX object and return its id
|
||||
std::shared_ptr<sys_prx_t> prx(new sys_prx_t());
|
||||
prx->size = (u32)f.GetSize();
|
||||
prx->address = (u32)Memory.Alloc(prx->size, 4);
|
||||
prx->path = (const char*)path;
|
||||
|
||||
// Load the PRX into memory
|
||||
f.Read(vm::get_ptr(prx->address), prx->size);
|
||||
|
||||
u32 id = Emu.GetIdManager().GetNewID(prx, TYPE_PRX);
|
||||
return id;
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_prx_load_module_on_memcontainer()
|
||||
|
@ -62,56 +177,62 @@ s32 sys_prx_load_module_on_memcontainer_by_fd()
return CELL_OK;
}

s32 sys_prx_start_module(s32 id, u32 args, u32 argp_addr, vm::ptr<u32> modres, u64 flags, vm::ptr<sys_prx_start_module_option_t> pOpt)
s32 sys_prx_start_module(s32 id, u64 flags, vm::ptr<sys_prx_start_module_option_t> pOpt)
{
sys_prx.Todo("sys_prx_start_module(id=0x%x, args=%d, argp_addr=0x%x, modres_addr=0x%x, flags=0x%llx, pOpt=0x%x)",
id, args, argp_addr, modres.addr(), flags, pOpt.addr());
sys_prx.Warning("sys_prx_start_module(id=0x%x, flags=0x%llx, pOpt=*0x%x)", id, flags, pOpt);

const auto prx = Emu.GetIdManager().GetIDData<sys_prx_t>(id);
const auto prx = Emu.GetIdManager().get<lv2_prx_t>(id);

if (!prx)
{
return CELL_ESRCH;
}

if (prx->isStarted)
return CELL_PRX_ERROR_ALREADY_STARTED;
//if (prx->is_started)
// return CELL_PRX_ERROR_ALREADY_STARTED;

//prx->is_started = true;
pOpt->entry_point.set(be_t<u64>::make(prx->start ? prx->start.addr() : ~0ull));

return CELL_OK;
}

s32 sys_prx_stop_module(s32 id, u32 args, u32 argp_addr, vm::ptr<u32> modres, u64 flags, vm::ptr<sys_prx_stop_module_option_t> pOpt)
s32 sys_prx_stop_module(s32 id, u64 flags, vm::ptr<sys_prx_stop_module_option_t> pOpt)
{
sys_prx.Todo("sys_prx_stop_module(id=0x%x, args=%d, argp_addr=0x%x, modres_addr=0x%x, flags=0x%llx, pOpt=0x%x)",
id, args, argp_addr, modres.addr(), flags, pOpt.addr());
sys_prx.Warning("sys_prx_stop_module(id=0x%x, flags=0x%llx, pOpt=*0x%x)", id, flags, pOpt);

const auto prx = Emu.GetIdManager().GetIDData<sys_prx_t>(id);
const auto prx = Emu.GetIdManager().get<lv2_prx_t>(id);

if (!prx)
{
return CELL_ESRCH;
}

if (!prx->isStarted)
return CELL_PRX_ERROR_ALREADY_STOPPED;
//if (!prx->is_started)
// return CELL_PRX_ERROR_ALREADY_STOPPED;

//prx->is_started = false;
pOpt->entry_point.set(be_t<u64>::make(prx->stop ? prx->stop.addr() : -1));

return CELL_OK;
}

s32 sys_prx_unload_module(s32 id, u64 flags, vm::ptr<sys_prx_unload_module_option_t> pOpt)
{
sys_prx.Todo("sys_prx_unload_module(id=0x%x, flags=0x%llx, pOpt=0x%x)", id, flags, pOpt.addr());
sys_prx.Warning("sys_prx_unload_module(id=0x%x, flags=0x%llx, pOpt=*0x%x)", id, flags, pOpt);

// Get the PRX, free the used memory and delete the object and its ID
const auto prx = Emu.GetIdManager().GetIDData<sys_prx_t>(id);
const auto prx = Emu.GetIdManager().get<lv2_prx_t>(id);

if (!prx)
{
return CELL_ESRCH;
}

Memory.Free(prx->address);
Emu.GetIdManager().RemoveID<sys_prx_t>(id);
//Memory.Free(prx->address);

//s32 result = prx->exit ? prx->exit() : CELL_OK;
Emu.GetIdManager().remove<lv2_prx_t>(id);

return CELL_OK;
}

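In the new signatures sys_prx_start_module and sys_prx_stop_module no longer receive args/argp/modres; they only report the module's start or stop OPD back through pOpt->entry_point and leave the actual call to the caller. A rough sketch of consuming that result (names outside the diff, such as opt_addr, are assumptions):

// opt_addr is assumed to point at guest memory holding a sys_prx_start_module_option_t
auto opt = vm::ptr<sys_prx_start_module_option_t>::make(opt_addr);

if (sys_prx_start_module(id, 0, opt) == CELL_OK)
{
    // ~0ull means the module exports no start function; anything else is its OPD address
    if (opt->entry_point.addr() != ~0ull)
    {
        // invoke it as s32 module_start(int argc, vm::ptr<void> argv) on the PPU
    }
}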
@ -146,15 +267,15 @@ s32 sys_prx_get_module_info()
return CELL_OK;
}

s32 sys_prx_register_library(u32 lib_addr)
s32 sys_prx_register_library(vm::ptr<void> library)
{
sys_prx.Todo("sys_prx_register_library(lib_addr=0x%x)", lib_addr);
sys_prx.Todo("sys_prx_register_library(library=*0x%x)", library);
return CELL_OK;
}

s32 sys_prx_unregister_library()
s32 sys_prx_unregister_library(vm::ptr<void> library)
{
sys_prx.Todo("sys_prx_unregister_library()");
sys_prx.Todo("sys_prx_unregister_library(library=*0x%x)", library);
return CELL_OK;
}

@ -101,11 +101,15 @@ struct sys_prx_load_module_option_t
struct sys_prx_start_module_option_t
{
be_t<u64> size;
be_t<u64> put;
vm::bptr<s32(int argc, vm::ptr<void> argv), 1, u64> entry_point;
};

struct sys_prx_stop_module_option_t
{
be_t<u64> size;
be_t<u64> put;
vm::bptr<s32(int argc, vm::ptr<void> argv), 1, u64> entry_point;
};

struct sys_prx_unload_module_option_t

@ -114,34 +118,37 @@ struct sys_prx_unload_module_option_t
};

// Auxiliary data types
struct sys_prx_t
struct lv2_prx_t
{
u32 size;
u32 address;
std::string path;
bool isStarted;
bool is_started = false;

sys_prx_t()
: isStarted(false)
vm::ptr<s32(int argc, vm::ptr<void> argv)> start = vm::null;
vm::ptr<s32(int argc, vm::ptr<void> argv)> stop = vm::null;
vm::ptr<s32()> exit = vm::null;

lv2_prx_t()
{
}
};

REG_ID_TYPE(lv2_prx_t, 0x23); // SYS_PRX_OBJECT

// SysCalls
s32 sys_prx_load_module(vm::ptr<const char> path, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt);
s32 sys_prx_load_module_list(s32 count, vm::ptr<const char, 2> path_list, u64 flags, vm::ptr<sys_prx_load_module_option_t> pOpt, vm::ptr<u32> id_list);
s32 sys_prx_load_module_on_memcontainer();
s32 sys_prx_load_module_by_fd();
s32 sys_prx_load_module_on_memcontainer_by_fd();
s32 sys_prx_start_module(s32 id, u32 args, u32 argp_addr, vm::ptr<u32> modres, u64 flags, vm::ptr<sys_prx_start_module_option_t> pOpt);
s32 sys_prx_stop_module(s32 id, u32 args, u32 argp_addr, vm::ptr<u32> modres, u64 flags, vm::ptr<sys_prx_stop_module_option_t> pOpt);
s32 sys_prx_start_module(s32 id, u64 flags, vm::ptr<sys_prx_start_module_option_t> pOpt);
s32 sys_prx_stop_module(s32 id, u64 flags, vm::ptr<sys_prx_stop_module_option_t> pOpt);
s32 sys_prx_unload_module(s32 id, u64 flags, vm::ptr<sys_prx_unload_module_option_t> pOpt);
s32 sys_prx_get_module_list();
s32 sys_prx_get_my_module_id();
s32 sys_prx_get_module_id_by_address();
s32 sys_prx_get_module_id_by_name();
s32 sys_prx_get_module_info();
s32 sys_prx_register_library(u32 lib_addr);
s32 sys_prx_unregister_library();
s32 sys_prx_register_library(vm::ptr<void> library);
s32 sys_prx_unregister_library(vm::ptr<void> library);
s32 sys_prx_get_ppu_guid();
s32 sys_prx_register_module();
s32 sys_prx_query_module();

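REG_ID_TYPE(lv2_prx_t, 0x23) is what lets the generic ID manager answer queries keyed on SYS_PRX_OBJECT without a separate TYPE_PRX constant. Condensing the calls that appear across these diffs into one lifecycle (the empty-pointer behaviour of get<> is inferred from the `if (!prx)` checks, not stated in the source):

// create: register an object and receive its id
const u32 id = Emu.GetIdManager().add(std::make_shared<lv2_prx_t>());

// look up: yields an empty pointer for a stale id or a wrong-type id
if (const auto prx = Emu.GetIdManager().get<lv2_prx_t>(id))
{
    // prx->start, prx->stop, prx->exit are usable here
}

// destroy: drop the mapping (the object dies when the last shared_ptr does)
Emu.GetIdManager().remove<lv2_prx_t>(id);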
@ -36,9 +36,7 @@ s32 sys_rwlock_create(vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> a
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
std::shared_ptr<rwlock_t> rwlock(new rwlock_t(attr->protocol, attr->name_u64));
|
||||
|
||||
*rw_lock_id = Emu.GetIdManager().GetNewID(rwlock, TYPE_RWLOCK);
|
||||
*rw_lock_id = Emu.GetIdManager().make<lv2_rwlock_t>(attr->protocol, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -49,7 +47,7 @@ s32 sys_rwlock_destroy(u32 rw_lock_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -61,7 +59,7 @@ s32 sys_rwlock_destroy(u32 rw_lock_id)
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<rwlock_t>(rw_lock_id);
|
||||
Emu.GetIdManager().remove<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -74,7 +72,7 @@ s32 sys_rwlock_rlock(u32 rw_lock_id, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -113,7 +111,7 @@ s32 sys_rwlock_tryrlock(u32 rw_lock_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -136,7 +134,7 @@ s32 sys_rwlock_runlock(u32 rw_lock_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -164,7 +162,7 @@ s32 sys_rwlock_wlock(PPUThread& CPU, u32 rw_lock_id, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -208,7 +206,7 @@ s32 sys_rwlock_trywlock(PPUThread& CPU, u32 rw_lock_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
@ -236,7 +234,7 @@ s32 sys_rwlock_wunlock(PPUThread& CPU, u32 rw_lock_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto rwlock = Emu.GetIdManager().GetIDData<rwlock_t>(rw_lock_id);
|
||||
const auto rwlock = Emu.GetIdManager().get<lv2_rwlock_t>(rw_lock_id);
|
||||
|
||||
if (!rwlock)
|
||||
{
|
||||
|
|
|
@ -15,7 +15,7 @@ struct sys_rwlock_attribute_t
};
};

struct rwlock_t
struct lv2_rwlock_t
{
const u32 protocol;
const u64 name;

@ -29,7 +29,7 @@ struct rwlock_t
std::atomic<u32> rwaiters;
std::atomic<u32> wwaiters;

rwlock_t(u32 protocol, u64 name)
lv2_rwlock_t(u32 protocol, u64 name)
: protocol(protocol)
, name(name)
, readers(0)

@ -40,6 +40,8 @@ struct rwlock_t
}
};

REG_ID_TYPE(lv2_rwlock_t, 0x88); // SYS_RWLOCK_OBJECT

// SysCalls
s32 sys_rwlock_create(vm::ptr<u32> rw_lock_id, vm::ptr<sys_rwlock_attribute_t> attr);
s32 sys_rwlock_destroy(u32 rw_lock_id);

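sys_rwlock_create now builds the lv2_rwlock_t in place through the ID manager instead of wrapping a shared_ptr and tagging it with TYPE_RWLOCK by hand; the tag is implied by the REG_ID_TYPE line above. The same pattern is applied to semaphores and SPU thread groups elsewhere in this commit. A before/after sketch, assuming make<T> forwards its arguments to T's constructor and returns the freshly assigned id:

// before
std::shared_ptr<rwlock_t> rwlock(new rwlock_t(attr->protocol, attr->name_u64));
*rw_lock_id = Emu.GetIdManager().GetNewID(rwlock, TYPE_RWLOCK);

// after: one call, type tag supplied by REG_ID_TYPE(lv2_rwlock_t, 0x88)
*rw_lock_id = Emu.GetIdManager().make<lv2_rwlock_t>(attr->protocol, attr->name_u64);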
@ -12,13 +12,6 @@
|
|||
|
||||
SysCallBase sys_semaphore("sys_semaphore");
|
||||
|
||||
u32 semaphore_create(s32 initial_val, s32 max_val, u32 protocol, u64 name_u64)
|
||||
{
|
||||
std::shared_ptr<semaphore_t> sem(new semaphore_t(protocol, max_val, name_u64, initial_val));
|
||||
|
||||
return Emu.GetIdManager().GetNewID(sem, TYPE_SEMAPHORE);
|
||||
}
|
||||
|
||||
s32 sys_semaphore_create(vm::ptr<u32> sem, vm::ptr<sys_semaphore_attribute_t> attr, s32 initial_val, s32 max_val)
|
||||
{
|
||||
sys_semaphore.Warning("sys_semaphore_create(sem=*0x%x, attr=*0x%x, initial_val=%d, max_val=%d)", sem, attr, initial_val, max_val);
|
||||
|
@ -50,7 +43,7 @@ s32 sys_semaphore_create(vm::ptr<u32> sem, vm::ptr<sys_semaphore_attribute_t> at
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
*sem = semaphore_create(initial_val, max_val, protocol, attr->name_u64);
|
||||
*sem = Emu.GetIdManager().make<lv2_sema_t>(initial_val, max_val, protocol, attr->name_u64);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -61,7 +54,7 @@ s32 sys_semaphore_destroy(u32 sem)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto semaphore = Emu.GetIdManager().GetIDData<semaphore_t>(sem);
|
||||
const auto semaphore = Emu.GetIdManager().get<lv2_sema_t>(sem);
|
||||
|
||||
if (!semaphore)
|
||||
{
|
||||
|
@ -73,7 +66,7 @@ s32 sys_semaphore_destroy(u32 sem)
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<semaphore_t>(sem);
|
||||
Emu.GetIdManager().remove<lv2_sema_t>(sem);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -86,7 +79,7 @@ s32 sys_semaphore_wait(u32 sem, u64 timeout)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto semaphore = Emu.GetIdManager().GetIDData<semaphore_t>(sem);
|
||||
const auto semaphore = Emu.GetIdManager().get<lv2_sema_t>(sem);
|
||||
|
||||
if (!semaphore)
|
||||
{
|
||||
|
@ -125,7 +118,7 @@ s32 sys_semaphore_trywait(u32 sem)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto semaphore = Emu.GetIdManager().GetIDData<semaphore_t>(sem);
|
||||
const auto semaphore = Emu.GetIdManager().get<lv2_sema_t>(sem);
|
||||
|
||||
if (!semaphore)
|
||||
{
|
||||
|
@ -148,7 +141,7 @@ s32 sys_semaphore_post(u32 sem, s32 count)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto semaphore = Emu.GetIdManager().GetIDData<semaphore_t>(sem);
|
||||
const auto semaphore = Emu.GetIdManager().get<lv2_sema_t>(sem);
|
||||
|
||||
if (!semaphore)
|
||||
{
|
||||
|
@ -189,7 +182,7 @@ s32 sys_semaphore_get_value(u32 sem, vm::ptr<s32> count)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto semaphore = Emu.GetIdManager().GetIDData<semaphore_t>(sem);
|
||||
const auto semaphore = Emu.GetIdManager().get<lv2_sema_t>(sem);
|
||||
|
||||
if (!semaphore)
|
||||
{
|
||||
|
|
|
@ -15,7 +15,7 @@ struct sys_semaphore_attribute_t
};
};

struct semaphore_t
struct lv2_sema_t
{
const u32 protocol;
const s32 max;

@ -27,7 +27,7 @@ struct semaphore_t
std::condition_variable cv;
std::atomic<u32> waiters;

semaphore_t(u32 protocol, s32 max, u64 name, s32 value)
lv2_sema_t(u32 protocol, s32 max, u64 name, s32 value)
: protocol(protocol)
, max(max)
, name(name)

@ -37,8 +37,7 @@ struct semaphore_t
}
};

// Aux
u32 semaphore_create(s32 initial_val, s32 max_val, u32 protocol, u64 name_u64);
REG_ID_TYPE(lv2_sema_t, 0x96); // SYS_SEMAPHORE_OBJECT

// SysCalls
s32 sys_semaphore_create(vm::ptr<u32> sem, vm::ptr<sys_semaphore_attribute_t> attr, s32 initial_val, s32 max_val);

@ -108,7 +108,7 @@ u32 spu_thread_initialize(u32 group_id, u32 spu_num, vm::ptr<sys_spu_image> img,
|
|||
spu.SetName(name);
|
||||
spu.m_custom_task = task;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(group_id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(group_id);
|
||||
|
||||
spu.tg = group;
|
||||
group->threads[spu_num] = t;
|
||||
|
@ -140,7 +140,7 @@ s32 sys_spu_thread_initialize(vm::ptr<u32> thread, u32 group_id, u32 spu_num, vm
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(group_id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(group_id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -221,9 +221,7 @@ u32 spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type
|
|||
sys_spu.Todo("Unsupported SPU Thread Group type (0x%x)", type);
|
||||
}
|
||||
|
||||
std::shared_ptr<spu_group_t> group(new spu_group_t(name, num, prio, type, container));
|
||||
|
||||
return Emu.GetIdManager().GetNewID(group);
|
||||
return Emu.GetIdManager().make<spu_group_t>(name, num, prio, type, container);
|
||||
}
|
||||
|
||||
s32 sys_spu_thread_group_create(vm::ptr<u32> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr)
|
||||
|
@ -247,7 +245,7 @@ s32 sys_spu_thread_group_destroy(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -274,7 +272,7 @@ s32 sys_spu_thread_group_destroy(u32 id)
|
|||
}
|
||||
|
||||
group->state = SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED; // hack
|
||||
Emu.GetIdManager().RemoveID<spu_group_t>(id);
|
||||
Emu.GetIdManager().remove<spu_group_t>(id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -285,7 +283,7 @@ s32 sys_spu_thread_group_start(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -348,7 +346,7 @@ s32 sys_spu_thread_group_suspend(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -403,7 +401,7 @@ s32 sys_spu_thread_group_resume(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -450,7 +448,7 @@ s32 sys_spu_thread_group_yield(u32 id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -475,7 +473,7 @@ s32 sys_spu_thread_group_terminate(u32 id, s32 value)
|
|||
|
||||
// seems the id can be either SPU Thread Group or SPU Thread
|
||||
auto thread = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
|
||||
auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group && !thread)
|
||||
{
|
||||
|
@ -540,7 +538,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<u32> cause, vm::ptr<u32> status)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -778,8 +776,8 @@ s32 sys_spu_thread_group_connect_event(u32 id, u32 eq, u32 et)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(eq);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(eq);
|
||||
|
||||
if (!group || !queue)
|
||||
{
|
||||
|
@ -834,7 +832,7 @@ s32 sys_spu_thread_group_disconnect_event(u32 id, u32 et)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
@ -898,7 +896,7 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq, u32 et, u8 spup)
|
|||
LV2_LOCK;
|
||||
|
||||
const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(eq);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(eq);
|
||||
|
||||
if (!t || !queue)
|
||||
{
|
||||
|
@ -965,7 +963,7 @@ s32 sys_spu_thread_bind_queue(u32 id, u32 spuq, u32 spuq_num)
|
|||
LV2_LOCK;
|
||||
|
||||
const auto t = Emu.GetCPU().GetThread(id, CPU_THREAD_SPU);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(spuq);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(spuq);
|
||||
|
||||
if (!t || !queue)
|
||||
{
|
||||
|
@ -1038,8 +1036,8 @@ s32 sys_spu_thread_group_connect_event_all_threads(u32 id, u32 eq, u64 req, vm::
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(eq);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(eq);
|
||||
|
||||
if (!group || !queue)
|
||||
{
|
||||
|
@ -1113,7 +1111,7 @@ s32 sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto group = Emu.GetIdManager().GetIDData<spu_group_t>(id);
|
||||
const auto group = Emu.GetIdManager().get<spu_group_t>(id);
|
||||
|
||||
if (!group)
|
||||
{
|
||||
|
|
|
@ -157,9 +157,9 @@ struct spu_group_t
std::atomic<u32> join_state; // flags used to detect exit cause
std::condition_variable join_cv; // used to signal waiting PPU thread

std::weak_ptr<event_queue_t> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::weak_ptr<event_queue_t> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::weak_ptr<event_queue_t> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE
std::weak_ptr<lv2_event_queue_t> ep_run; // port for SYS_SPU_THREAD_GROUP_EVENT_RUN events
std::weak_ptr<lv2_event_queue_t> ep_exception; // TODO: SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION
std::weak_ptr<lv2_event_queue_t> ep_sysmodule; // TODO: SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE

spu_group_t(std::string name, u32 num, s32 prio, s32 type, u32 ct)
: name(name)

@ -18,7 +18,7 @@ s32 sys_timer_create(vm::ptr<u32> timer_id)
|
|||
|
||||
std::shared_ptr<lv2_timer_t> timer(new lv2_timer_t);
|
||||
|
||||
thread_t(fmt::format("Timer[0x%x] Thread", (*timer_id = Emu.GetIdManager().GetNewID(timer, TYPE_TIMER))), [timer]()
|
||||
thread_t(fmt::format("Timer[0x%x] Thread", (*timer_id = Emu.GetIdManager().add(timer))), [timer]() // TODO: call from the constructor
|
||||
{
|
||||
LV2_LOCK;
|
||||
|
||||
|
@ -62,7 +62,7 @@ s32 sys_timer_destroy(u32 timer_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
|
||||
if (!timer)
|
||||
{
|
||||
|
@ -74,7 +74,7 @@ s32 sys_timer_destroy(u32 timer_id)
|
|||
return CELL_EISCONN;
|
||||
}
|
||||
|
||||
Emu.GetIdManager().RemoveID<lv2_timer_t>(timer_id);
|
||||
Emu.GetIdManager().remove<lv2_timer_t>(timer_id);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -85,7 +85,7 @@ s32 sys_timer_get_information(u32 timer_id, vm::ptr<sys_timer_information_t> inf
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
|
||||
if (!timer)
|
||||
{
|
||||
|
@ -108,7 +108,7 @@ s32 _sys_timer_start(u32 timer_id, u64 base_time, u64 period)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
|
||||
if (!timer)
|
||||
{
|
||||
|
@ -160,7 +160,7 @@ s32 sys_timer_stop(u32 timer_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
|
||||
if (!timer)
|
||||
{
|
||||
|
@ -178,8 +178,8 @@ s32 sys_timer_connect_event_queue(u32 timer_id, u32 queue_id, u64 name, u64 data
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(queue_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(queue_id);
|
||||
|
||||
if (!timer || !queue)
|
||||
{
|
||||
|
@ -205,7 +205,7 @@ s32 sys_timer_disconnect_event_queue(u32 timer_id)
|
|||
|
||||
LV2_LOCK;
|
||||
|
||||
const auto timer = Emu.GetIdManager().GetIDData<lv2_timer_t>(timer_id);
|
||||
const auto timer = Emu.GetIdManager().get<lv2_timer_t>(timer_id);
|
||||
|
||||
if (!timer)
|
||||
{
|
||||
|
|
|
@ -18,7 +18,7 @@ struct sys_timer_information_t
// "timer_t" conflicts with some definition
struct lv2_timer_t
{
std::weak_ptr<event_queue_t> port; // event queue
std::weak_ptr<lv2_event_queue_t> port; // event queue
u64 source; // event source
u64 data1; // event arg 1
u64 data2; // event arg 2

@ -37,6 +37,8 @@ struct lv2_timer_t
}
};

REG_ID_TYPE(lv2_timer_t, 0x11); // SYS_TIMER_OBJECT

s32 sys_timer_create(vm::ptr<u32> timer_id);
s32 sys_timer_destroy(u32 timer_id);
s32 sys_timer_get_information(u32 timer_id, vm::ptr<sys_timer_information_t> info);

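The timer keeps only a weak_ptr to its connected event queue, so a queue destroyed elsewhere never dangles and never gets kept alive by the timer. A small sketch of the check the timer thread would make before firing; `timer` here stands for a shared_ptr<lv2_timer_t>, and the dispatch comments are illustrative rather than quoted from the source:

// Sketch: resolve the weak reference before delivering an event.
if (const auto queue = timer->port.lock())
{
    // the queue still exists: deliver (source, data1, data2) to it
}
else
{
    // the queue is gone: stop the timer instead of firing into a dead object
}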
@ -39,7 +39,7 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 a
else
{
// Check memory container.
const auto ct = Emu.GetIdManager().GetIDData<MemoryContainerInfo>(cid);
const auto ct = Emu.GetIdManager().get<MemoryContainerInfo>(cid);

if (!ct)
{

@ -49,7 +49,7 @@ Emulator::Emulator()
, m_pad_manager(new PadManager())
, m_keyboard_manager(new KeyboardManager())
, m_mouse_manager(new MouseManager())
, m_id_manager(new IdManager())
, m_id_manager(new ID_manager())
, m_gs_manager(new GSManager())
, m_audio_manager(new AudioManager())
, m_callback_manager(new CallbackManager())

@ -326,7 +326,7 @@ void Emulator::Pause()
if (!IsRunning()) return;
SendDbgCommand(DID_PAUSE_EMU);

if (InterlockedCompareExchange((volatile u32*)&m_status, Paused, Running) == Running)
if (sync_bool_compare_and_swap((volatile u32*)&m_status, Running, Paused))
{
SendDbgCommand(DID_PAUSED_EMU);

@ -396,7 +396,7 @@ void Emulator::Stop()
GetAudioManager().Close();
GetEventManager().Clear();
GetCPU().Close();
GetIdManager().Clear();
GetIdManager().clear();
GetPadManager().Close();
GetKeyboardManager().Close();
GetMouseManager().Close();

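The Windows-only InterlockedCompareExchange is swapped for a portable compare-and-swap wrapper; note that the argument order changes from (dest, desired, expected) to (dest, expected, desired). In standard C++ the same Running to Paused transition would look roughly like the sketch below, assuming m_status were a std::atomic and using the status names from System.h (their numeric values here are an assumption):

#include <atomic>
#include <cstdint>

enum Status : uint32_t { Ready, Running, Paused, Stopped }; // values assumed for the sketch

std::atomic<uint32_t> m_status{Running};

bool pause()
{
    uint32_t expected = Running;
    // succeeds (and publishes Paused) only if the emulator was actually Running
    return m_status.compare_exchange_strong(expected, Paused);
}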
@ -14,7 +14,7 @@ class CPUThreadManager;
class PadManager;
class KeyboardManager;
class MouseManager;
class IdManager;
class ID_manager;
class GSManager;
class AudioManager;
class CallbackManager;

@ -84,7 +84,7 @@ class Emulator
PadManager* m_pad_manager;
KeyboardManager* m_keyboard_manager;
MouseManager* m_mouse_manager;
IdManager* m_id_manager;
ID_manager* m_id_manager;
GSManager* m_gs_manager;
AudioManager* m_audio_manager;
CallbackManager* m_callback_manager;

@ -141,7 +141,7 @@ public:
PadManager& GetPadManager() { return *m_pad_manager; }
KeyboardManager& GetKeyboardManager() { return *m_keyboard_manager; }
MouseManager& GetMouseManager() { return *m_mouse_manager; }
IdManager& GetIdManager() { return *m_id_manager; }
ID_manager& GetIdManager() { return *m_id_manager; }
GSManager& GetGSManager() { return *m_gs_manager; }
AudioManager& GetAudioManager() { return *m_audio_manager; }
CallbackManager& GetCallbackManager() { return *m_callback_manager; }

@ -190,10 +190,10 @@ public:
void SavePoints(const std::string& path);
void LoadPoints(const std::string& path);

__forceinline bool IsRunning() const { return m_status == Running; }
__forceinline bool IsPaused() const { return m_status == Paused; }
__forceinline bool IsStopped() const { return m_status == Stopped; }
__forceinline bool IsReady() const { return m_status == Ready; }
force_inline bool IsRunning() const { return m_status == Running; }
force_inline bool IsPaused() const { return m_status == Paused; }
force_inline bool IsStopped() const { return m_status == Stopped; }
force_inline bool IsReady() const { return m_status == Ready; }
};

using lv2_lock_type = std::unique_lock<std::mutex>;

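lv2_lock_type is the std::unique_lock alias behind the LV2_LOCK statements used throughout the syscall bodies above: take the global LV2 mutex first, then resolve the id. A condensed sketch of that pattern; the mutex itself is not shown in this diff, so its name below is assumed, and the function is illustrative rather than a real syscall:

extern std::mutex g_lv2_mutex; // assumed name; the real mutex object is not part of this diff

s32 example_syscall(u32 id)
{
    lv2_lock_type lv2_lock(g_lv2_mutex); // what LV2_LOCK conceptually expands to

    const auto obj = Emu.GetIdManager().get<lv2_rwlock_t>(id);

    if (!obj)
    {
        return CELL_ESRCH; // stale or wrong-type id
    }

    // the rest of the body runs under the lock; unique_lock releases it on every return path
    return CELL_OK;
}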
@ -13,6 +13,7 @@
|
|||
#include "Emu/SysCalls/lv2/sys_cond.h"
|
||||
#include "Emu/SysCalls/lv2/sys_semaphore.h"
|
||||
#include "Emu/SysCalls/lv2/sys_event.h"
|
||||
#include "Emu/SysCalls/lv2/sys_process.h"
|
||||
|
||||
#include "KernelExplorer.h"
|
||||
|
||||
|
@ -50,7 +51,6 @@ KernelExplorer::KernelExplorer(wxWindow* parent)
|
|||
|
||||
void KernelExplorer::Update()
|
||||
{
|
||||
int count;
|
||||
char name[4096];
|
||||
|
||||
m_tree->DeleteAllItems();
|
||||
|
@ -77,112 +77,104 @@ void KernelExplorer::Update()
|
|||
// TODO: FileSystem
|
||||
|
||||
// Semaphores
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_SEMAPHORE);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_SEMAPHORE_OBJECT))
|
||||
{
|
||||
sprintf(name, "Semaphores (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_SEMAPHORE))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_SEMAPHORE_OBJECT))
|
||||
{
|
||||
const auto sem = Emu.GetIdManager().GetIDData<semaphore_t>(id);
|
||||
const auto sem = Emu.GetIdManager().get<lv2_sema_t>(id);
|
||||
sprintf(name, "Semaphore: ID = 0x%x '%s', Count = %d, Max Count = %d, Waiters = %d", id, &name64(sem->name), sem->value.load(), sem->max, sem->waiters.load());
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Mutexes
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_MUTEX);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_MUTEX_OBJECT))
|
||||
{
|
||||
sprintf(name, "Mutexes (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_MUTEX))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_MUTEX_OBJECT))
|
||||
{
|
||||
const auto mutex = Emu.GetIdManager().GetIDData<mutex_t>(id);
|
||||
const auto mutex = Emu.GetIdManager().get<lv2_mutex_t>(id);
|
||||
sprintf(name, "Mutex: ID = 0x%x '%s'", id, &name64(mutex->name));
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Light Weight Mutexes
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_LWMUTEX);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_LWMUTEX_OBJECT))
|
||||
{
|
||||
sprintf(name, "Lightweight Mutexes (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_LWMUTEX))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_LWMUTEX_OBJECT))
|
||||
{
|
||||
const auto lwm = Emu.GetIdManager().GetIDData<lwmutex_t>(id);
|
||||
const auto lwm = Emu.GetIdManager().get<lv2_lwmutex_t>(id);
|
||||
sprintf(name, "Lightweight Mutex: ID = 0x%x '%s'", id, &name64(lwm->name));
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Condition Variables
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_COND);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_COND_OBJECT))
|
||||
{
|
||||
sprintf(name, "Condition Variables (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_COND))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_COND_OBJECT))
|
||||
{
|
||||
const auto cond = Emu.GetIdManager().GetIDData<cond_t>(id);
|
||||
const auto cond = Emu.GetIdManager().get<lv2_cond_t>(id);
|
||||
sprintf(name, "Condition Variable: ID = 0x%x '%s'", id, &name64(cond->name));
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Light Weight Condition Variables
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_LWCOND);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_LWCOND_OBJECT))
|
||||
{
|
||||
sprintf(name, "Lightweight Condition Variables (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_LWCOND))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_LWCOND_OBJECT))
|
||||
{
|
||||
const auto lwc = Emu.GetIdManager().GetIDData<lwcond_t>(id);
|
||||
const auto lwc = Emu.GetIdManager().get<lv2_lwcond_t>(id);
|
||||
sprintf(name, "Lightweight Condition Variable: ID = 0x%x '%s'", id, &name64(lwc->name));
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Event Queues
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_QUEUE);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_QUEUE_OBJECT))
|
||||
{
|
||||
sprintf(name, "Event Queues (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_EVENT_QUEUE))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_QUEUE_OBJECT))
|
||||
{
|
||||
const auto queue = Emu.GetIdManager().GetIDData<event_queue_t>(id);
|
||||
const auto queue = Emu.GetIdManager().get<lv2_event_queue_t>(id);
|
||||
sprintf(name, "Event Queue: ID = 0x%x '%s', Key = %#llx", id, &name64(queue->name), queue->key);
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Event Ports
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_PORT);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_PORT_OBJECT))
|
||||
{
|
||||
sprintf(name, "Event Ports (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto id : Emu.GetIdManager().GetTypeIDs(TYPE_EVENT_PORT))
|
||||
for (const auto id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_PORT_OBJECT))
|
||||
{
|
||||
const auto port = Emu.GetIdManager().GetIDData<event_port_t>(id);
|
||||
const auto port = Emu.GetIdManager().get<lv2_event_port_t>(id);
|
||||
sprintf(name, "Event Port: ID = 0x%x, Name = %#llx", id, port->name);
|
||||
m_tree->AppendItem(node, name);
|
||||
}
|
||||
}
|
||||
|
||||
// Modules
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_PRX);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_PRX_OBJECT))
|
||||
{
|
||||
sprintf(name, "Modules (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
//sprintf(name, "Segment List (%l)", 2 * objects.size()); // TODO: Assuming 2 segments per PRX file is not good
|
||||
//m_tree->AppendItem(node, name);
|
||||
for (const auto& id : Emu.GetIdManager().GetTypeIDs(TYPE_PRX))
|
||||
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_PRX_OBJECT))
|
||||
{
|
||||
sprintf(name, "PRX: ID = 0x%x", id);
|
||||
m_tree->AppendItem(node, name);
|
||||
|
@ -190,12 +182,11 @@ void KernelExplorer::Update()
|
|||
}
|
||||
|
||||
// Memory Containers
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_MEM);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_MEM_OBJECT))
|
||||
{
|
||||
sprintf(name, "Memory Containers (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto& id : Emu.GetIdManager().GetTypeIDs(TYPE_MEM))
|
||||
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_MEM_OBJECT))
|
||||
{
|
||||
sprintf(name, "Memory Container: ID = 0x%x", id);
|
||||
m_tree->AppendItem(node, name);
|
||||
|
@ -203,12 +194,11 @@ void KernelExplorer::Update()
|
|||
}
|
||||
|
||||
// Event Flags
|
||||
count = Emu.GetIdManager().GetTypeCount(TYPE_EVENT_FLAG);
|
||||
if (count)
|
||||
if (u32 count = Emu.GetIdManager().get_count_by_type(SYS_EVENT_FLAG_OBJECT))
|
||||
{
|
||||
sprintf(name, "Event Flags (%d)", count);
|
||||
const auto& node = m_tree->AppendItem(root, name);
|
||||
for (const auto& id : Emu.GetIdManager().GetTypeIDs(TYPE_EVENT_FLAG))
|
||||
for (const auto& id : Emu.GetIdManager().get_IDs_by_type(SYS_EVENT_FLAG_OBJECT))
|
||||
{
|
||||
sprintf(name, "Event Flag: ID = 0x%x", id);
|
||||
m_tree->AppendItem(node, name);
|
||||
|
|
|
@ -673,26 +673,6 @@ namespace loader
|
|||
LOG_WARNING(LOADER, "Unknown module '%s'", module_name.c_str());
|
||||
}
|
||||
|
||||
//struct tbl_item
|
||||
//{
|
||||
// be_t<u32> stub;
|
||||
// be_t<u32> rtoc;
|
||||
//};
|
||||
|
||||
//struct stub_data_t
|
||||
//{
|
||||
// be_t<u32> data[3];
|
||||
//}
|
||||
//static const stub_data =
|
||||
//{
|
||||
// be_t<u32>::make(MR(11, 2)),
|
||||
// be_t<u32>::make(SC(0)),
|
||||
// be_t<u32>::make(BLR())
|
||||
//};
|
||||
|
||||
//const auto& tbl = vm::get().alloc<tbl_item>(stub->s_imports);
|
||||
//const auto& dst = vm::get().alloc<stub_data_t>(stub->s_imports);
|
||||
|
||||
for (u32 i = 0; i < stub->s_imports; ++i)
|
||||
{
|
||||
const u32 nid = stub->s_nid[i];
|
||||
|
@ -719,31 +699,6 @@ namespace loader
|
|||
{
|
||||
LOG_ERROR(LOADER, "Failed to inject code at address 0x%x", addr);
|
||||
}
|
||||
|
||||
//if (!func || !func->lle_func)
|
||||
//{
|
||||
// dst[i] = stub_data;
|
||||
|
||||
// tbl[i].stub = (dst + i).addr();
|
||||
// tbl[i].rtoc = stub->s_nid[i];
|
||||
|
||||
// stub->s_text[i] = (tbl + i).addr();
|
||||
|
||||
// if (!func)
|
||||
// {
|
||||
//
|
||||
// }
|
||||
// else //if (Ini.HLELogging.GetValue())
|
||||
// {
|
||||
// LOG_NOTICE(LOADER, "Imported function '%s' in '%s' module (HLE)", SysCalls::GetHLEFuncName(nid).c_str(), module_name.c_str());
|
||||
// }
|
||||
//}
|
||||
//else
|
||||
//{
|
||||
// stub->s_text[i] = func->lle_func.addr();
|
||||
// //Is function auto exported, than we can use it
|
||||
// LOG_NOTICE(LOADER, "Imported function '%s' in '%s' module (LLE: 0x%x)", SysCalls::GetHLEFuncName(nid).c_str(), module_name.c_str(), (u32)stub->s_text[i]);
|
||||
//}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@ -418,7 +418,6 @@
<ClInclude Include="Emu\Memory\Memory.h" />
<ClInclude Include="Emu\Memory\MemoryBlock.h" />
<ClInclude Include="Emu\Memory\atomic.h" />
<ClInclude Include="Emu\Memory\refcnt.h" />
<ClInclude Include="Emu\RSX\CgBinaryProgram.h" />
<ClInclude Include="Emu\RSX\Common\FragmentProgramDecompiler.h" />
<ClInclude Include="Emu\RSX\Common\ProgramStateCache.h" />
Some files were not shown because too many files have changed in this diff.