Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-20 11:36:13 +00:00)

Commit f38fb61c62
72 changed files with 3194 additions and 1817 deletions
@@ -69,12 +69,12 @@ void AutoPause::Reload(void)
 //Less than 1024 - be regarded as a system call.
 //emplace_back may not cause reductant move/copy operation.
 m_pause_syscall.emplace_back(num);
-LOG_WARNING(HLE, "Auto Pause: Find System Call ID %x", num);
+LOG_WARNING(HLE, "Auto Pause: Find System Call ID 0x%x", num);
 }
 else
 {
 m_pause_function.emplace_back(num);
-LOG_WARNING(HLE, "Auto Pause: Find Function Call ID %x", num);
+LOG_WARNING(HLE, "Auto Pause: Find Function Call ID 0x%x", num);
 }
 }
 list.Close();
@@ -103,7 +103,7 @@ void AutoPause::TryPause(u32 code) {
 if (code == m_pause_syscall[i])
 {
 Emu.Pause();
-LOG_ERROR(HLE, "Auto Pause Triggered: System call %x", code); //Used Error
+LOG_ERROR(HLE, "Auto Pause Triggered: System call 0x%x", code); // Used Error
 }
 }
 }
@@ -121,7 +121,7 @@ void AutoPause::TryPause(u32 code) {
 if (code == m_pause_function[i])
 {
 Emu.Pause();
-LOG_ERROR(HLE, "Auto Pause Triggered: Function call %x", code); //Used Error
+LOG_ERROR(HLE, "Auto Pause Triggered: Function call 0x%x", code); // Used Error
 }
 }
 }
@@ -104,6 +104,16 @@ union u128
 return ret;
 }
 
+static u128 from32r(u32 _3, u32 _2 = 0, u32 _1 = 0, u32 _0 = 0)
+{
+u128 ret;
+ret._u32[0] = _0;
+ret._u32[1] = _1;
+ret._u32[2] = _2;
+ret._u32[3] = _3;
+return ret;
+}
+
 static u128 fromBit(u32 bit)
 {
 u128 ret = {};
@@ -153,7 +163,7 @@ union u128
 
 std::string to_hex() const
 {
-return fmt::Format("%16llx%16llx", _u64[1], _u64[0]);
+return fmt::Format("%016llx%016llx", _u64[1], _u64[0]);
 }
 
 std::string to_xyzw() const
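The new from32r helper fills the four 32-bit lanes in reverse argument order, so the first argument ends up in the top lane (_u32[3]). A minimal standalone sketch of the same idea, using a plain struct instead of the emulator's u128 union (all names here are illustrative):

#include <cstdint>
#include <cstdio>

// Illustrative stand-in for the emulator's u128 union.
struct U128 { uint32_t w[4]; };

// Same convention as from32r in the diff: the first argument lands in the top lane.
static U128 from32r(uint32_t w3, uint32_t w2 = 0, uint32_t w1 = 0, uint32_t w0 = 0)
{
    return U128{ { w0, w1, w2, w3 } };
}

int main()
{
    const U128 v = from32r(0xAABBCCDD); // only the top lane is set
    std::printf("%08x %08x %08x %08x\n", v.w[3], v.w[2], v.w[1], v.w[0]);
    // prints: aabbccdd 00000000 00000000 00000000
    return 0;
}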
@@ -306,17 +316,12 @@ public:
 m_data = se_t<T, sizeof(T2)>::func(value);
 }
 
-static be_t MakeFromLE(const T value)
+static be_t make(const T value)
 {
 T data = se_t<T, sizeof(T2)>::func(value);
 return (be_t&)data;
 }
 
-static be_t MakeFromBE(const T value)
-{
-return (be_t&)value;
-}
-
 //template<typename T1>
 operator const T() const
 {
@@ -335,7 +340,8 @@ public:
 template<typename T1>
 operator const be_t<T1>() const
 {
-return _convert<T1, T, ((sizeof(T1) > sizeof(T)) ? 1 : (sizeof(T1) < sizeof(T) ? 2 : 0))>::func(m_data);
+return be_t<T1>::make(ToLE());
+//return _convert<T1, T, ((sizeof(T1) > sizeof(T)) ? 1 : (sizeof(T1) < sizeof(T) ? 2 : 0))>::func(m_data);
 }
 
 template<typename T1> be_t& operator += (T1 right) { return *this = T(*this) + right; }
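be_t<T>::make(value) replaces the old MakeFromLE/MakeFromBE pair: it byte-swaps a native little-endian value once and reinterprets the result as the big-endian wrapper. A rough sketch of the underlying idea against a plain 32-bit integer; the swap helper and the be_u32 type below are stand-ins, not RPCS3's se_t/be_t templates:

#include <cstdint>
#include <cstdio>

// Hypothetical 32-bit byte swap, standing in for se_t<T, 4>::func.
static uint32_t swap32(uint32_t v)
{
    return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
}

// Minimal big-endian wrapper in the spirit of be_t<u32>.
struct be_u32
{
    uint32_t m_data; // stored in big-endian byte order

    static be_u32 make(uint32_t le_value) { return be_u32{ swap32(le_value) }; }
    uint32_t ToLE() const { return swap32(m_data); }
    uint32_t ToBE() const { return m_data; }
};

int main()
{
    const be_u32 x = be_u32::make(0x12345678u);
    std::printf("LE=0x%08x BE(raw)=0x%08x\n", x.ToLE(), x.ToBE()); // LE=0x12345678, raw storage is the swapped bytes
    return 0;
}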
@@ -401,17 +407,12 @@ public:
 return se_t<const T, sizeof(T2)>::func(m_data);
 }
 
-static be_t MakeFromLE(const T value)
+static be_t make(const T value)
 {
 const T data = se_t<const T, sizeof(T2)>::func(value);
 return (be_t&)data;
 }
 
-static be_t MakeFromBE(const T value)
-{
-return (be_t&)value;
-}
-
 //template<typename T1>
 operator const T() const
 {
@@ -421,21 +422,7 @@ public:
 template<typename T1>
 operator const be_t<T1>() const
 {
-if (sizeof(T1) > sizeof(T) || std::is_floating_point<T>::value || std::is_floating_point<T1>::value)
-{
-T1 res = se_t<T1, sizeof(T1)>::func(ToLE());
-return (be_t<T1>&)res;
-}
-else if (sizeof(T1) < sizeof(T))
-{
-T1 res = ToBE() >> ((sizeof(T) - sizeof(T1)) * 8);
-return (be_t<T1>&)res;
-}
-else
-{
-T1 res = ToBE();
-return (be_t<T1>&)res;
-}
+return be_t<T1>::make(ToLE());
 }
 
 template<typename T1> be_t operator & (const be_t<T1>& right) const { const T res = ToBE() & right.ToBE(); return (be_t&)res; }
Utilities/GNU.h (157 changed lines)
@@ -13,13 +13,21 @@
 #endif
 
 template<size_t size>
-void strcpy_trunc(char (&dst)[size], const std::string& src)
+void strcpy_trunc(char(&dst)[size], const std::string& src)
 {
 const size_t count = (src.size() >= size) ? size - 1 /* truncation */ : src.size();
 memcpy(dst, src.c_str(), count);
 dst[count] = 0;
 }
 
+template<size_t size, size_t rsize>
+void strcpy_trunc(char(&dst)[size], const char(&src)[rsize])
+{
+const size_t count = (rsize >= size) ? size - 1 /* truncation */ : rsize;
+memcpy(dst, src, count);
+dst[count] = 0;
+}
+
 #if defined(__GNUG__)
 #include <cmath>
 #include <stdlib.h>
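Both strcpy_trunc overloads copy at most size - 1 bytes into a fixed char array and always write a terminating zero, so the destination can neither overflow nor end up unterminated. A small host-side usage sketch (the string contents are made up):

#include <cstring>
#include <cstdio>
#include <string>

template<size_t size>
void strcpy_trunc(char(&dst)[size], const std::string& src)
{
    const size_t count = (src.size() >= size) ? size - 1 /* truncation */ : src.size();
    std::memcpy(dst, src.c_str(), count);
    dst[count] = 0;
}

int main()
{
    char name[8];
    strcpy_trunc(name, std::string("PS3_GAME_TITLE")); // longer than the buffer
    std::printf("'%s' (len=%zu)\n", name, std::strlen(name)); // prints 'PS3_GAM' (len=7)
    return 0;
}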
@@ -37,19 +45,22 @@ void strcpy_trunc(char (&dst)[size], const std::string& src)
 #define INFINITE 0xFFFFFFFF
 #define _CRT_ALIGN(x) __attribute__((aligned(x)))
 #define InterlockedCompareExchange(ptr,new_val,old_val) __sync_val_compare_and_swap(ptr,old_val,new_val)
 #define InterlockedCompareExchange64(ptr,new_val,old_val) __sync_val_compare_and_swap(ptr,old_val,new_val)
 #define InterlockedExchange(ptr, value) __sync_lock_test_and_set(ptr, value)
 #define InterlockedOr(ptr, value) __sync_fetch_and_or(ptr, value)
 #define InterlockedAnd(ptr, value) __sync_fetch_and_and(ptr, value)
 #define InterlockedXor(ptr, value) __sync_fetch_and_xor(ptr, value)
 
-inline int64_t InterlockedOr64(volatile int64_t *dest, int64_t val)
-{
-int64_t olderval;
-int64_t oldval = *dest;
-do
-{
-olderval = oldval;
-oldval = InterlockedCompareExchange64(dest, olderval | val, olderval);
-} while (olderval != oldval);
-return oldval;
-}
+//inline int64_t InterlockedOr64(volatile int64_t *dest, int64_t val)
+//{
+//	int64_t olderval;
+//	int64_t oldval = *dest;
+//	do
+//	{
+//		olderval = oldval;
+//		oldval = __sync_val_compare_and_swap(dest, olderval | val, olderval);
+//	} while (olderval != oldval);
+//	return oldval;
+//}
 
 inline uint64_t __umulh(uint64_t a, uint64_t b)
 {
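The retired InterlockedOr64 is the classic compare-and-swap retry loop: read the current value, compute old | val, try to publish it, and retry if another thread won the race. The same pattern in portable C++11 with std::atomic, as a sketch rather than the emulator's code:

#include <atomic>
#include <cstdint>
#include <cstdio>

// fetch_or built from compare_exchange, mirroring the retry loop in the diff.
static int64_t or64_cas(std::atomic<int64_t>& dest, int64_t val)
{
    int64_t old = dest.load();
    while (!dest.compare_exchange_weak(old, old | val))
    {
        // on failure 'old' was reloaded with the current value; just retry
    }
    return old; // previous value, like InterlockedOr64
}

int main()
{
    std::atomic<int64_t> flags{ 0x1 };
    const int64_t prev = or64_cas(flags, 0x4);
    std::printf("prev=0x%llx now=0x%llx\n", (long long)prev, (long long)flags.load()); // prev=0x1 now=0x5
    return 0;
}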
@@ -84,6 +95,14 @@ int clock_gettime(int foo, struct timespec *ts);
 #endif
 
 #ifndef InterlockedCompareExchange
+static __forceinline uint8_t InterlockedCompareExchange(volatile uint8_t* dest, uint8_t exch, uint8_t comp)
+{
+return _InterlockedCompareExchange8((volatile char*)dest, exch, comp);
+}
+static __forceinline uint16_t InterlockedCompareExchange(volatile uint16_t* dest, uint16_t exch, uint16_t comp)
+{
+return _InterlockedCompareExchange16((volatile short*)dest, exch, comp);
+}
 static __forceinline uint32_t InterlockedCompareExchange(volatile uint32_t* dest, uint32_t exch, uint32_t comp)
 {
 return _InterlockedCompareExchange((volatile long*)dest, exch, comp);
@ -92,4 +111,114 @@ static __forceinline uint64_t InterlockedCompareExchange(volatile uint64_t* dest
|
|||
{
|
||||
return _InterlockedCompareExchange64((volatile long long*)dest, exch, comp);
|
||||
}
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#ifndef InterlockedExchange
|
||||
static __forceinline uint8_t InterlockedExchange(volatile uint8_t* dest, uint8_t value)
|
||||
{
|
||||
return _InterlockedExchange8((volatile char*)dest, value);
|
||||
}
|
||||
static __forceinline uint16_t InterlockedExchange(volatile uint16_t* dest, uint16_t value)
|
||||
{
|
||||
return _InterlockedExchange16((volatile short*)dest, value);
|
||||
}
|
||||
static __forceinline uint32_t InterlockedExchange(volatile uint32_t* dest, uint32_t value)
|
||||
{
|
||||
return _InterlockedExchange((volatile long*)dest, value);
|
||||
}
|
||||
static __forceinline uint64_t InterlockedExchange(volatile uint64_t* dest, uint64_t value)
|
||||
{
|
||||
return _InterlockedExchange64((volatile long long*)dest, value);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef InterlockedOr
|
||||
static __forceinline uint8_t InterlockedOr(volatile uint8_t* dest, uint8_t value)
|
||||
{
|
||||
return _InterlockedOr8((volatile char*)dest, value);
|
||||
}
|
||||
static __forceinline uint16_t InterlockedOr(volatile uint16_t* dest, uint16_t value)
|
||||
{
|
||||
return _InterlockedOr16((volatile short*)dest, value);
|
||||
}
|
||||
static __forceinline uint32_t InterlockedOr(volatile uint32_t* dest, uint32_t value)
|
||||
{
|
||||
return _InterlockedOr((volatile long*)dest, value);
|
||||
}
|
||||
static __forceinline uint64_t InterlockedOr(volatile uint64_t* dest, uint64_t value)
|
||||
{
|
||||
return _InterlockedOr64((volatile long long*)dest, value);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef InterlockedAnd
|
||||
static __forceinline uint8_t InterlockedAnd(volatile uint8_t* dest, uint8_t value)
|
||||
{
|
||||
return _InterlockedAnd8((volatile char*)dest, value);
|
||||
}
|
||||
static __forceinline uint16_t InterlockedAnd(volatile uint16_t* dest, uint16_t value)
|
||||
{
|
||||
return _InterlockedAnd16((volatile short*)dest, value);
|
||||
}
|
||||
static __forceinline uint32_t InterlockedAnd(volatile uint32_t* dest, uint32_t value)
|
||||
{
|
||||
return _InterlockedAnd((volatile long*)dest, value);
|
||||
}
|
||||
static __forceinline uint64_t InterlockedAnd(volatile uint64_t* dest, uint64_t value)
|
||||
{
|
||||
return _InterlockedAnd64((volatile long long*)dest, value);
|
||||
}
|
||||
#endif
|
||||
|
||||
#ifndef InterlockedXor
|
||||
static __forceinline uint8_t InterlockedXor(volatile uint8_t* dest, uint8_t value)
|
||||
{
|
||||
return _InterlockedXor8((volatile char*)dest, value);
|
||||
}
|
||||
static __forceinline uint16_t InterlockedXor(volatile uint16_t* dest, uint16_t value)
|
||||
{
|
||||
return _InterlockedXor16((volatile short*)dest, value);
|
||||
}
|
||||
static __forceinline uint32_t InterlockedXor(volatile uint32_t* dest, uint32_t value)
|
||||
{
|
||||
return _InterlockedXor((volatile long*)dest, value);
|
||||
}
|
||||
static __forceinline uint64_t InterlockedXor(volatile uint64_t* dest, uint64_t value)
|
||||
{
|
||||
return _InterlockedXor64((volatile long long*)dest, value);
|
||||
}
|
||||
#endif
|
||||
|
||||
static __forceinline uint32_t cntlz32(uint32_t arg)
|
||||
{
|
||||
#if defined(__GNUG__)
|
||||
return __builtin_clzl(arg);
|
||||
#else
|
||||
unsigned long res;
|
||||
if (!_BitScanReverse(&res, arg))
|
||||
{
|
||||
return 32;
|
||||
}
|
||||
else
|
||||
{
|
||||
return res ^ 31;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
||||
static __forceinline uint64_t cntlz64(uint64_t arg)
|
||||
{
|
||||
#if defined(__GNUG__)
|
||||
return __builtin_clzll(arg);
|
||||
#else
|
||||
unsigned long res;
|
||||
if (!_BitScanReverse64(&res, arg))
|
||||
{
|
||||
return 64;
|
||||
}
|
||||
else
|
||||
{
|
||||
return res ^ 63;
|
||||
}
|
||||
#endif
|
||||
}
|
||||
|
|
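cntlz32/cntlz64 count leading zero bits with the convention that an input of 0 yields the full width (32 or 64); the _BitScanReverse path reaches that through the res ^ 31 trick. A tiny shift-loop reference for the 32-bit convention (note that the GCC branch in the diff forwards to __builtin_clzl, which is only exact where long is 32 bits wide; that caveat is my observation, not something the diff states):

#include <cstdint>
#include <cstdio>

// Leading-zero count with the same "0 maps to 32" convention as cntlz32.
static uint32_t cntlz32_ref(uint32_t arg)
{
    if (arg == 0) return 32;                     // the builtin is undefined for 0, handle it explicitly
    uint32_t n = 0;
    while (!(arg & 0x80000000u)) { arg <<= 1; n++; }
    return n;
}

int main()
{
    std::printf("%u %u %u\n", cntlz32_ref(0), cntlz32_ref(1), cntlz32_ref(0x80000000u)); // 32 31 0
    return 0;
}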
|
@ -1,4 +1,5 @@
|
|||
#pragma once
|
||||
#include "Emu/Memory/atomic_type.h"
|
||||
|
||||
bool SM_IsAborted();
|
||||
void SM_Sleep();
|
||||
|
@ -24,20 +25,21 @@ template
|
|||
>
|
||||
class SMutexBase
|
||||
{
|
||||
static_assert(sizeof(T) == sizeof(std::atomic<T>), "Invalid SMutexBase type");
|
||||
std::atomic<T> owner;
|
||||
static_assert(sizeof(T) == sizeof(atomic_le_t<T>), "Invalid SMutexBase type");
|
||||
T owner;
|
||||
typedef atomic_le_t<T> AT;
|
||||
|
||||
public:
|
||||
static const T GetFreeValue()
|
||||
{
|
||||
static const u64 value = free_value;
|
||||
return (const T&)value;
|
||||
return (T&)value;
|
||||
}
|
||||
|
||||
static const T GetDeadValue()
|
||||
{
|
||||
static const u64 value = dead_value;
|
||||
return (const T&)value;
|
||||
return (T&)value;
|
||||
}
|
||||
|
||||
void initialize()
|
||||
|
@ -45,11 +47,6 @@ public:
|
|||
owner = GetFreeValue();
|
||||
}
|
||||
|
||||
SMutexBase()
|
||||
{
|
||||
initialize();
|
||||
}
|
||||
|
||||
void finalize()
|
||||
{
|
||||
owner = GetDeadValue();
|
||||
|
@ -66,9 +63,9 @@ public:
|
|||
{
|
||||
return SMR_ABORT;
|
||||
}
|
||||
T old = GetFreeValue();
|
||||
T old = reinterpret_cast<AT&>(owner).compare_and_swap(GetFreeValue(), tid);
|
||||
|
||||
if (!owner.compare_exchange_strong(old, tid))
|
||||
if (old != GetFreeValue())
|
||||
{
|
||||
if (old == tid)
|
||||
{
|
||||
|
@ -90,9 +87,9 @@ public:
|
|||
{
|
||||
return SMR_ABORT;
|
||||
}
|
||||
T old = tid;
|
||||
T old = reinterpret_cast<AT&>(owner).compare_and_swap(tid, to);
|
||||
|
||||
if (!owner.compare_exchange_strong(old, to))
|
||||
if (old != tid)
|
||||
{
|
||||
if (old == GetFreeValue())
|
||||
{
|
||||
|
@ -131,5 +128,4 @@ public:
|
|||
}
|
||||
};
|
||||
|
||||
typedef SMutexBase<u32>
|
||||
SMutex;
|
||||
typedef SMutexBase<u32> SMutex;
|
||||
|
|
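The SMutex change swaps std::atomic::compare_exchange_strong for the project's own atomic_le_t::compare_and_swap: lock() tries to CAS the owner from the free value to the caller's thread id, and the returned previous value says whether the lock was free, already held by this thread, or held by someone else. A reduced sketch of that try-lock shape with std::atomic; the SMR_* result codes and the sleep/abort handling are omitted:

#include <atomic>
#include <cstdint>
#include <cstdio>

enum LockResult { LOCK_OK, LOCK_RECURSIVE, LOCK_BUSY };

static const uint32_t FREE_VALUE = 0;

// One CAS attempt in the spirit of SMutexBase::lock()'s fast path.
static LockResult trylock(std::atomic<uint32_t>& owner, uint32_t tid)
{
    uint32_t old = FREE_VALUE;
    if (owner.compare_exchange_strong(old, tid)) return LOCK_OK; // was free, now ours
    return (old == tid) ? LOCK_RECURSIVE : LOCK_BUSY;            // 'old' holds the previous owner
}

int main()
{
    std::atomic<uint32_t> owner{ FREE_VALUE };
    std::printf("%d %d %d\n", trylock(owner, 7), trylock(owner, 7), trylock(owner, 9)); // 0 1 2
    return 0;
}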
|
@ -120,8 +120,8 @@ int Decrypt(rFile& pkg_f, rFile& dec_pkg_f, PKGHeader* m_header)
|
|||
{
|
||||
aes_crypt_ecb(&c, AES_ENCRYPT, iv, ctr+j*HASH_LEN);
|
||||
|
||||
be_t<u64> hi = be_t<u64>::MakeFromBE(*(u64*)&iv[0]);
|
||||
be_t<u64> lo = be_t<u64>::MakeFromBE(*(u64*)&iv[8]);
|
||||
be_t<u64> hi = *(be_t<u64>*)&iv[0];
|
||||
be_t<u64> lo = *(be_t<u64>*)&iv[8];
|
||||
lo++;
|
||||
|
||||
if (lo == 0)
|
||||
|
|
|
@ -109,7 +109,7 @@ public:
|
|||
|
||||
virtual std::string GetThreadName() const
|
||||
{
|
||||
std::string temp = (GetFName() + fmt::Format("[0x%08llx]", PC));
|
||||
std::string temp = (GetFName() + fmt::Format("[0x%08x]", PC));
|
||||
return temp;
|
||||
}
|
||||
|
||||
|
|
|
@ -61,5 +61,4 @@ enum
|
|||
|
||||
struct DMAC
|
||||
{
|
||||
u32 ls_offset;
|
||||
};
|
||||
|
|
|
@ -1037,7 +1037,7 @@ private:
|
|||
case 0x1: Write("HyperCall"); break;
|
||||
case 0x2: Write("sc"); break;
|
||||
case 0x22: Write("HyperCall LV1"); break;
|
||||
default: Write(fmt::Format("Unknown sc: %x", sc_code));
|
||||
default: Write(fmt::Format("Unknown sc: 0x%x", sc_code));
|
||||
}
|
||||
}
|
||||
void B(s32 ll, u32 aa, u32 lk)
|
||||
|
|
|
@ -149,7 +149,7 @@ private:
|
|||
((u64)a < (u64)simm16 && (to & 0x2)) ||
|
||||
((u64)a > (u64)simm16 && (to & 0x1)) )
|
||||
{
|
||||
UNK(fmt::Format("Trap! (tdi %x, r%d, %x)", to, ra, simm16));
|
||||
UNK(fmt::Format("Trap! (tdi 0x%x, r%d, 0x%x)", to, ra, simm16));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -163,7 +163,7 @@ private:
|
|||
((u32)a < (u32)simm16 && (to & 0x2)) ||
|
||||
((u32)a > (u32)simm16 && (to & 0x1)) )
|
||||
{
|
||||
UNK(fmt::Format("Trap! (twi %x, r%d, %x)", to, ra, simm16));
|
||||
UNK(fmt::Format("Trap! (twi 0x%x, r%d, 0x%x)", to, ra, simm16));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -2102,7 +2102,7 @@ private:
|
|||
break;
|
||||
case 0x4: CPU.FastStop(); break;
|
||||
case 0x22: UNK("HyperCall LV1"); break;
|
||||
default: UNK(fmt::Format("Unknown sc: %x", sc_code));
|
||||
default: UNK(fmt::Format("Unknown sc: 0x%x", sc_code));
|
||||
}
|
||||
}
|
||||
void B(s32 ll, u32 aa, u32 lk)
|
||||
|
@ -2266,7 +2266,7 @@ private:
|
|||
((u32)a < (u32)b && (to & 0x2)) ||
|
||||
((u32)a > (u32)b && (to & 0x1)) )
|
||||
{
|
||||
UNK(fmt::Format("Trap! (tw %x, r%d, r%d)", to, ra, rb));
|
||||
UNK(fmt::Format("Trap! (tw 0x%x, r%d, r%d)", to, ra, rb));
|
||||
}
|
||||
}
|
||||
void LVSL(u32 vd, u32 ra, u32 rb)
|
||||
|
@ -2464,8 +2464,6 @@ private:
|
|||
}
|
||||
void DCBST(u32 ra, u32 rb)
|
||||
{
|
||||
//UNK("dcbst", false);
|
||||
_mm_mfence();
|
||||
}
|
||||
void LWZUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -2521,8 +2519,6 @@ private:
|
|||
}
|
||||
void DCBF(u32 ra, u32 rb)
|
||||
{
|
||||
//UNK("dcbf", false);
|
||||
_mm_mfence();
|
||||
}
|
||||
void LBZX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -2739,8 +2735,6 @@ private:
|
|||
}
|
||||
void DCBTST(u32 ra, u32 rb, u32 th)
|
||||
{
|
||||
//UNK("dcbtst", false);
|
||||
_mm_mfence();
|
||||
}
|
||||
void STBUX(u32 rs, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -2758,8 +2752,6 @@ private:
|
|||
}
|
||||
void DCBT(u32 ra, u32 rb, u32 th)
|
||||
{
|
||||
//UNK("dcbt", false);
|
||||
_mm_mfence();
|
||||
}
|
||||
void LHZX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -2796,7 +2788,6 @@ private:
|
|||
}
|
||||
void DST(u32 ra, u32 rb, u32 strm, u32 t)
|
||||
{
|
||||
_mm_mfence();
|
||||
}
|
||||
void LHAX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -2825,7 +2816,6 @@ private:
|
|||
}
|
||||
void DSTST(u32 ra, u32 rb, u32 strm, u32 t)
|
||||
{
|
||||
_mm_mfence();
|
||||
}
|
||||
void LHAUX(u32 rd, u32 ra, u32 rb)
|
||||
{
|
||||
|
@ -3166,7 +3156,6 @@ private:
|
|||
}
|
||||
void DSS(u32 strm, u32 a)
|
||||
{
|
||||
_mm_mfence();
|
||||
}
|
||||
void SRAWI(u32 ra, u32 rs, u32 sh, bool rc)
|
||||
{
|
||||
|
@ -3239,7 +3228,6 @@ private:
|
|||
auto const cache_line = vm::get_ptr<u8>(addr & ~127);
|
||||
if (cache_line)
|
||||
memset(cache_line, 0, 128);
|
||||
_mm_mfence();
|
||||
}
|
||||
void LWZ(u32 rd, u32 ra, s32 d)
|
||||
{
|
||||
|
|
|
@ -187,7 +187,7 @@ u64 PPUThread::FastCall2(u32 addr, u32 rtoc)
|
|||
LR = Emu.m_ppu_thr_stop;
|
||||
SetCurrentNamedThread(this);
|
||||
|
||||
Task();
|
||||
CPUThread::Task();
|
||||
|
||||
m_status = old_status;
|
||||
PC = old_PC;
|
||||
|
@ -202,4 +202,16 @@ u64 PPUThread::FastCall2(u32 addr, u32 rtoc)
|
|||
void PPUThread::FastStop()
|
||||
{
|
||||
m_status = Stopped;
|
||||
}
|
||||
}
|
||||
|
||||
void PPUThread::Task()
|
||||
{
|
||||
if (m_custom_task)
|
||||
{
|
||||
m_custom_task(*this);
|
||||
}
|
||||
else
|
||||
{
|
||||
CPUThread::Task();
|
||||
}
|
||||
}
|
||||
|
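PPUThread::Task() now checks an optional m_custom_task callback before falling back to the interpreter loop in CPUThread::Task(), which is what lets HLE code run a C++ callback on the PPU thread. The dispatch itself is a plain std::function test; a stripped-down sketch with placeholder class names:

#include <cstdio>
#include <functional>

struct FakeThread
{
    std::function<void(FakeThread&)> m_custom_task; // empty by default

    void DefaultTask() { std::printf("interpreter loop\n"); }

    // Same shape as the new PPUThread::Task() in the diff.
    void Task()
    {
        if (m_custom_task) m_custom_task(*this);
        else DefaultTask();
    }
};

int main()
{
    FakeThread t;
    t.Task();                                                          // interpreter loop
    t.m_custom_task = [](FakeThread&) { std::printf("custom task\n"); };
    t.Task();                                                          // custom task
    return 0;
}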
|
|
@ -470,9 +470,6 @@ struct FPRdouble
|
|||
|
||||
class PPUThread : public PPCThread
|
||||
{
|
||||
public:
|
||||
u32 owned_mutexes;
|
||||
|
||||
public:
|
||||
PPCdouble FPR[32]; //Floating Point Register
|
||||
FPSCRhdr FPSCR; //Floating Point Status and Control Register
|
||||
|
@ -556,6 +553,9 @@ public:
|
|||
u64 R_ADDR; // reservation address
|
||||
u64 R_VALUE; // reservation value (BE)
|
||||
|
||||
u32 owned_mutexes;
|
||||
std::function<void(PPUThread& CPU)> m_custom_task;
|
||||
|
||||
public:
|
||||
PPUThread();
|
||||
virtual ~PPUThread();
|
||||
|
@ -785,17 +785,18 @@ public:
|
|||
|
||||
public:
|
||||
virtual void InitRegs();
|
||||
virtual void Task();
|
||||
u64 GetStackArg(s32 i);
|
||||
u64 FastCall2(u32 addr, u32 rtoc);
|
||||
void FastStop();
|
||||
|
||||
virtual void DoReset() override;
|
||||
virtual void DoRun() override;
|
||||
|
||||
protected:
|
||||
virtual void DoReset() override;
|
||||
virtual void DoPause() override;
|
||||
virtual void DoResume() override;
|
||||
virtual void DoStop() override;
|
||||
|
||||
protected:
|
||||
virtual void Step() override
|
||||
{
|
||||
//if(++cycle > 20)
|
||||
|
|
|
@ -190,7 +190,7 @@ bool RawSPUThread::Write32(const u64 addr, const u32 value)
|
|||
|
||||
void RawSPUThread::InitRegs()
|
||||
{
|
||||
dmac.ls_offset = m_offset = (u32)GetStartAddr() + RAW_SPU_LS_OFFSET;
|
||||
ls_offset = m_offset = (u32)GetStartAddr() + RAW_SPU_LS_OFFSET;
|
||||
SPUThread::InitRegs();
|
||||
}
|
||||
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
#define UNIMPLEMENTED() UNK(__FUNCTION__)
|
||||
|
||||
#define MEM_AND_REG_HASH() \
|
||||
unsigned char mem_h[20]; sha1(vm::get_ptr<u8>(CPU.dmac.ls_offset), 256*1024, mem_h); \
|
||||
unsigned char mem_h[20]; sha1(vm::get_ptr<u8>(CPU.ls_offset), 256*1024, mem_h); \
|
||||
unsigned char reg_h[20]; sha1((const unsigned char*)CPU.GPR, sizeof(CPU.GPR), reg_h); \
|
||||
LOG_NOTICE(Log::SPU, "Mem hash: 0x%llx, reg hash: 0x%llx", *(u64*)mem_h, *(u64*)reg_h);
|
||||
|
||||
|
@ -432,9 +432,7 @@ private:
|
|||
}
|
||||
void LQX(u32 rt, u32 ra, u32 rb)
|
||||
{
|
||||
u32 a = CPU.GPR[ra]._u32[3], b = CPU.GPR[rb]._u32[3];
|
||||
|
||||
u32 lsa = (a + b) & 0x3fff0;
|
||||
u32 lsa = (CPU.GPR[ra]._u32[3] + CPU.GPR[rb]._u32[3]) & 0x3fff0;
|
||||
|
||||
CPU.GPR[rt] = CPU.ReadLS128(lsa);
|
||||
}
|
||||
|
@ -1088,6 +1086,7 @@ private:
|
|||
for (int i = 0; i < 4; i++)
|
||||
{
|
||||
CPU.GPR[rt]._f[i] = (float)CPU.GPR[ra]._u32[i];
|
||||
|
||||
u32 exp = ((CPU.GPR[rt]._u32[i] >> 23) & 0xff) - scale;
|
||||
|
||||
if (exp > 255) //< 0
|
||||
|
|
|
@ -78,6 +78,7 @@ public:
|
|||
SPUInterpreter* inter;
|
||||
JitRuntime runtime;
|
||||
bool first;
|
||||
bool need_check;
|
||||
|
||||
struct SPURecEntry
|
||||
{
|
||||
|
@ -457,7 +458,7 @@ private:
|
|||
c.mov(cpu_dword(PC), CPU.PC);
|
||||
// This instruction must be used following a store instruction that modifies the instruction stream.
|
||||
c.mfence();
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1 + 0x2000000);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -1142,6 +1143,7 @@ private:
|
|||
|
||||
c.mov(*addr, CPU.PC + 4);
|
||||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
if (ra) c.or_(*pos_var, 0x2000000 << 2); // rude (check if not LR)
|
||||
c.cmp(cpu_dword(GPR[rt]._u32[3]), 0);
|
||||
c.cmovne(*pos_var, *addr);
|
||||
c.shr(*pos_var, 2);
|
||||
|
@ -1160,6 +1162,7 @@ private:
|
|||
|
||||
c.mov(*addr, CPU.PC + 4);
|
||||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
if (ra) c.or_(*pos_var, 0x2000000 << 2); // rude (check if not LR)
|
||||
c.cmp(cpu_dword(GPR[rt]._u32[3]), 0);
|
||||
c.cmove(*pos_var, *addr);
|
||||
c.shr(*pos_var, 2);
|
||||
|
@ -1178,6 +1181,7 @@ private:
|
|||
|
||||
c.mov(*addr, CPU.PC + 4);
|
||||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
if (ra) c.or_(*pos_var, 0x2000000 << 2); // rude (check if not LR)
|
||||
c.cmp(cpu_word(GPR[rt]._u16[6]), 0);
|
||||
c.cmovne(*pos_var, *addr);
|
||||
c.shr(*pos_var, 2);
|
||||
|
@ -1196,6 +1200,7 @@ private:
|
|||
|
||||
c.mov(*addr, CPU.PC + 4);
|
||||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
if (ra) c.or_(*pos_var, 0x2000000 << 2); // rude (check if not LR)
|
||||
c.cmp(cpu_word(GPR[rt]._u16[6]), 0);
|
||||
c.cmove(*pos_var, *addr);
|
||||
c.shr(*pos_var, 2);
|
||||
|
@ -1244,6 +1249,7 @@ private:
|
|||
do_finalize = true;
|
||||
|
||||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
if (ra) c.or_(*pos_var, 0x2000000 << 2); // rude (check if not LR)
|
||||
c.shr(*pos_var, 2);
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -1267,6 +1273,7 @@ private:
|
|||
c.mov(*pos_var, cpu_dword(GPR[ra]._u32[3]));
|
||||
c.mov(cpu_dword(GPR[rt]._u32[3]), CPU.PC + 4);
|
||||
c.shr(*pos_var, 2);
|
||||
c.or_(*pos_var, 0x2000000);
|
||||
LOG_OPCODE();
|
||||
}
|
||||
void IRET(u32 ra)
|
||||
|
@ -1947,23 +1954,22 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._s32[3]));
|
||||
c.cmp(*addr, cpu_dword(GPR[rb]._s32[3]));
|
||||
c.mov(*addr, 0);
|
||||
c.setg(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
void CLZ(u32 rt, u32 ra)
|
||||
{
|
||||
XmmInvalidate(rt);
|
||||
c.mov(*qw0, 32 + 31);
|
||||
for (u32 i = 0; i < 4; i++)
|
||||
{
|
||||
c.bsr(*addr, cpu_dword(GPR[ra]._u32[i]));
|
||||
c.cmovz(*addr, dword_ptr(*g_imm_var, (s32)offsetof(g_imm_table_struct, fsmb_table[0xffff]))); // load 0xffffffff
|
||||
c.neg(*addr);
|
||||
c.add(*addr, 31);
|
||||
c.cmovz(*addr, qw0->r32());
|
||||
c.xor_(*addr, 31);
|
||||
c.mov(cpu_dword(GPR[rt]._u32[i]), *addr);
|
||||
}
|
||||
LOG_OPCODE();
|
||||
|
@ -2308,11 +2314,10 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._u32[3]));
|
||||
c.cmp(*addr, cpu_dword(GPR[rb]._u32[3]));
|
||||
c.mov(*addr, 0);
|
||||
c.seta(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -2662,11 +2667,10 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._s32[3]));
|
||||
c.cmp(*addr, cpu_dword(GPR[rb]._s32[3]));
|
||||
c.mov(*addr, 0);
|
||||
c.sete(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -3324,11 +3328,10 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._s32[3]));
|
||||
c.cmp(*addr, i10);
|
||||
c.mov(*addr, 0);
|
||||
c.setg(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -3390,11 +3393,10 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._u32[3]));
|
||||
c.cmp(*addr, i10);
|
||||
c.mov(*addr, 0);
|
||||
c.seta(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
@ -3441,11 +3443,10 @@ private:
|
|||
{
|
||||
c.mov(*addr, cpu_dword(GPR[ra]._u32[3]));
|
||||
c.cmp(*addr, i10);
|
||||
c.mov(*addr, 0);
|
||||
c.sete(addr->r8());
|
||||
c.neg(*addr);
|
||||
c.shl(*addr, 24);
|
||||
c.mov(*pos_var, (CPU.PC >> 2) + 1);
|
||||
c.xor_(*pos_var, *addr);
|
||||
c.or_(*pos_var, *addr);
|
||||
do_finalize = true;
|
||||
LOG_OPCODE();
|
||||
}
|
||||
|
|
|
@ -20,6 +20,7 @@ SPURecompilerCore::SPURecompilerCore(SPUThread& cpu)
|
|||
, inter(new SPUInterpreter(cpu))
|
||||
, CPU(cpu)
|
||||
, first(true)
|
||||
, need_check(false)
|
||||
{
|
||||
memset(entry, 0, sizeof(entry));
|
||||
X86CpuInfo inf;
|
||||
|
@ -48,7 +49,7 @@ void SPURecompilerCore::Compile(u16 pos)
|
|||
u64 time0 = 0;
|
||||
|
||||
SPUDisAsm dis_asm(CPUDisAsm_InterpreterMode);
|
||||
dis_asm.offset = vm::get_ptr<u8>(CPU.dmac.ls_offset);
|
||||
dis_asm.offset = vm::get_ptr<u8>(CPU.ls_offset);
|
||||
|
||||
StringLogger stringLogger;
|
||||
stringLogger.setOption(kLoggerOptionBinaryForm, true);
|
||||
|
@ -102,7 +103,7 @@ void SPURecompilerCore::Compile(u16 pos)
|
|||
|
||||
while (true)
|
||||
{
|
||||
const u32 opcode = vm::read32(CPU.dmac.ls_offset + pos * 4);
|
||||
const u32 opcode = vm::read32(CPU.ls_offset + pos * 4);
|
||||
m_enc->do_finalize = false;
|
||||
if (opcode)
|
||||
{
|
||||
|
@ -181,8 +182,8 @@ void SPURecompilerCore::Compile(u16 pos)
|
|||
|
||||
u8 SPURecompilerCore::DecodeMemory(const u32 address)
|
||||
{
|
||||
assert(CPU.dmac.ls_offset == address - CPU.PC);
|
||||
const u32 m_offset = CPU.dmac.ls_offset;
|
||||
assert(CPU.ls_offset == address - CPU.PC);
|
||||
const u32 m_offset = CPU.ls_offset;
|
||||
const u16 pos = (u16)(CPU.PC >> 2);
|
||||
|
||||
//ConLog.Write("DecodeMemory: pos=%d", pos);
|
||||
|
@ -192,20 +193,26 @@ u8 SPURecompilerCore::DecodeMemory(const u32 address)
|
|||
{
|
||||
// check data (hard way)
|
||||
bool is_valid = true;
|
||||
//for (u32 i = pos; i < (u32)(entry[pos].count + pos); i++)
|
||||
//{
|
||||
// if (entry[i].valid != ls[i])
|
||||
// {
|
||||
// is_valid = false;
|
||||
// break;
|
||||
// }
|
||||
//}
|
||||
if (need_check)
|
||||
{
|
||||
for (u32 i = 0; i < 0x10000; i++)
|
||||
{
|
||||
if (entry[i].valid && entry[i].valid != ls[i])
|
||||
{
|
||||
is_valid = false;
|
||||
break;
|
||||
}
|
||||
}
|
||||
need_check = false;
|
||||
}
|
||||
// invalidate if necessary
|
||||
if (!is_valid)
|
||||
{
|
||||
for (u32 i = 0; i < 0x10000; i++)
|
||||
{
|
||||
if (entry[i].pointer &&
|
||||
if (!entry[i].pointer) continue;
|
||||
|
||||
if (!entry[i].valid || entry[i].valid != ls[i] ||
|
||||
i + (u32)entry[i].count > (u32)pos &&
|
||||
i < (u32)pos + (u32)entry[pos].count)
|
||||
{
|
||||
|
@ -214,6 +221,11 @@ u8 SPURecompilerCore::DecodeMemory(const u32 address)
|
|||
//RtlDeleteFunctionTable(&entry[i].info);
|
||||
#endif
|
||||
entry[i].pointer = nullptr;
|
||||
for (u32 j = i; j < i + (u32)entry[i].count; j++)
|
||||
{
|
||||
entry[j].valid = 0;
|
||||
}
|
||||
//need_check = true;
|
||||
}
|
||||
}
|
||||
//LOG_ERROR(Log::SPU, "SPURecompilerCore::DecodeMemory(ls_addr=0x%x): code has changed", pos * sizeof(u32));
|
||||
|
@ -254,11 +266,17 @@ u8 SPURecompilerCore::DecodeMemory(const u32 address)
|
|||
u32 res = pos;
|
||||
res = func(cpu, vm::get_ptr<void>(m_offset), imm_table.data(), &g_imm_table);
|
||||
|
||||
if (res > 0xffff)
|
||||
if (res & 0x1000000)
|
||||
{
|
||||
CPU.SPU.Status.SetValue(SPU_STATUS_STOPPED_BY_HALT);
|
||||
CPU.Stop();
|
||||
res = ~res;
|
||||
res &= ~0x1000000;
|
||||
}
|
||||
|
||||
if (res & 0x2000000)
|
||||
{
|
||||
need_check = true;
|
||||
res &= ~0x2000000;
|
||||
}
|
||||
|
||||
if (did_compile)
|
||||
|
|
|
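DecodeMemory now receives status bits packed into the value returned by the recompiled block: 0x1000000 means the SPU halted and 0x2000000 means compiled code may have changed and must be re-verified, and both bits are masked off before the remainder is used as the next position. A tiny sketch of that flag decoding; the constants are taken from the diff, the surrounding recompiler state is faked:

#include <cstdint>
#include <cstdio>

static const uint32_t RES_HALT  = 0x1000000; // stop the SPU thread
static const uint32_t RES_CHECK = 0x2000000; // recheck compiled blocks next time

int main()
{
    bool need_check = false;
    bool halted = false;

    uint32_t res = 0x123 | RES_CHECK; // pretend the JITted block returned this

    if (res & RES_HALT)  { halted = true;     res &= ~RES_HALT; }
    if (res & RES_CHECK) { need_check = true; res &= ~RES_CHECK; }

    std::printf("next pos=0x%x halted=%d need_check=%d\n", res, halted, need_check); // 0x123 0 1
    return 0;
}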
@ -50,7 +50,15 @@ void SPUThread::Task()
|
|||
const int round = std::fegetround();
|
||||
std::fesetround(FE_TOWARDZERO);
|
||||
|
||||
CPUThread::Task();
|
||||
if (m_custom_task)
|
||||
{
|
||||
m_custom_task(*this);
|
||||
}
|
||||
else
|
||||
{
|
||||
CPUThread::Task();
|
||||
}
|
||||
|
||||
if (std::fegetround() != FE_TOWARDZERO)
|
||||
{
|
||||
LOG_ERROR(Log::SPU, "Rounding mode has changed(%d)", std::fegetround());
|
||||
|
@ -68,11 +76,11 @@ void SPUThread::DoReset()
|
|||
|
||||
void SPUThread::InitRegs()
|
||||
{
|
||||
GPR[1]._u32[3] = 0x40000 - 120;
|
||||
GPR[1]._u32[3] = 0x3FFF0; // initial stack frame pointer
|
||||
|
||||
cfg.Reset();
|
||||
|
||||
dmac.ls_offset = m_offset;
|
||||
ls_offset = m_offset;
|
||||
|
||||
SPU.Status.SetValue(SPU_STATUS_STOPPED);
|
||||
|
||||
|
@ -138,6 +146,31 @@ void SPUThread::DoClose()
|
|||
}
|
||||
}
|
||||
|
||||
void SPUThread::FastCall(u32 ls_addr)
|
||||
{
|
||||
// can't be called from another thread (because it doesn't make sense)
|
||||
WriteLS32(0x0, 2);
|
||||
|
||||
auto old_PC = PC;
|
||||
auto old_LR = GPR[0]._u32[3];
|
||||
auto old_stack = GPR[1]._u32[3]; // only saved and restored (may be wrong)
|
||||
|
||||
m_status = Running;
|
||||
PC = ls_addr;
|
||||
GPR[0]._u32[3] = 0x0;
|
||||
|
||||
CPUThread::Task();
|
||||
|
||||
PC = old_PC;
|
||||
GPR[0]._u32[3] = old_LR;
|
||||
GPR[1]._u32[3] = old_stack;
|
||||
}
|
||||
|
||||
void SPUThread::FastStop()
|
||||
{
|
||||
m_status = Stopped;
|
||||
}
|
||||
|
||||
void SPUThread::WriteSNR(bool number, u32 value)
|
||||
{
|
||||
if (cfg.value & ((u64)1 << (u64)number))
|
||||
|
@ -181,11 +214,11 @@ void SPUThread::ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size)
|
|||
if ((addr <= 0x3ffff) && (addr + size <= 0x40000))
|
||||
{
|
||||
// LS access
|
||||
ea = spu->dmac.ls_offset + addr;
|
||||
ea = spu->ls_offset + addr;
|
||||
}
|
||||
else if ((cmd & MFC_PUT_CMD) && size == 4 && (addr == SYS_SPU_THREAD_SNR1 || addr == SYS_SPU_THREAD_SNR2))
|
||||
{
|
||||
spu->WriteSNR(SYS_SPU_THREAD_SNR2 == addr, vm::read32(dmac.ls_offset + lsa));
|
||||
spu->WriteSNR(SYS_SPU_THREAD_SNR2 == addr, vm::read32(ls_offset + lsa));
|
||||
return;
|
||||
}
|
||||
else
|
||||
|
@ -231,13 +264,13 @@ void SPUThread::ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size)
|
|||
{
|
||||
case MFC_PUT_CMD:
|
||||
{
|
||||
memcpy(vm::get_ptr<void>(ea), vm::get_ptr<void>(dmac.ls_offset + lsa), size);
|
||||
memcpy(vm::get_ptr<void>(ea), vm::get_ptr<void>(ls_offset + lsa), size);
|
||||
return;
|
||||
}
|
||||
|
||||
case MFC_GET_CMD:
|
||||
{
|
||||
memcpy(vm::get_ptr<void>(dmac.ls_offset + lsa), vm::get_ptr<void>(ea), size);
|
||||
memcpy(vm::get_ptr<void>(ls_offset + lsa), vm::get_ptr<void>(ea), size);
|
||||
return;
|
||||
}
|
||||
|
||||
|
@ -269,10 +302,10 @@ void SPUThread::ListCmd(u32 lsa, u64 ea, u16 tag, u16 size, u32 cmd, MFCReg& MFC
|
|||
|
||||
for (u32 i = 0; i < list_size; i++)
|
||||
{
|
||||
auto rec = vm::ptr<list_element>::make(dmac.ls_offset + list_addr + i * 8);
|
||||
auto rec = vm::ptr<list_element>::make(ls_offset + list_addr + i * 8);
|
||||
|
||||
u32 size = rec->ts;
|
||||
if (size < 16 && size != 1 && size != 2 && size != 4 && size != 8)
|
||||
if (!(rec->s.ToBE() & se16(0x8000)) && size < 16 && size != 1 && size != 2 && size != 4 && size != 8)
|
||||
{
|
||||
LOG_ERROR(Log::SPU, "DMA List: invalid transfer size(%d)", size);
|
||||
result = MFC_PPU_DMA_CMD_SEQUENCE_ERROR;
|
||||
|
@ -280,13 +313,16 @@ void SPUThread::ListCmd(u32 lsa, u64 ea, u16 tag, u16 size, u32 cmd, MFCReg& MFC
|
|||
}
|
||||
|
||||
u32 addr = rec->ea;
|
||||
ProcessCmd(cmd, tag, lsa | (addr & 0xf), addr, size);
|
||||
|
||||
if (Ini.HLELogging.GetValue() || rec->s)
|
||||
if (size)
|
||||
ProcessCmd(cmd, tag, lsa | (addr & 0xf), addr, size);
|
||||
|
||||
if (Ini.HLELogging.GetValue() || rec->s.ToBE())
|
||||
LOG_NOTICE(Log::SPU, "*** list element(%d/%d): s = 0x%x, ts = 0x%x, low ea = 0x%x (lsa = 0x%x)",
|
||||
i, list_size, (u16)rec->s, (u16)rec->ts, (u32)rec->ea, lsa | (addr & 0xf));
|
||||
|
||||
lsa += std::max(size, (u32)16);
|
||||
if (size)
|
||||
lsa += std::max(size, (u32)16);
|
||||
|
||||
if (rec->s.ToBE() & se16(0x8000))
|
||||
{
|
||||
|
@ -377,7 +413,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
|
|||
for (u32 i = 0; i < 16; i++)
|
||||
{
|
||||
R_DATA[i] = vm::get_ptr<u64>(R_ADDR)[i];
|
||||
vm::get_ptr<u64>(dmac.ls_offset + lsa)[i] = R_DATA[i];
|
||||
vm::get_ptr<u64>(ls_offset + lsa)[i] = R_DATA[i];
|
||||
}
|
||||
MFCArgs.AtomicStat.PushUncond(MFC_GETLLAR_SUCCESS);
|
||||
}
|
||||
|
@ -391,7 +427,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
|
|||
u64 buf[16];
|
||||
for (u32 i = 0; i < 16; i++)
|
||||
{
|
||||
buf[i] = vm::get_ptr<u64>(dmac.ls_offset + lsa)[i];
|
||||
buf[i] = vm::get_ptr<u64>(ls_offset + lsa)[i];
|
||||
if (buf[i] != R_DATA[i])
|
||||
{
|
||||
changed++;
|
||||
|
@ -436,8 +472,8 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
|
|||
for (s32 i = (s32)PC; i < (s32)PC + 4 * 7; i += 4)
|
||||
{
|
||||
dis_asm.dump_pc = i;
|
||||
dis_asm.offset = vm::get_ptr<u8>(dmac.ls_offset);
|
||||
const u32 opcode = vm::read32(i + dmac.ls_offset);
|
||||
dis_asm.offset = vm::get_ptr<u8>(ls_offset);
|
||||
const u32 opcode = vm::read32(i + ls_offset);
|
||||
(*SPU_instr::rrr_list)(&dis_asm, opcode);
|
||||
if (i >= 0 && i < 0x40000)
|
||||
{
|
||||
|
@ -454,7 +490,7 @@ void SPUThread::EnqMfcCmd(MFCReg& MFCArgs)
|
|||
}
|
||||
else // store unconditional
|
||||
{
|
||||
if (R_ADDR)
|
||||
if (R_ADDR) // may be wrong
|
||||
{
|
||||
m_events |= SPU_EVENT_LR;
|
||||
}
|
||||
|
@ -498,18 +534,20 @@ bool SPUThread::CheckEvents()
|
|||
|
||||
u32 SPUThread::GetChannelCount(u32 ch)
|
||||
{
|
||||
u32 res = 0xdeafbeef;
|
||||
|
||||
switch (ch)
|
||||
{
|
||||
case SPU_WrOutMbox: return SPU.Out_MBox.GetFreeCount();
|
||||
case SPU_WrOutIntrMbox: return SPU.Out_IntrMBox.GetFreeCount();
|
||||
case SPU_RdInMbox: return SPU.In_MBox.GetCount();
|
||||
case MFC_RdTagStat: return MFC1.TagStatus.GetCount();
|
||||
case MFC_RdListStallStat: return StallStat.GetCount();
|
||||
case MFC_WrTagUpdate: return MFC1.TagStatus.GetCount(); // hack
|
||||
case SPU_RdSigNotify1: return SPU.SNR[0].GetCount();
|
||||
case SPU_RdSigNotify2: return SPU.SNR[1].GetCount();
|
||||
case MFC_RdAtomicStat: return MFC1.AtomicStat.GetCount();
|
||||
case SPU_RdEventStat: return CheckEvents() ? 1 : 0;
|
||||
case SPU_WrOutMbox: res = SPU.Out_MBox.GetFreeCount(); break;
|
||||
case SPU_WrOutIntrMbox: res = SPU.Out_IntrMBox.GetFreeCount(); break;
|
||||
case SPU_RdInMbox: res = SPU.In_MBox.GetCount(); break;
|
||||
case MFC_RdTagStat: res = MFC1.TagStatus.GetCount(); break;
|
||||
case MFC_RdListStallStat: res = StallStat.GetCount(); break;
|
||||
case MFC_WrTagUpdate: res = MFC1.TagStatus.GetCount(); break;// hack
|
||||
case SPU_RdSigNotify1: res = SPU.SNR[0].GetCount(); break;
|
||||
case SPU_RdSigNotify2: res = SPU.SNR[1].GetCount(); break;
|
||||
case MFC_RdAtomicStat: res = MFC1.AtomicStat.GetCount(); break;
|
||||
case SPU_RdEventStat: res = CheckEvents() ? 1 : 0; break;
|
||||
|
||||
default:
|
||||
{
|
||||
|
@ -518,12 +556,17 @@ u32 SPUThread::GetChannelCount(u32 ch)
|
|||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
//LOG_NOTICE(Log::SPU, "%s(%s) -> 0x%x", __FUNCTION__, spu_ch_name[ch], res);
|
||||
return res;
|
||||
}
|
||||
|
||||
void SPUThread::WriteChannel(u32 ch, const u128& r)
|
||||
{
|
||||
const u32 v = r._u32[3];
|
||||
|
||||
//LOG_NOTICE(Log::SPU, "%s(%s): v=0x%x", __FUNCTION__, spu_ch_name[ch], v);
|
||||
|
||||
switch (ch)
|
||||
{
|
||||
case SPU_WrOutIntrMbox:
|
||||
|
@ -880,13 +923,27 @@ void SPUThread::ReadChannel(u128& r, u32 ch)
|
|||
|
||||
case SPU_RdSigNotify1:
|
||||
{
|
||||
while (!SPU.SNR[0].Pop(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
if (cfg.value & 1)
|
||||
{
|
||||
while (!SPU.SNR[0].Pop_XCHG(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
while (!SPU.SNR[0].Pop(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
||||
case SPU_RdSigNotify2:
|
||||
{
|
||||
while (!SPU.SNR[1].Pop(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
if (cfg.value & 2)
|
||||
{
|
||||
while (!SPU.SNR[1].Pop_XCHG(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
}
|
||||
else
|
||||
{
|
||||
while (!SPU.SNR[1].Pop(v) && !Emu.IsStopped()) std::this_thread::sleep_for(std::chrono::milliseconds(1));
|
||||
}
|
||||
break;
|
||||
}
|
||||
|
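Reading SPU_RdSigNotify1/2 now depends on the channel configuration: when the matching cfg.value bit is set the register is in logical-OR mode and is drained with Pop_XCHG, otherwise the plain Pop path is used. A simplified model of why the two modes behave differently (this is an interpretation of the SNR semantics, not emulator code):

#include <cstdint>
#include <cstdio>

// Simplified model of one signal-notification register.
struct Snr
{
    bool or_mode;   // corresponds to a bit of cfg.value in the diff
    bool set;
    uint32_t value;

    void write(uint32_t v)
    {
        if (or_mode && set) value |= v; // logical-OR mode accumulates pending signals
        else { value = v; set = true; } // overwrite mode keeps only the latest write
    }

    uint32_t read() // reading clears the register, like Pop/Pop_XCHG
    {
        const uint32_t v = value;
        value = 0;
        set = false;
        return v;
    }
};

int main()
{
    Snr a{ true, false, 0 }, b{ false, false, 0 };
    a.write(0x1); a.write(0x4);
    b.write(0x1); b.write(0x4);
    std::printf("or=0x%x overwrite=0x%x\n", a.read(), b.read()); // or=0x5 overwrite=0x4
    return 0;
}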
||||
|
@ -936,6 +993,8 @@ void SPUThread::ReadChannel(u128& r, u32 ch)
|
|||
}
|
||||
|
||||
if (Emu.IsStopped()) LOG_WARNING(Log::SPU, "%s(%s) aborted", __FUNCTION__, spu_ch_name[ch]);
|
||||
|
||||
//LOG_NOTICE(Log::SPU, "%s(%s) -> 0x%x", __FUNCTION__, spu_ch_name[ch], v);
|
||||
}
|
||||
|
||||
void SPUThread::StopAndSignal(u32 code)
|
||||
|
@ -945,6 +1004,24 @@ void SPUThread::StopAndSignal(u32 code)
|
|||
|
||||
switch (code)
|
||||
{
|
||||
case 0x001:
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
break;
|
||||
}
|
||||
|
||||
case 0x002:
|
||||
{
|
||||
FastStop();
|
||||
break;
|
||||
}
|
||||
|
||||
case 0x003:
|
||||
{
|
||||
GPR[3]._u32[3] = m_code3_func(*this);
|
||||
break;
|
||||
}
|
||||
|
||||
case 0x110:
|
||||
{
|
||||
/* ===== sys_spu_thread_receive_event ===== */
|
||||
|
|
|
@ -1,4 +1,5 @@
|
|||
#pragma once
|
||||
#include "Emu/Memory/atomic_type.h"
|
||||
#include "PPCThread.h"
|
||||
#include "Emu/Event.h"
|
||||
#include "MFC.h"
|
||||
|
@ -246,181 +247,127 @@ public:
|
|||
}
|
||||
} m_intrtag[3];
|
||||
|
||||
template<size_t _max_count>
|
||||
// limited lock-free queue, most functions are barrier-free
|
||||
template<size_t max_count>
|
||||
class Channel
|
||||
{
|
||||
public:
|
||||
static const size_t max_count = _max_count;
|
||||
static_assert(max_count >= 1, "Invalid channel count");
|
||||
|
||||
private:
|
||||
union _CRT_ALIGN(8) {
|
||||
struct {
|
||||
volatile u32 m_index;
|
||||
u32 m_value[max_count];
|
||||
};
|
||||
volatile u64 m_indval;
|
||||
struct ChannelData
|
||||
{
|
||||
u32 value;
|
||||
u32 is_set;
|
||||
};
|
||||
std::mutex m_lock;
|
||||
|
||||
atomic_t<ChannelData> m_data[max_count];
|
||||
size_t m_push;
|
||||
size_t m_pop;
|
||||
|
||||
public:
|
||||
Channel()
|
||||
__noinline Channel()
|
||||
{
|
||||
Init();
|
||||
}
|
||||
|
||||
void Init()
|
||||
{
|
||||
m_indval = 0;
|
||||
}
|
||||
|
||||
__forceinline bool Pop(u32& res)
|
||||
{
|
||||
if (max_count > 1)
|
||||
for (size_t i = 0; i < max_count; i++)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
if(!m_index)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
res = m_value[0];
|
||||
if (max_count > 1) for (u32 i = 1; i < max_count; i++) // FIFO
|
||||
{
|
||||
m_value[i-1] = m_value[i];
|
||||
}
|
||||
m_value[max_count-1] = 0;
|
||||
m_index--;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{ //lock-free
|
||||
if ((m_indval & 0xffffffff) == 0)
|
||||
return false;
|
||||
else
|
||||
{
|
||||
res = (m_indval >> 32);
|
||||
m_indval = 0;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline bool Push(u32 value)
|
||||
{
|
||||
if (max_count > 1)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
if(m_index >= max_count)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
m_value[m_index++] = value;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{ //lock-free
|
||||
if (m_indval & 0xffffffff)
|
||||
return false;
|
||||
else
|
||||
{
|
||||
const u64 new_value = ((u64)value << 32) | 1;
|
||||
m_indval = new_value;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline void PushUncond(u32 value)
|
||||
{
|
||||
if (max_count > 1)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
if(m_index >= max_count)
|
||||
m_value[max_count-1] = value; //last message is overwritten
|
||||
else
|
||||
m_value[m_index++] = value;
|
||||
}
|
||||
else
|
||||
{ //lock-free
|
||||
const u64 new_value = ((u64)value << 32) | 1;
|
||||
m_indval = new_value;
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline void PushUncond_OR(u32 value)
|
||||
{
|
||||
if (max_count > 1)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
if(m_index >= max_count)
|
||||
m_value[max_count-1] |= value; //last message is logically ORed
|
||||
else
|
||||
m_value[m_index++] = value;
|
||||
}
|
||||
else
|
||||
{
|
||||
InterlockedOr64((volatile s64*)m_indval, ((u64)value << 32) | 1);
|
||||
m_data[i].write_relaxed({});
|
||||
}
|
||||
m_push = 0;
|
||||
m_pop = 0;
|
||||
}
|
||||
|
||||
__forceinline void PopUncond(u32& res)
|
||||
{
|
||||
if (max_count > 1)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
if(!m_index)
|
||||
res = 0; //result is undefined
|
||||
else
|
||||
{
|
||||
res = m_value[--m_index];
|
||||
m_value[m_index] = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
{ //lock-free
|
||||
if(!m_index)
|
||||
res = 0;
|
||||
else
|
||||
{
|
||||
res = (m_indval >> 32);
|
||||
m_indval = 0;
|
||||
}
|
||||
}
|
||||
res = m_data[m_pop].read_relaxed().value;
|
||||
m_data[m_pop].write_relaxed({});
|
||||
m_pop = (m_pop + 1) % max_count;
|
||||
}
|
||||
|
||||
__forceinline u32 GetCount()
|
||||
__forceinline bool Pop(u32& res)
|
||||
{
|
||||
if (max_count > 1)
|
||||
const auto data = m_data[m_pop].read_relaxed();
|
||||
if (data.is_set)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
return m_index;
|
||||
res = data.value;
|
||||
m_data[m_pop].write_relaxed({});
|
||||
m_pop = (m_pop + 1) % max_count;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
return m_index;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline u32 GetFreeCount()
|
||||
__forceinline bool Pop_XCHG(u32& res) // not barrier-free, not tested
|
||||
{
|
||||
if (max_count > 1)
|
||||
const auto data = m_data[m_pop].exchange({});
|
||||
if (data.is_set)
|
||||
{
|
||||
std::lock_guard<std::mutex> lock(m_lock);
|
||||
return max_count - m_index;
|
||||
res = data.value;
|
||||
m_pop = (m_pop + 1) % max_count;
|
||||
return true;
|
||||
}
|
||||
else
|
||||
{
|
||||
return max_count - m_index;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
void SetValue(u32 value)
|
||||
__forceinline void PushUncond_OR(const u32 value) // not barrier-free, not tested
|
||||
{
|
||||
m_value[0] = value;
|
||||
m_data[m_push]._or({ value, 1 });
|
||||
m_push = (m_push + 1) % max_count;
|
||||
}
|
||||
|
||||
u32 GetValue() const
|
||||
__forceinline void PushUncond(const u32 value)
|
||||
{
|
||||
return m_value[0];
|
||||
m_data[m_push].write_relaxed({ value, 1 });
|
||||
m_push = (m_push + 1) % max_count;
|
||||
}
|
||||
|
||||
__forceinline bool Push(const u32 value)
|
||||
{
|
||||
if (m_data[m_push].read_relaxed().is_set)
|
||||
{
|
||||
return false;
|
||||
}
|
||||
else
|
||||
{
|
||||
PushUncond(value);
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline u32 GetCount() const
|
||||
{
|
||||
u32 res = 0;
|
||||
for (size_t i = 0; i < max_count; i++)
|
||||
{
|
||||
res += m_data[i].read_relaxed().is_set ? 1 : 0;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
__forceinline u32 GetFreeCount() const
|
||||
{
|
||||
u32 res = 0;
|
||||
for (size_t i = 0; i < max_count; i++)
|
||||
{
|
||||
res += m_data[i].read_relaxed().is_set ? 0 : 1;
|
||||
}
|
||||
return res;
|
||||
}
|
||||
|
||||
__forceinline void SetValue(const u32 value)
|
||||
{
|
||||
m_data[m_push].direct_op([value](ChannelData& v)
|
||||
{
|
||||
v.value = value;
|
||||
});
|
||||
}
|
||||
|
||||
__forceinline u32 GetValue() const
|
||||
{
|
||||
return m_data[m_pop].read_relaxed().value;
|
||||
}
|
||||
};
|
||||
|
||||
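The rewritten Channel drops the mutex/packed-u64 implementation and keeps each slot as an atomic {value, is_set} pair in a small ring: Push writes at m_push and advances it, Pop reads at m_pop, clears the slot and advances, and GetCount just counts set slots. A compact sketch of that ring using std::atomic<uint64_t> to hold the packed pair; layout and names are illustrative, and it assumes one producer and one consumer:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Tiny ring buffer in the spirit of the new Channel<max_count>.
template<size_t N>
struct Ring
{
    std::atomic<uint64_t> slot[N]; // bits 32..63: value, bit 0: is_set
    size_t push_pos, pop_pos;

    Ring() : push_pos(0), pop_pos(0) { for (auto& s : slot) s.store(0); }

    bool push(uint32_t v)
    {
        if (slot[push_pos].load() & 1) return false;   // slot still occupied: channel full
        slot[push_pos].store(((uint64_t)v << 32) | 1);
        push_pos = (push_pos + 1) % N;
        return true;
    }

    bool pop(uint32_t& out)
    {
        const uint64_t d = slot[pop_pos].load();
        if (!(d & 1)) return false;                    // nothing queued
        out = (uint32_t)(d >> 32);
        slot[pop_pos].store(0);                        // clear the slot, like write_relaxed({})
        pop_pos = (pop_pos + 1) % N;
        return true;
    }
};

int main()
{
    Ring<4> ch;
    ch.push(10); ch.push(20);
    uint32_t v;
    while (ch.pop(v)) std::printf("%u\n", v); // prints 10 then 20: FIFO order is preserved
    return 0;
}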
|
@ -473,7 +420,7 @@ public:
|
|||
struct { u32 EAH, EAL; };
|
||||
};
|
||||
|
||||
DMAC dmac;
|
||||
u32 ls_offset;
|
||||
|
||||
void ProcessCmd(u32 cmd, u32 tag, u32 lsa, u64 ea, u32 size);
|
||||
|
||||
|
@ -503,6 +450,9 @@ public:
|
|||
void WriteLS64 (const u32 lsa, const u64& data) const { vm::write64 (lsa + m_offset, data); }
|
||||
void WriteLS128(const u32 lsa, const u128& data) const { vm::write128(lsa + m_offset, data); }
|
||||
|
||||
std::function<void(SPUThread& SPU)> m_custom_task;
|
||||
std::function<u32(SPUThread& SPU)> m_code3_func;
|
||||
|
||||
public:
|
||||
SPUThread(CPUThreadType type = CPU_THREAD_SPU);
|
||||
virtual ~SPUThread();
|
||||
|
@ -560,6 +510,8 @@ public:
|
|||
public:
|
||||
virtual void InitRegs();
|
||||
virtual void Task();
|
||||
void FastCall(u32 ls_addr);
|
||||
void FastStop();
|
||||
|
||||
protected:
|
||||
virtual void DoReset();
|
||||
|
|
|
@ -128,7 +128,6 @@ void MemoryBase::Init(MemoryType type)
|
|||
MemoryBlocks.push_back(MainMem.SetRange(0x00010000, 0x2FFF0000));
|
||||
MemoryBlocks.push_back(UserMemory = PRXMem.SetRange(0x30000000, 0x10000000));
|
||||
MemoryBlocks.push_back(RSXCMDMem.SetRange(0x40000000, 0x10000000));
|
||||
MemoryBlocks.push_back(MmaperMem.SetRange(0xB0000000, 0x10000000));
|
||||
MemoryBlocks.push_back(RSXFBMem.SetRange(0xC0000000, 0x10000000));
|
||||
MemoryBlocks.push_back(StackMem.SetRange(0xD0000000, 0x10000000));
|
||||
break;
|
||||
|
@ -218,7 +217,7 @@ bool MemoryBase::Map(const u64 addr, const u32 size)
|
|||
}
|
||||
|
||||
MemoryBlocks.push_back((new MemoryBlock())->SetRange(addr, size));
|
||||
LOG_WARNING(MEMORY, "MemoryBase::Map(0x%llx, 0x%x)", addr, size);
|
||||
LOG_WARNING(MEMORY, "Memory mapped at 0x%llx: size=0x%x", addr, size);
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -579,73 +578,71 @@ bool VirtualMemoryBlock::IsMyAddress(const u64 addr)
|
|||
return false;
|
||||
}
|
||||
|
||||
u64 VirtualMemoryBlock::Map(u64 realaddr, u32 size, u64 addr)
|
||||
u64 VirtualMemoryBlock::Map(u64 realaddr, u32 size)
|
||||
{
|
||||
if(addr)
|
||||
for (u64 addr = GetStartAddr(); addr <= GetEndAddr() - GetReservedAmount() - size;)
|
||||
{
|
||||
if(!IsInMyRange(addr, size) && (IsMyAddress(addr) || IsMyAddress(addr + size - 1)))
|
||||
return 0;
|
||||
bool is_good_addr = true;
|
||||
|
||||
m_mapped_memory.emplace_back(addr, realaddr, size);
|
||||
return addr;
|
||||
}
|
||||
else
|
||||
{
|
||||
for(u64 addr = GetStartAddr(); addr <= GetEndAddr() - GetReservedAmount() - size;)
|
||||
// check if address is already mapped
|
||||
for (u32 i = 0; i<m_mapped_memory.size(); ++i)
|
||||
{
|
||||
bool is_good_addr = true;
|
||||
|
||||
// check if address is already mapped
|
||||
for(u32 i=0; i<m_mapped_memory.size(); ++i)
|
||||
if ((addr >= m_mapped_memory[i].addr && addr < m_mapped_memory[i].addr + m_mapped_memory[i].size) ||
|
||||
(m_mapped_memory[i].addr >= addr && m_mapped_memory[i].addr < addr + size))
|
||||
{
|
||||
if((addr >= m_mapped_memory[i].addr && addr < m_mapped_memory[i].addr + m_mapped_memory[i].size) ||
|
||||
(m_mapped_memory[i].addr >= addr && m_mapped_memory[i].addr < addr + size))
|
||||
{
|
||||
is_good_addr = false;
|
||||
addr = m_mapped_memory[i].addr + m_mapped_memory[i].size;
|
||||
break;
|
||||
}
|
||||
is_good_addr = false;
|
||||
addr = m_mapped_memory[i].addr + m_mapped_memory[i].size;
|
||||
break;
|
||||
}
|
||||
|
||||
if(!is_good_addr) continue;
|
||||
|
||||
m_mapped_memory.emplace_back(addr, realaddr, size);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
if (!is_good_addr) continue;
|
||||
|
||||
m_mapped_memory.emplace_back(addr, realaddr, size);
|
||||
|
||||
return addr;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
u32 VirtualMemoryBlock::UnmapRealAddress(u64 realaddr)
|
||||
bool VirtualMemoryBlock::Map(u64 realaddr, u32 size, u64 addr)
|
||||
{
|
||||
if(!IsInMyRange(addr, size) && (IsMyAddress(addr) || IsMyAddress(addr + size - 1)))
|
||||
return false;
|
||||
|
||||
m_mapped_memory.emplace_back(addr, realaddr, size);
|
||||
return true;
|
||||
}
|
||||
|
||||
bool VirtualMemoryBlock::UnmapRealAddress(u64 realaddr, u32& size)
|
||||
{
|
||||
for(u32 i=0; i<m_mapped_memory.size(); ++i)
|
||||
{
|
||||
if(m_mapped_memory[i].realAddress == realaddr && IsInMyRange(m_mapped_memory[i].addr, m_mapped_memory[i].size))
|
||||
{
|
||||
u32 size = m_mapped_memory[i].size;
|
||||
size = m_mapped_memory[i].size;
|
||||
m_mapped_memory.erase(m_mapped_memory.begin() + i);
|
||||
return size;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
u32 VirtualMemoryBlock::UnmapAddress(u64 addr)
|
||||
bool VirtualMemoryBlock::UnmapAddress(u64 addr, u32& size)
|
||||
{
|
||||
for(u32 i=0; i<m_mapped_memory.size(); ++i)
|
||||
{
|
||||
if(m_mapped_memory[i].addr == addr && IsInMyRange(m_mapped_memory[i].addr, m_mapped_memory[i].size))
|
||||
{
|
||||
u32 size = m_mapped_memory[i].size;
|
||||
size = m_mapped_memory[i].size;
|
||||
m_mapped_memory.erase(m_mapped_memory.begin() + i);
|
||||
return size;
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
return 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
bool VirtualMemoryBlock::Read32(const u64 addr, u32* value)
|
||||
|
|
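The address-less VirtualMemoryBlock::Map overload now does a first-fit scan: starting at the block's base it tests the candidate range against every existing mapping, and on overlap it jumps the candidate past the conflicting mapping and rescans. A boiled-down sketch of that search over a plain vector of ranges (standalone, no emulator types):

#include <cstdint>
#include <cstdio>
#include <vector>

struct Mapping { uint64_t addr; uint32_t size; };

// First-fit search in the spirit of VirtualMemoryBlock::Map(realaddr, size).
static uint64_t find_addr(const std::vector<Mapping>& maps, uint64_t start, uint64_t end, uint32_t size)
{
    for (uint64_t addr = start; addr + size <= end;)
    {
        bool ok = true;
        for (const Mapping& m : maps)
        {
            if (addr < m.addr + m.size && m.addr < addr + size) // ranges overlap
            {
                ok = false;
                addr = m.addr + m.size; // skip past the conflicting mapping and retry
                break;
            }
        }
        if (ok) return addr;
    }
    return 0; // no room
}

int main()
{
    std::vector<Mapping> maps = { { 0x1000, 0x1000 }, { 0x3000, 0x1000 } };
    std::printf("0x%llx\n", (unsigned long long)find_addr(maps, 0x1000, 0x10000, 0x800)); // 0x2000
    return 0;
}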
|
@ -35,7 +35,6 @@ public:
|
|||
DynamicMemoryBlock MainMem;
|
||||
DynamicMemoryBlock PRXMem;
|
||||
DynamicMemoryBlock RSXCMDMem;
|
||||
DynamicMemoryBlock MmaperMem;
|
||||
DynamicMemoryBlock RSXFBMem;
|
||||
DynamicMemoryBlock StackMem;
|
||||
MemoryBlock* RawSPUMem[(0x100000000 - RAW_SPU_BASE_ADDR) / RAW_SPU_OFFSET];
|
||||
|
|
|
@ -163,13 +163,14 @@ public:
|
|||
|
||||
// maps real address to virtual address space, returns the mapped address or 0 on failure (if no address is specified the
|
||||
// first mappable space is used)
|
||||
virtual u64 Map(u64 realaddr, u32 size, u64 addr = 0);
|
||||
virtual bool Map(u64 realaddr, u32 size, u64 addr);
|
||||
virtual u64 Map(u64 realaddr, u32 size);
|
||||
|
||||
// Unmap real address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
|
||||
virtual u32 UnmapRealAddress(u64 realaddr);
|
||||
virtual bool UnmapRealAddress(u64 realaddr, u32& size);
|
||||
|
||||
// Unmap address (please specify only starting point, no midway memory will be unmapped), returns the size of the unmapped area
|
||||
virtual u32 UnmapAddress(u64 addr);
|
||||
virtual bool UnmapAddress(u64 addr, u32& size);
|
||||
|
||||
// Reserve a certain amount so no one can use it, returns true on succces, false on failure
|
||||
virtual bool Reserve(u32 size);
|
||||
|
|
rpcs3/Emu/Memory/atomic_type.h (new file, 211 lines)
|
@ -0,0 +1,211 @@
|
|||
#pragma once
|
||||
|
||||
template<typename T, size_t size = sizeof(T)>
|
||||
struct _to_atomic
|
||||
{
|
||||
static_assert(size == 1 || size == 2 || size == 4 || size == 8, "Invalid atomic type");
|
||||
|
||||
typedef T type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct _to_atomic<T, 1>
|
||||
{
|
||||
typedef uint8_t type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct _to_atomic<T, 2>
|
||||
{
|
||||
typedef uint16_t type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct _to_atomic<T, 4>
|
||||
{
|
||||
typedef uint32_t type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
struct _to_atomic<T, 8>
|
||||
{
|
||||
typedef uint64_t type;
|
||||
};
|
||||
|
||||
template<typename T>
|
||||
class _atomic_base
|
||||
{
|
||||
typedef typename _to_atomic<T, sizeof(T)>::type atomic_type;
|
||||
atomic_type data;
|
||||
|
||||
public:
|
||||
// atomically compare data with cmp, replace with exch if equal, return previous data value anyway
|
||||
__forceinline const T compare_and_swap(const T& cmp, const T& exch) volatile
|
||||
{
|
||||
const atomic_type res = InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp));
|
||||
return (T&)res;
|
||||
}
|
||||
|
||||
// atomically compare data with cmp, replace with exch if equal, return true if data was replaced
|
||||
__forceinline bool compare_and_swap_test(const T& cmp, const T& exch) volatile
|
||||
{
|
||||
return InterlockedCompareExchange(&data, (atomic_type&)(exch), (atomic_type&)(cmp)) == (atomic_type&)(cmp);
|
||||
}
|
||||
|
||||
// read data with memory barrier
|
||||
__forceinline const T read_sync() const volatile
|
||||
{
|
||||
const atomic_type res = InterlockedCompareExchange(const_cast<volatile atomic_type*>(&data), 0, 0);
|
||||
return (T&)res;
|
||||
}
|
||||
|
||||
// atomically replace data with exch, return previous data value
|
||||
__forceinline const T exchange(const T& exch) volatile
|
||||
{
|
||||
const atomic_type res = InterlockedExchange(&data, (atomic_type&)(exch));
|
||||
return (T&)res;
|
||||
}
|
||||
|
||||
// read data without memory barrier
|
||||
	__forceinline const T read_relaxed() const volatile
	{
		return (T&)data;
	}

	// write data without memory barrier
	__forceinline void write_relaxed(const T& value) volatile
	{
		data = (atomic_type&)(value);
	}

	// perform atomic operation on data
	template<typename FT> __forceinline void atomic_op(const FT atomic_proc) volatile
	{
		while (true)
		{
			const T old = read_relaxed();
			T _new = old;
			atomic_proc(_new); // function should accept reference to T type
			if (compare_and_swap_test(old, _new)) return;
		}
	}

	// perform atomic operation on data with special exit condition (if intermediate result != proceed_value)
	template<typename RT, typename FT> __forceinline RT atomic_op(const RT proceed_value, const FT atomic_proc) volatile
	{
		while (true)
		{
			const T old = read_relaxed();
			T _new = old;
			RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
			if (res != proceed_value) return res;
			if (compare_and_swap_test(old, _new)) return proceed_value;
		}
	}

	// perform atomic operation on data with additional memory barrier
	template<typename FT> __forceinline void atomic_op_sync(const FT atomic_proc) volatile
	{
		T old = read_sync();
		while (true)
		{
			T _new = old;
			atomic_proc(_new); // function should accept reference to T type
			const T val = compare_and_swap(old, _new);
			if ((atomic_type&)val == (atomic_type&)old) return;
			old = val;
		}
	}

	// perform atomic operation on data with additional memory barrier and special exit condition (if intermediate result != proceed_value)
	template<typename RT, typename FT> __forceinline RT atomic_op_sync(const RT proceed_value, const FT atomic_proc) volatile
	{
		T old = read_sync();
		while (true)
		{
			T _new = old;
			RT res = (RT)atomic_proc(_new); // function should accept reference to T type and return some value
			if (res != proceed_value) return res;
			const T val = compare_and_swap(old, _new);
			if ((atomic_type&)val == (atomic_type&)old) return proceed_value;
			old = val;
		}
	}

	// perform non-atomic operation on data directly without memory barriers
	template<typename FT> __forceinline void direct_op(const FT direct_proc) volatile
	{
		direct_proc((T&)data);
	}

	// atomic bitwise OR, returns previous data
	__forceinline const T _or(const T& right) volatile
	{
		const atomic_type res = InterlockedOr(&data, (atomic_type&)(right));
		return (T&)res;
	}

	// atomic bitwise AND, returns previous data
	__forceinline const T _and(const T& right) volatile
	{
		const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right));
		return (T&)res;
	}

	// atomic bitwise AND NOT (inverts right argument), returns previous data
	__forceinline const T _and_not(const T& right) volatile
	{
		const atomic_type res = InterlockedAnd(&data, ~(atomic_type&)(right));
		return (T&)res;
	}

	// atomic bitwise XOR, returns previous data
	__forceinline const T _xor(const T& right) volatile
	{
		const atomic_type res = InterlockedXor(&data, (atomic_type&)(right));
		return (T&)res;
	}

	__forceinline const T operator |= (const T& right) volatile
	{
		const atomic_type res = InterlockedOr(&data, (atomic_type&)(right)) | (atomic_type&)(right);
		return (T&)res;
	}

	__forceinline const T operator &= (const T& right) volatile
	{
		const atomic_type res = InterlockedAnd(&data, (atomic_type&)(right)) & (atomic_type&)(right);
		return (T&)res;
	}

	__forceinline const T operator ^= (const T& right) volatile
	{
		const atomic_type res = InterlockedXor(&data, (atomic_type&)(right)) ^ (atomic_type&)(right);
		return (T&)res;
	}
};

template<typename T> struct atomic_le_t : public _atomic_base<T>
{
};

template<typename T> struct atomic_be_t : public _atomic_base<typename to_be_t<T>::type>
{
};

namespace ps3
{
	template<typename T> struct atomic_t : public atomic_be_t<T>
	{
	};
}

namespace psv
{
	template<typename T> struct atomic_t : public atomic_le_t<T>
	{
	};
}

using namespace ps3;
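The two atomic_op overloads above follow the classic compare-and-swap retry loop: copy the current value, let the functor mutate the copy, and publish it only if nothing changed in between. As a standalone illustration of the same pattern with plain std::atomic (not the emulator's atomic_t):

#include <atomic>
#include <cstdint>

// Sketch only: read, mutate a local copy, try to publish, retry on failure.
template<typename F>
void atomic_op_sketch(std::atomic<uint32_t>& data, F func)
{
	uint32_t old_value = data.load(std::memory_order_relaxed);
	for (;;)
	{
		uint32_t new_value = old_value;
		func(new_value); // mutate the local copy
		if (data.compare_exchange_weak(old_value, new_value)) return;
		// on failure old_value now holds the fresh value and we simply retry
	}
}

int main()
{
	std::atomic<uint32_t> counter{ 0 };
	atomic_op_sketch(counter, [](uint32_t& v) { v += 2; });
	return counter.load() == 2 ? 0 : 1;
}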
@@ -12,12 +12,6 @@ namespace vm
	{
		return (T*)((u8*)g_base_addr + addr);
	}

	template<typename T>
	T* const get_ptr(u64 addr)
	{
		return get_ptr<T>((u32)addr);
	}

	template<typename T>
	T& get_ref(u32 addr)

@@ -25,12 +19,6 @@ namespace vm
		return *get_ptr<T>(addr);
	}

	template<typename T>
	T& get_ref(u64 addr)
	{
		return get_ref<T>((u32)addr);
	}

	namespace ps3
	{
		static u8 read8(u32 addr)

@@ -154,4 +142,4 @@ namespace vm

#include "vm_ref.h"
#include "vm_ptr.h"
#include "vm_var.h"
#include "vm_var.h"
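get_ptr() resolves a guest address by adding it to one flat host allocation (g_base_addr). A minimal standalone sketch of that translation, with a small vector standing in for the emulator's reserved address space:

#include <cstdint>
#include <vector>

std::vector<uint8_t> g_memory(64 * 1024); // placeholder for the emulator's base allocation

template<typename T>
T* get_ptr_sketch(uint32_t addr)
{
	// a guest address is just an offset into one contiguous host buffer
	return reinterpret_cast<T*>(g_memory.data() + addr);
}

int main()
{
	*get_ptr_sketch<uint32_t>(0x100) = 42;
	return *get_ptr_sketch<uint32_t>(0x100) == 42 ? 0 : 1;
}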
@@ -366,7 +366,7 @@ namespace vm
	//BE pointer to LE data
	template<typename T, int lvl = 1, typename AT = u32> struct bptrl : public _ptr_base<T, lvl, typename to_be_t<AT>::type>
	{
		static bptrl make(AT addr)
		static bptrl make(typename to_be_t<AT>::type addr)
		{
			return (bptrl&)addr;
		}

@@ -378,7 +378,7 @@ namespace vm
	//BE pointer to BE data
	template<typename T, int lvl = 1, typename AT = u32> struct bptrb : public _ptr_base<typename to_be_t<T>::type, lvl, typename to_be_t<AT>::type>
	{
		static bptrb make(AT addr)
		static bptrb make(typename to_be_t<AT>::type addr)
		{
			return (bptrb&)addr;
		}

@@ -428,7 +428,7 @@ namespace vm
	//default pointer for HLE structures (BE ptrerence to BE data)
	template<typename T, int lvl = 1, typename AT = u32> struct bptr : public bptrb<T, lvl, AT>
	{
		static bptr make(AT addr)
		static bptr make(typename to_be_t<AT>::type addr)
		{
			return (bptr&)addr;
		}
@@ -48,7 +48,7 @@ u32 GetAddress(u32 offset, u32 location)
	switch(location)
	{
	case CELL_GCM_LOCATION_LOCAL: return (u32)Memory.RSXFBMem.GetStartAddr() + offset;
	case CELL_GCM_LOCATION_MAIN: return (u32)Memory.RSXIOMem.RealAddr(Memory.RSXIOMem.GetStartAddr() + offset); // TODO: Error Check?
	case CELL_GCM_LOCATION_MAIN: return (u32)Memory.RSXIOMem.RealAddr(offset); // TODO: Error Check?
	}

	LOG_ERROR(RSX, "GetAddress(offset=0x%x, location=0x%x)", location);
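GetAddress() picks a base depending on whether the offset refers to local (video) memory or to IO-mapped main memory; the change above makes the IO lookup take the raw offset instead of GetStartAddr() + offset. A hedged standalone sketch of that offset-to-address mapping, with placeholder base values (not RPCS3's real constants):

#include <cstdint>
#include <cstdio>

enum : uint32_t { LOCATION_LOCAL = 0, LOCATION_MAIN = 1 };

uint32_t get_address_sketch(uint32_t offset, uint32_t location, uint32_t local_base, uint32_t io_base)
{
	// local buffers sit at a fixed video-memory base; main-memory buffers go
	// through the IO mapping, which this diff now indexes by offset alone
	return (location == LOCATION_LOCAL ? local_base : io_base) + offset;
}

int main()
{
	std::printf("0x%x\n", get_address_sketch(0x1000, LOCATION_LOCAL, 0xC0000000u, 0x30000000u));
}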
@ -239,7 +239,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV406E_SET_CONTEXT_DMA_SEMAPHORE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV406E_SET_CONTEXT_DMA_SEMAPHORE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV406E_SET_CONTEXT_DMA_SEMAPHORE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -254,7 +254,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV406E_SEMAPHORE_ACQUIRE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV406E_SEMAPHORE_ACQUIRE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV406E_SEMAPHORE_ACQUIRE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -296,9 +296,10 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
if(m_flip_handler)
|
||||
{
|
||||
auto cb = m_flip_handler;
|
||||
Emu.GetCallbackManager().Async([cb]()
|
||||
Emu.GetCallbackManager().Register([cb]()
|
||||
{
|
||||
cb(1);
|
||||
return 0;
|
||||
});
|
||||
}
|
||||
|
||||
|
@ -315,21 +316,21 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_NOTIFY:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_NOTIFY: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_NOTIFY: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV4097_WAIT_FOR_IDLE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_WAIT_FOR_IDLE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_WAIT_FOR_IDLE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV4097_PM_TRIGGER:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_PM_TRIGGER: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_PM_TRIGGER: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -458,7 +459,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_VERTEX_ATTRIB_INPUT_MASK:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_VERTEX_ATTRIB_INPUT_MASK: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_VERTEX_ATTRIB_INPUT_MASK: 0x%x", ARGS(0));
|
||||
|
||||
//VertexData[0].prog.attributeInputMask = ARGS(0);
|
||||
}
|
||||
|
@ -467,7 +468,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_VERTEX_ATTRIB_OUTPUT_MASK: 0x%x", ARGS(0));
|
||||
|
||||
//VertexData[0].prog.attributeOutputMask = ARGS(0);
|
||||
//FragmentData.prog.attributeInputMask = ARGS(0)/* & ~0x20*/;
|
||||
|
@ -490,7 +491,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_COLOR_MASK_MRT:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_COLOR_MASK_MRT: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_COLOR_MASK_MRT: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -829,14 +830,14 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_CLEAR_RECT_HORIZONTAL:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_CLEAR_RECT_HORIZONTAL: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_CLEAR_RECT_HORIZONTAL: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV4097_SET_CLEAR_RECT_VERTICAL:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_CLEAR_RECT_VERTICAL: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_CLEAR_RECT_VERTICAL: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -933,7 +934,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
{
|
||||
const u32 a0 = ARGS(0);
|
||||
|
||||
//LOG_WARNING(RSX, "NV4097_SET_BEGIN_END: %x", a0);
|
||||
//LOG_WARNING(RSX, "NV4097_SET_BEGIN_END: 0x%x", a0);
|
||||
|
||||
m_read_buffer = false;
|
||||
|
||||
|
@ -1066,7 +1067,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_INVALIDATE_L2:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_INVALIDATE_L2: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_INVALIDATE_L2: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1085,7 +1086,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_INVALIDATE_ZCULL:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_INVALIDATE_ZCULL: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_INVALIDATE_ZCULL: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1249,7 +1250,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_SCULL_CONTROL:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_SCULL_CONTROL: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_SCULL_CONTROL: 0x%x", ARGS(0));
|
||||
|
||||
//This is stencil culling , nothing to do with stencil masking on regular color or depth buffer
|
||||
//const u32 a0 = ARGS(0);
|
||||
|
@ -1287,7 +1288,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_POINT_PARAMS_ENABLE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_ERROR(RSX, "NV4097_SET_POINT_PARAMS_ENABLE: %x", ARGS(0));
|
||||
LOG_ERROR(RSX, "NV4097_SET_POINT_PARAMS_ENABLE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1427,7 +1428,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_SURFACE_PITCH_D:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_SURFACE_PITCH_D: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_SURFACE_PITCH_D: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1467,7 +1468,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_CONTEXT_DMA_COLOR_D:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_COLOR_D: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_COLOR_D: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1481,14 +1482,14 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_CONTEXT_DMA_SEMAPHORE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_SEMAPHORE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_SEMAPHORE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV4097_SET_CONTEXT_DMA_NOTIFIES:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_NOTIFIES: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_CONTEXT_DMA_NOTIFIES: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1529,7 +1530,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
const u8 alphaToOne = (a0 >> 8) & 0xf;
|
||||
const u16 sampleMask = a0 >> 16;
|
||||
|
||||
LOG_WARNING(RSX, "TODO: NV4097_SET_ANTI_ALIASING_CONTROL: %x", a0);
|
||||
LOG_WARNING(RSX, "TODO: NV4097_SET_ANTI_ALIASING_CONTROL: 0x%x", a0);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1599,7 +1600,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_ZCULL_CONTROL0:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_CONTROL0: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_CONTROL0: 0x%x", ARGS(0));
|
||||
|
||||
//m_set_depth_func = true;
|
||||
//m_depth_func = ARGS(0) >> 4;
|
||||
|
@ -1609,7 +1610,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_ZCULL_CONTROL1:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_CONTROL1: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_CONTROL1: 0x%x", ARGS(0));
|
||||
|
||||
//m_set_depth_func = true;
|
||||
//m_depth_func = ARGS(0) >> 4;
|
||||
|
@ -1619,14 +1620,14 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV4097_SET_ZCULL_STATS_ENABLE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_STATS_ENABLE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_SET_ZCULL_STATS_ENABLE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV4097_ZCULL_SYNC:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV4097_ZCULL_SYNC: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV4097_ZCULL_SYNC: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1745,7 +1746,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
{
|
||||
const u32 offset = ARGS(0) & 0xffffff;
|
||||
const u8 mode = ARGS(0) >> 24;
|
||||
LOG_WARNING(RSX, "NV4097_SET_RENDER_ENABLE: Offset=%06x, Mode=%x", offset, mode);
|
||||
LOG_WARNING(RSX, "NV4097_SET_RENDER_ENABLE: Offset=0x%06x, Mode=0x%x", offset, mode);
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1812,14 +1813,14 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV0039_PITCH_IN:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV0039_PITCH_IN: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV0039_PITCH_IN: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
case NV0039_BUFFER_NOTIFY:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV0039_BUFFER_NOTIFY: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV0039_BUFFER_NOTIFY: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1848,7 +1849,7 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
case NV309E_SET_CONTEXT_DMA_IMAGE:
|
||||
{
|
||||
if (ARGS(0))
|
||||
LOG_WARNING(RSX, "NV309E_SET_CONTEXT_DMA_IMAGE: %x", ARGS(0));
|
||||
LOG_WARNING(RSX, "NV309E_SET_CONTEXT_DMA_IMAGE: 0x%x", ARGS(0));
|
||||
}
|
||||
break;
|
||||
|
||||
|
@ -1947,13 +1948,17 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
u8* pixels_src = vm::get_ptr<u8>(GetAddress(offset, m_context_dma_img_src - 0xfeed0000));
|
||||
u8* pixels_dst = vm::get_ptr<u8>(GetAddress(m_dst_offset, m_context_dma_img_dst - 0xfeed0000));
|
||||
|
||||
LOG_WARNING(RSX, "NV3089_IMAGE_IN_SIZE: width=%d, height=%d, pitch=%d, origin=%d, inter=%d, offset=0x%x, u=%d, v=%d", width, height, pitch, origin, inter, offset, u, v);
|
||||
LOG_WARNING(RSX, "*** m_dst_offset=0x%x, m_color: conv_in_h=0x%x, format_src_pitch=0x%x, conv_in_x=0x%x, conv_in_y=0x%x, conv_out_x=0x%x, conv_out_y=0x%x",
|
||||
m_dst_offset, m_color_conv_in_h, m_color_format_src_pitch, m_color_conv_in_x, m_color_conv_in_y, m_color_conv_out_x, m_color_conv_out_y);
|
||||
|
||||
for(u16 y=0; y<m_color_conv_in_h; ++y)
|
||||
{
|
||||
for(u16 x=0; x<m_color_format_src_pitch/4/*m_color_conv_in_w*/; ++x)
|
||||
{
|
||||
const u32 src_offset = (m_color_conv_in_y + y) * m_color_format_src_pitch + (m_color_conv_in_x + x) * 4;
|
||||
const u32 dst_offset = (m_color_conv_out_y + y) * m_color_format_dst_pitch + (m_color_conv_out_x + x) * 4;
|
||||
(u32&)pixels_dst[dst_offset] = (u32&)pixels_src[src_offset];
|
||||
//(u32&)pixels_dst[dst_offset] = (u32&)pixels_src[src_offset];
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -1981,9 +1986,10 @@ void RSXThread::DoCmd(const u32 fcmd, const u32 cmd, const u32 args_addr, const
|
|||
{
|
||||
const u32 cause = ARGS(0);
|
||||
auto cb = m_user_handler;
|
||||
Emu.GetCallbackManager().Async([cb, cause]()
|
||||
Emu.GetCallbackManager().Register([cb, cause]()
|
||||
{
|
||||
cb(cause);
|
||||
return 0;
|
||||
});
|
||||
}
|
||||
break;
|
||||
|
@ -2147,9 +2153,10 @@ void RSXThread::Task()
|
|||
if (m_vblank_handler)
|
||||
{
|
||||
auto cb = m_vblank_handler;
|
||||
Emu.GetCallbackManager().Async([cb]()
|
||||
Emu.GetCallbackManager().Register([cb]()
|
||||
{
|
||||
cb(1);
|
||||
return 0;
|
||||
});
|
||||
}
|
||||
continue;
|
||||
|
@ -2200,6 +2207,7 @@ void RSXThread::Task()
|
|||
const u32 cmd = ReadIO32(get);
|
||||
const u32 count = (cmd >> 18) & 0x7ff;
|
||||
//if(cmd == 0) continue;
|
||||
//LOG_NOTICE(Log::RSX, "put=0x%x, get=0x%x, cmd=0x%x (%s)", put, get, cmd, GetMethodName(cmd & 0xffff).c_str());
|
||||
|
||||
if(cmd & CELL_GCM_METHOD_FLAG_JUMP)
|
||||
{
|
||||
|
@ -2212,7 +2220,7 @@ void RSXThread::Task()
|
|||
{
|
||||
m_call_stack.push(get + 4);
|
||||
u32 offs = cmd & ~CELL_GCM_METHOD_FLAG_CALL;
|
||||
//u32 addr = Memory.RSXIOMem.GetStartAddr() + offs;
|
||||
//u32 addr = offs;
|
||||
//LOG_WARNING(RSX, "rsx call(0x%x) #0x%x - 0x%x - 0x%x", offs, addr, cmd, get);
|
||||
m_ctrl->get = offs;
|
||||
continue;
|
||||
|
@ -2234,14 +2242,14 @@ void RSXThread::Task()
|
|||
|
||||
if(cmd == 0)
|
||||
{
|
||||
LOG_ERROR(Log::RSX, "null cmd: cmd=0x%x, put=0x%x, get=0x%x (addr=0x%x)", cmd, put, get, (u32)Memory.RSXIOMem.RealAddr(get));
|
||||
Emu.Pause();
|
||||
//HACK! We couldn't be here
|
||||
//ConLog.Error("null cmd: addr=0x%x, put=0x%x, get=0x%x", Memory.RSXIOMem.GetStartAddr() + get, m_ctrl->put, get);
|
||||
//Emu.Pause();
|
||||
m_ctrl->get = get + (count + 1) * 4;
|
||||
continue;
|
||||
}
|
||||
|
||||
auto args = vm::ptr<be_t<u32>>::make((u32)Memory.RSXIOMem.RealAddr(Memory.RSXIOMem.GetStartAddr() + get + 4));
|
||||
auto args = vm::ptr<be_t<u32>>::make((u32)Memory.RSXIOMem.RealAddr(get + 4));
|
||||
|
||||
for(u32 i=0; i<count; i++)
|
||||
{
|
||||
|
@ -2295,7 +2303,7 @@ void RSXThread::Init(const u32 ioAddress, const u32 ioSize, const u32 ctrlAddres
|
|||
u32 RSXThread::ReadIO32(u32 addr)
|
||||
{
|
||||
u32 value;
|
||||
if (!Memory.RSXIOMem.Read32(Memory.RSXIOMem.GetStartAddr() + addr, &value))
|
||||
if (!Memory.RSXIOMem.Read32(addr, &value))
|
||||
{
|
||||
throw fmt::Format("%s(rsxio_addr=0x%x): RSXIO memory not mapped", __FUNCTION__, addr);
|
||||
}
|
||||
|
@ -2304,7 +2312,7 @@ u32 RSXThread::ReadIO32(u32 addr)
|
|||
|
||||
void RSXThread::WriteIO32(u32 addr, u32 value)
|
||||
{
|
||||
if (!Memory.RSXIOMem.Write32(Memory.RSXIOMem.GetStartAddr() + addr, value))
|
||||
if (!Memory.RSXIOMem.Write32(addr, value))
|
||||
{
|
||||
throw fmt::Format("%s(rsxio_addr=0x%x): RSXIO memory not mapped", __FUNCTION__, addr);
|
||||
}
|
||||
|
|
|
@@ -228,15 +228,15 @@ enum CellVideoOutRGBOutputRange

static const CellVideoOutResolution ResolutionTable[] =
{
	{ be_t<u16>::MakeFromBE(se16(0xffff)), be_t<u16>::MakeFromBE(se16(0xffff)) }, //0 - 0
	{ be_t<u16>::MakeFromBE(se16(1920)), be_t<u16>::MakeFromBE(se16(1080)) }, //1 - 1
	{ be_t<u16>::MakeFromBE(se16(1280)), be_t<u16>::MakeFromBE(se16(720)) }, //2 - 2
	{ be_t<u16>::MakeFromBE(se16(720)), be_t<u16>::MakeFromBE(se16(480)) }, //4 - 3
	{ be_t<u16>::MakeFromBE(se16(720)), be_t<u16>::MakeFromBE(se16(576)) }, //5 - 4
	{ be_t<u16>::MakeFromBE(se16(1600)), be_t<u16>::MakeFromBE(se16(1080)) }, //10 - 5
	{ be_t<u16>::MakeFromBE(se16(1440)), be_t<u16>::MakeFromBE(se16(1080)) }, //11 - 6
	{ be_t<u16>::MakeFromBE(se16(1280)), be_t<u16>::MakeFromBE(se16(1080)) }, //12 - 7
	{ be_t<u16>::MakeFromBE(se16(960)), be_t<u16>::MakeFromBE(se16(1080)) }, //13 - 8
	{ be_t<u16>::make(0xffff), be_t<u16>::make(0xffff) }, //0 - 0
	{ be_t<u16>::make(1920), be_t<u16>::make(1080) }, //1 - 1
	{ be_t<u16>::make(1280), be_t<u16>::make(720) }, //2 - 2
	{ be_t<u16>::make(720), be_t<u16>::make(480) }, //4 - 3
	{ be_t<u16>::make(720), be_t<u16>::make(576) }, //5 - 4
	{ be_t<u16>::make(1600), be_t<u16>::make(1080) }, //10 - 5
	{ be_t<u16>::make(1440), be_t<u16>::make(1080) }, //11 - 6
	{ be_t<u16>::make(1280), be_t<u16>::make(1080) }, //12 - 7
	{ be_t<u16>::make(960), be_t<u16>::make(1080) }, //13 - 8
};

inline static u32 ResolutionIdToNum(u32 id)
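The table stores each width/height pair already byte-swapped, so the guest-visible values come out in big-endian order; be_t<u16>::make now performs the swap that MakeFromBE(se16(...)) used to spell out. A standalone sketch of the idea with a hypothetical be16 helper (assuming a little-endian host, as the emulator does):

#include <cstdint>
#include <cstdio>

// Illustration only, not the emulator's be_t: a 16-bit value kept byte-swapped.
struct be16
{
	uint16_t raw; // stored in big-endian byte order

	static be16 make(uint16_t v) { return { static_cast<uint16_t>((v >> 8) | (v << 8)) }; }
	uint16_t value() const { return static_cast<uint16_t>((raw >> 8) | (raw << 8)); }
};

static const be16 resolution_table[][2] =
{
	{ be16::make(1920), be16::make(1080) },
	{ be16::make(1280), be16::make(720) },
};

int main()
{
	std::printf("%u x %u\n", resolution_table[0][0].value(), resolution_table[0][1].value());
}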
@@ -55,7 +55,7 @@ namespace cb_detail
	template<typename T, int g_count, int f_count, int v_count>
	struct _func_arg<T, ARG_STACK, g_count, f_count, v_count>
	{
		static_assert(f_count <= 12, "TODO: Unsupported stack argument type (float)");
		static_assert(f_count <= 13, "TODO: Unsupported stack argument type (float)");
		static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
		static_assert(sizeof(T) <= 8, "Invalid callback argument type for ARG_STACK");

@@ -84,7 +84,7 @@ namespace cb_detail
		const bool is_float = std::is_floating_point<T1>::value;
		const bool is_vector = std::is_same<T1, u128>::value;
		const _func_arg_type t = is_float
			? ((f_count >= 12) ? ARG_STACK : ARG_FLOAT)
			? ((f_count >= 13) ? ARG_STACK : ARG_FLOAT)
			: (is_vector ? ((v_count >= 12) ? ARG_STACK : ARG_VECTOR) : ((g_count >= 8) ? ARG_STACK : ARG_GENERAL));
		const int g = g_count + (is_float || is_vector ? 0 : 1);
		const int f = f_count + (is_float ? 1 : 0);
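The 12 → 13 change means one more float argument fits in registers before spilling to the stack. A standalone sketch of the classification rule the template above encodes (register counts taken from this diff, nothing else assumed):

#include <cstdio>

enum arg_class { ARG_GENERAL, ARG_FLOAT, ARG_VECTOR, ARG_STACK };

arg_class classify(bool is_float, bool is_vector, int g_count, int f_count, int v_count)
{
	if (is_float)  return f_count >= 13 ? ARG_STACK : ARG_FLOAT;  // 13 float register slots
	if (is_vector) return v_count >= 12 ? ARG_STACK : ARG_VECTOR; // 12 vector register slots
	return g_count >= 8 ? ARG_STACK : ARG_GENERAL;                // 8 general register slots
}

int main()
{
	std::printf("%d\n", classify(true, false, 0, 13, 0)); // 3, i.e. ARG_STACK
}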
@@ -6,7 +6,7 @@

bool LogBase::CheckLogging() const
{
	return Ini.HLELogging.GetValue();
	return Ini.HLELogging.GetValue() || m_logging;
}

void LogBase::LogOutput(LogType type, const char* info, const std::string& text) const

@@ -31,12 +31,12 @@ public:

	template<typename... Targs> __noinline void Notice(const u32 id, const char* fmt, Targs... args) const
	{
		LogOutput(LogNotice, id, ": ", fmt::Format(fmt, args...));
		LogOutput(LogNotice, id, " : ", fmt::Format(fmt, args...));
	}

	template<typename... Targs> __noinline void Notice(const char* fmt, Targs... args) const
	{
		LogOutput(LogNotice, ": ", fmt::Format(fmt, args...));
		LogOutput(LogNotice, " : ", fmt::Format(fmt, args...));
	}

	template<typename... Targs> __forceinline void Log(const char* fmt, Targs... args) const

@@ -57,12 +57,12 @@ public:

	template<typename... Targs> __noinline void Success(const u32 id, const char* fmt, Targs... args) const
	{
		LogOutput(LogSuccess, id, ": ", fmt::Format(fmt, args...));
		LogOutput(LogSuccess, id, " : ", fmt::Format(fmt, args...));
	}

	template<typename... Targs> __noinline void Success(const char* fmt, Targs... args) const
	{
		LogOutput(LogSuccess, ": ", fmt::Format(fmt, args...));
		LogOutput(LogSuccess, " : ", fmt::Format(fmt, args...));
	}

	template<typename... Targs> __noinline void Warning(const u32 id, const char* fmt, Targs... args) const
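The " : " separator change only affects how the id prefix is joined to the message. As a standalone sketch of the wrapper pattern itself, format once and forward to a single output routine (plain snprintf here, not the emulator's fmt::Format):

#include <cstdio>

// Sketch: a thin variadic logging wrapper with an id prefix and " : " separator.
template<typename... Args>
void notice(unsigned id, const char* fmt, Args... args)
{
	char buf[256];
	std::snprintf(buf, sizeof(buf), fmt, args...);
	std::printf("[%u] : %s\n", id, buf);
}

int main()
{
	notice(42u, "value=0x%x", 0x1234u);
}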
@ -338,12 +338,12 @@ s32 _cellGcmInitBody(vm::ptr<CellGcmContextData> context, u32 cmdSize, u32 ioSiz
|
|||
if (system_mode == CELL_GCM_SYSTEM_MODE_IOMAP_512MB)
|
||||
{
|
||||
cellGcmSys->Warning("cellGcmInit(): 512MB io address space used");
|
||||
Memory.RSXIOMem.SetRange(0x50000000, 0x20000000 /*512MB*/);
|
||||
Memory.RSXIOMem.SetRange(0, 0x20000000 /*512MB*/);
|
||||
}
|
||||
else
|
||||
{
|
||||
cellGcmSys->Warning("cellGcmInit(): 256MB io address space used");
|
||||
Memory.RSXIOMem.SetRange(0x50000000, 0x10000000 /*256MB*/);
|
||||
Memory.RSXIOMem.SetRange(0, 0x10000000 /*256MB*/);
|
||||
}
|
||||
|
||||
if(cellGcmMapEaIoAddress(ioAddress, 0, ioSize) != CELL_OK)
|
||||
|
@ -833,7 +833,7 @@ u32 cellGcmGetMaxIoMapSize()
|
|||
{
|
||||
cellGcmSys->Log("cellGcmGetMaxIoMapSize()");
|
||||
|
||||
return (u32)(Memory.RSXIOMem.GetEndAddr() - Memory.RSXIOMem.GetStartAddr() - Memory.RSXIOMem.GetReservedAmount());
|
||||
return (u32)(Memory.RSXIOMem.GetEndAddr() - Memory.RSXIOMem.GetReservedAmount());
|
||||
}
|
||||
|
||||
void cellGcmGetOffsetTable(vm::ptr<CellGcmOffsetTable> table)
|
||||
|
@ -850,7 +850,7 @@ s32 cellGcmIoOffsetToAddress(u32 ioOffset, u64 address)
|
|||
|
||||
u64 realAddr;
|
||||
|
||||
if (!Memory.RSXIOMem.getRealAddr(Memory.RSXIOMem.GetStartAddr() + ioOffset, realAddr))
|
||||
if (!Memory.RSXIOMem.getRealAddr(ioOffset, realAddr))
|
||||
return CELL_GCM_ERROR_FAILURE;
|
||||
|
||||
vm::write64(address, realAddr);
|
||||
|
@ -865,7 +865,7 @@ s32 cellGcmMapEaIoAddress(u32 ea, u32 io, u32 size)
|
|||
if ((ea & 0xFFFFF) || (io & 0xFFFFF) || (size & 0xFFFFF)) return CELL_GCM_ERROR_FAILURE;
|
||||
|
||||
// Check if the mapping was successfull
|
||||
if (Memory.RSXIOMem.Map(ea, size, Memory.RSXIOMem.GetStartAddr() + io))
|
||||
if (Memory.RSXIOMem.Map(ea, size, io))
|
||||
{
|
||||
// Fill the offset table
|
||||
for (u32 i = 0; i<(size >> 20); i++)
|
||||
|
@ -914,16 +914,13 @@ s32 cellGcmMapMainMemory(u32 ea, u32 size, vm::ptr<be_t<u32>> offset)
|
|||
{
|
||||
cellGcmSys->Warning("cellGcmMapMainMemory(ea=0x%x,size=0x%x,offset_addr=0x%x)", ea, size, offset.addr());
|
||||
|
||||
u32 io;
|
||||
|
||||
if ((ea & 0xFFFFF) || (size & 0xFFFFF)) return CELL_GCM_ERROR_FAILURE;
|
||||
|
||||
//check if the mapping was successfull
|
||||
if (io = (u32)Memory.RSXIOMem.Map(ea, size, 0))
|
||||
{
|
||||
// convert to offset
|
||||
io = io - (u32)Memory.RSXIOMem.GetStartAddr();
|
||||
u32 io = Memory.RSXIOMem.Map(ea, size);
|
||||
|
||||
//check if the mapping was successfull
|
||||
if (Memory.RSXIOMem.Write32(io, 0))
|
||||
{
|
||||
//fill the offset table
|
||||
for (u32 i = 0; i<(size >> 20); i++)
|
||||
{
|
||||
|
@ -968,8 +965,8 @@ s32 cellGcmUnmapEaIoAddress(u64 ea)
|
|||
{
|
||||
cellGcmSys->Log("cellGcmUnmapEaIoAddress(ea=0x%llx)", ea);
|
||||
|
||||
u32 size = Memory.RSXIOMem.UnmapRealAddress(ea);
|
||||
if (size)
|
||||
u32 size;
|
||||
if (Memory.RSXIOMem.UnmapRealAddress(ea, size))
|
||||
{
|
||||
u64 io;
|
||||
ea = ea >> 20;
|
||||
|
@ -994,8 +991,8 @@ s32 cellGcmUnmapIoAddress(u64 io)
|
|||
{
|
||||
cellGcmSys->Log("cellGcmUnmapIoAddress(io=0x%llx)", io);
|
||||
|
||||
u32 size = Memory.RSXIOMem.UnmapAddress(io);
|
||||
if (size)
|
||||
u32 size;
|
||||
if (Memory.RSXIOMem.UnmapAddress(io, size))
|
||||
{
|
||||
u64 ea;
|
||||
io = io >> 20;
|
||||
|
|
|
@ -283,9 +283,9 @@ int _L10nConvertStr(int src_code, const void* src, size_t * src_len, int dst_cod
|
|||
//TODO: Check the code in emulation. If support for UTF8/UTF16/UTF32/UCS2/UCS4 should use wider chars.. awful.
|
||||
int L10nConvertStr(int src_code, vm::ptr<const void> src, vm::ptr<be_t<u32>> src_len, int dst_code, vm::ptr<void> dst, vm::ptr<be_t<u32>> dst_len)
|
||||
{
|
||||
cellL10n->Todo("L10nConvertStr(src_code=%d,src=0x%x,src_len=%ld,dst_code=%d,dst=0x%x,dst_len=%ld)",
|
||||
cellL10n->Error("L10nConvertStr(src_code=%d, srca_addr=0x%x, src_len_addr=0x%x, dst_code=%d, dst_addr=0x%x, dst_len_addr=0x%x)",
|
||||
src_code, src.addr(), src_len.addr(), dst_code, dst.addr(), dst_len.addr());
|
||||
cellL10n->Todo("L10nConvertStr: 1st char at dst: %x(Hex)", *((char*)src.get_ptr()));
|
||||
//cellL10n->Todo("L10nConvertStr: 1st char at dst: 0x%x", *((char*)src.get_ptr()));
|
||||
#ifdef _MSC_VER
|
||||
unsigned int srcCode = 0, dstCode = 0; //OEM code pages
|
||||
bool src_page_converted = _L10nCodeParse(src_code, srcCode); //Check if code is in list.
|
||||
|
|
|
@@ -21,6 +21,7 @@ enum MsgDialogState
};

std::atomic<MsgDialogState> g_msg_dialog_state(msgDialogNone);
u64 g_msg_dialog_status;
u64 g_msg_dialog_wait_until;
u32 g_msg_dialog_progress_bar_count;

@@ -100,14 +101,14 @@ int cellMsgDialogOpen2(u32 type, vm::ptr<const char> msgString, vm::ptr<CellMsgD
	case CELL_MSGDIALOG_TYPE_SE_MUTE_ON: break;
	}

	u64 status = CELL_MSGDIALOG_BUTTON_NONE;
	g_msg_dialog_status = CELL_MSGDIALOG_BUTTON_NONE;

	volatile bool m_signal = false;
	CallAfter([type, msg, &status, &m_signal]()
	CallAfter([type, msg, &m_signal]()
	{
		if (Emu.IsStopped()) return;

		MsgDialogCreate(type, msg.c_str(), status);
		MsgDialogCreate(type, msg.c_str(), g_msg_dialog_status);

		m_signal = true;
	});

@@ -134,9 +135,9 @@ int cellMsgDialogOpen2(u32 type, vm::ptr<const char> msgString, vm::ptr<CellMsgD

	if (callback && (g_msg_dialog_state != msgDialogAbort))
	{
		Emu.GetCallbackManager().Register([callback, status, userData]() -> s32
		Emu.GetCallbackManager().Register([callback, userData]() -> s32
		{
			callback((s32)status, userData);
			callback((s32)g_msg_dialog_status, userData);
			return CELL_OK;
		});
	}
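The edits above stop capturing the local status by reference and route everything through g_msg_dialog_status, because the registered callback runs after cellMsgDialogOpen2 has already returned. A standalone sketch of the hazard being avoided (hypothetical names, not RPCS3 code):

#include <cstdint>
#include <functional>
#include <vector>

std::vector<std::function<void()>> g_deferred; // stands in for the callback manager
uint64_t g_dialog_status = 0;                  // long-lived status, like g_msg_dialog_status

void open_dialog()
{
	// capturing a local status variable by reference here would dangle once
	// this function returns; writing through a long-lived global is safe
	g_deferred.push_back([] { g_dialog_status = 1; });
}

int main()
{
	open_dialog();
	for (auto& f : g_deferred) f(); // runs long after open_dialog() returned
	return g_dialog_status == 1 ? 0 : 1;
}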
||||
|
|
|
@ -46,7 +46,7 @@ int cellNetCtlTerm()
|
|||
|
||||
int cellNetCtlGetState(vm::ptr<u32> state)
|
||||
{
|
||||
cellNetCtl->Log("cellNetCtlGetState(state_addr=0x%x)", state.addr());
|
||||
cellNetCtl->Warning("cellNetCtlGetState(state_addr=0x%x)", state.addr());
|
||||
|
||||
*state = CELL_NET_CTL_STATE_Disconnected; // TODO: Allow other states
|
||||
|
||||
|
@ -55,7 +55,7 @@ int cellNetCtlGetState(vm::ptr<u32> state)
|
|||
|
||||
int cellNetCtlAddHandler(vm::ptr<cellNetCtlHandler> handler, vm::ptr<void> arg, vm::ptr<s32> hid)
|
||||
{
|
||||
cellNetCtl->Todo("cellNetCtlAddHandler(handler_addr=0x%x, arg_addr=0x%x, hid=0x%x)", handler.addr(), arg.addr(), hid.addr());
|
||||
cellNetCtl->Todo("cellNetCtlAddHandler(handler_addr=0x%x, arg_addr=0x%x, hid_addr=0x%x)", handler.addr(), arg.addr(), hid.addr());
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
@ -69,7 +69,7 @@ int cellNetCtlDelHandler(s32 hid)
|
|||
|
||||
int cellNetCtlGetInfo(s32 code, vm::ptr<CellNetCtlInfo> info)
|
||||
{
|
||||
cellNetCtl->Todo("cellNetCtlGetInfo(code=%x, info_addr=0x%x)", code, info.addr());
|
||||
cellNetCtl->Todo("cellNetCtlGetInfo(code=0x%x, info_addr=0x%x)", code, info.addr());
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
|
|
@ -105,7 +105,7 @@ u8 pamfGetStreamChannel(vm::ptr<CellPamfReader> pSelf, u8 stream)
|
|||
}
|
||||
else
|
||||
{
|
||||
cellPamf->Error("pamfGetStreamChannel: stream type %x got invalid stream id=%x", pAddr->stream_headers[stream].type, pAddr->stream_headers[stream].stream_id);
|
||||
cellPamf->Error("pamfGetStreamChannel: stream type 0x%x got invalid stream id=0x%x", pAddr->stream_headers[stream].type, pAddr->stream_headers[stream].stream_id);
|
||||
return 0;
|
||||
}
|
||||
case 0xdc:
|
||||
|
|
|
@ -19,7 +19,7 @@ u32 libpngdec_rtoc;
|
|||
s32 pngDecCreate(
|
||||
vm::ptr<u32> mainHandle,
|
||||
vm::ptr<const CellPngDecThreadInParam> param,
|
||||
vm::ptr<const CellPngDecExtThreadInParam> ext = {})
|
||||
vm::ptr<const CellPngDecExtThreadInParam> ext = vm::ptr<const CellPngDecExtThreadInParam>::make(0))
|
||||
{
|
||||
// alloc memory (should probably use param->cbCtrlMallocFunc)
|
||||
auto dec = CellPngDecMainHandle::make(Memory.Alloc(sizeof(PngDecoder), 128));
|
||||
|
@ -60,8 +60,8 @@ s32 pngDecOpen(
|
|||
vm::ptr<u32> subHandle,
|
||||
vm::ptr<const CellPngDecSrc> src,
|
||||
vm::ptr<CellPngDecOpnInfo> openInfo,
|
||||
vm::ptr<const CellPngDecCbCtrlStrm> cb = {},
|
||||
vm::ptr<const CellPngDecOpnParam> param = {})
|
||||
vm::ptr<const CellPngDecCbCtrlStrm> cb = vm::ptr<const CellPngDecCbCtrlStrm>::make(0),
|
||||
vm::ptr<const CellPngDecOpnParam> param = vm::ptr<const CellPngDecOpnParam>::make(0))
|
||||
{
|
||||
// alloc memory (should probably use dec->malloc)
|
||||
auto stream = CellPngDecSubHandle::make(Memory.Alloc(sizeof(PngStream), 128));
|
||||
|
@ -129,7 +129,7 @@ s32 pngDecClose(CellPngDecSubHandle stream)
|
|||
s32 pngReadHeader(
|
||||
CellPngDecSubHandle stream,
|
||||
vm::ptr<CellPngDecInfo> info,
|
||||
vm::ptr<CellPngDecExtInfo> extInfo = {})
|
||||
vm::ptr<CellPngDecExtInfo> extInfo = vm::ptr<CellPngDecExtInfo>::make(0))
|
||||
{
|
||||
CellPngDecInfo& current_info = stream->info;
|
||||
|
||||
|
@ -193,8 +193,8 @@ s32 pngDecSetParameter(
|
|||
CellPngDecSubHandle stream,
|
||||
vm::ptr<const CellPngDecInParam> inParam,
|
||||
vm::ptr<CellPngDecOutParam> outParam,
|
||||
vm::ptr<const CellPngDecExtInParam> extInParam = {},
|
||||
vm::ptr<CellPngDecExtOutParam> extOutParam = {})
|
||||
vm::ptr<const CellPngDecExtInParam> extInParam = vm::ptr<const CellPngDecExtInParam>::make(0),
|
||||
vm::ptr<CellPngDecExtOutParam> extOutParam = vm::ptr<CellPngDecExtOutParam>::make(0))
|
||||
{
|
||||
CellPngDecInfo& current_info = stream->info;
|
||||
CellPngDecOutParam& current_outParam = stream->outParam;
|
||||
|
@ -235,8 +235,8 @@ s32 pngDecodeData(
|
|||
vm::ptr<u8> data,
|
||||
vm::ptr<const CellPngDecDataCtrlParam> dataCtrlParam,
|
||||
vm::ptr<CellPngDecDataOutInfo> dataOutInfo,
|
||||
vm::ptr<const CellPngDecCbCtrlDisp> cbCtrlDisp = {},
|
||||
vm::ptr<CellPngDecDispParam> dispParam = {})
|
||||
vm::ptr<const CellPngDecCbCtrlDisp> cbCtrlDisp = vm::ptr<const CellPngDecCbCtrlDisp>::make(0),
|
||||
vm::ptr<CellPngDecDispParam> dispParam = vm::ptr<CellPngDecDispParam>::make(0))
|
||||
{
|
||||
dataOutInfo->status = CELL_PNGDEC_DEC_STATUS_STOP;
|
||||
|
||||
|
|
|
@ -603,7 +603,7 @@ void cellRescExit()
|
|||
if (IsPalTemporal())
|
||||
{
|
||||
cellGcmSetSecondVFrequency(CELL_GCM_DISPLAY_FREQUENCY_DISABLE);
|
||||
cellGcmSetVBlankHandler({});
|
||||
cellGcmSetVBlankHandler(vm::ptr<void(*)(const u32)>::make(0));
|
||||
//GcmSysTypePrefix::cellGcmSetSecondVHandler(NULL);
|
||||
|
||||
if (IsPalInterpolate())
|
||||
|
@ -780,20 +780,20 @@ int cellRescSetDisplayMode(u32 displayMode)
|
|||
cellGcmSetSecondVFrequency(CELL_GCM_DISPLAY_FREQUENCY_59_94HZ);
|
||||
//cellGcmSetVBlankHandler(IntrHandler50);
|
||||
//cellGcmSetSecondVHandler(IntrHandler60);
|
||||
cellGcmSetFlipHandler({});
|
||||
cellGcmSetFlipHandler(vm::ptr<void(*)(const u32)>::make(0));
|
||||
}
|
||||
else if (IsPalDrop())
|
||||
{
|
||||
//InitLabels();
|
||||
cellGcmSetSecondVFrequency(CELL_GCM_DISPLAY_FREQUENCY_59_94HZ);
|
||||
cellGcmSetVBlankHandler({});
|
||||
cellGcmSetVBlankHandler(vm::ptr<void(*)(const u32)>::make(0));
|
||||
//cellGcmSetSecondVHandler(IntrHandler60Drop);
|
||||
cellGcmSetFlipHandler({});
|
||||
cellGcmSetFlipHandler(vm::ptr<void(*)(const u32)>::make(0));
|
||||
}
|
||||
else if (IsPal60Hsync())
|
||||
{
|
||||
cellGcmSetSecondVFrequency(CELL_GCM_DISPLAY_FREQUENCY_59_94HZ);
|
||||
cellGcmSetVBlankHandler({});
|
||||
cellGcmSetVBlankHandler(vm::ptr<void(*)(const u32)>::make(0));
|
||||
}
|
||||
|
||||
if (s_rescInternalInstance->s_applicationVBlankHandler) SetVBlankHandler(s_rescInternalInstance->s_applicationVBlankHandler);
|
||||
|
|
|
@ -178,7 +178,7 @@ void getSaveDataStat(SaveDataEntry entry, vm::ptr<CellSaveDataStatGet> statGet)
|
|||
strcpy_trunc(statGet->getParam.listParam, entry.listParam);
|
||||
|
||||
statGet->fileNum = 0;
|
||||
statGet->fileList = vm::bptr<CellSaveDataFileStat>::make(0);
|
||||
statGet->fileList.set(be_t<u32>::make(0));
|
||||
statGet->fileListNum = 0;
|
||||
std::string saveDir = "/dev_hdd0/home/00000001/savedata/" + entry.dirName; // TODO: Get the path of the current user
|
||||
vfsDir dir(saveDir);
|
||||
|
@ -210,7 +210,7 @@ void getSaveDataStat(SaveDataEntry entry, vm::ptr<CellSaveDataStatGet> statGet)
|
|||
}
|
||||
}
|
||||
|
||||
statGet->fileList = vm::bptr<CellSaveDataFileStat>::make(be_t<u32>::MakeFromLE((u32)Memory.Alloc(sizeof(CellSaveDataFileStat) * (u32)fileEntries.size(), sizeof(CellSaveDataFileStat))));
|
||||
statGet->fileList = vm::bptr<CellSaveDataFileStat>::make(be_t<u32>::make((u32)Memory.Alloc(sizeof(CellSaveDataFileStat) * (u32)fileEntries.size(), sizeof(CellSaveDataFileStat))));
|
||||
for (u32 i=0; i<fileEntries.size(); i++)
|
||||
memcpy(&statGet->fileList[i], &fileEntries[i], sizeof(CellSaveDataFileStat));
|
||||
}
|
||||
|
|
File diff suppressed because it is too large
|
@ -1,4 +1,7 @@
|
|||
#pragma once
|
||||
#include "Emu/SysCalls/lv2/sys_lwmutex.h"
|
||||
#include "Emu/SysCalls/lv2/sys_lwcond.h"
|
||||
#include "Emu/SysCalls/lv2/sys_spu.h"
|
||||
|
||||
// Core return codes.
|
||||
enum
|
||||
|
@ -14,6 +17,27 @@ enum
|
|||
CELL_SPURS_CORE_ERROR_NULL_POINTER = 0x80410711,
|
||||
};
|
||||
|
||||
//
|
||||
enum
|
||||
{
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_AGAIN = 0x80410801,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_INVAL = 0x80410802,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_NOSYS = 0x80410803,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_NOMEM = 0x80410804,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_SRCH = 0x80410805,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_NOENT = 0x80410806,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_NOEXEC = 0x80410807,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_DEADLK = 0x80410808,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_PERM = 0x80410809,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_BUSY = 0x8041080A,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_ABORT = 0x8041080C,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_FAULT = 0x8041080D,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_CHILD = 0x8041080E,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_STAT = 0x8041080F,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_ALIGN = 0x80410810,
|
||||
CELL_SPURS_POLICY_MODULE_ERROR_NULL_POINTER = 0x80410811,
|
||||
};
|
||||
|
||||
// Task return codes.
|
||||
enum
|
||||
{
|
||||
|
@ -32,6 +56,38 @@ enum
|
|||
CELL_SPURS_TASK_ERROR_SHUTDOWN = 0x80410920,
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
CELL_SPURS_JOB_ERROR_AGAIN = 0x80410A01,
|
||||
CELL_SPURS_JOB_ERROR_INVAL = 0x80410A02,
|
||||
CELL_SPURS_JOB_ERROR_NOSYS = 0x80410A03,
|
||||
CELL_SPURS_JOB_ERROR_NOMEM = 0x80410A04,
|
||||
CELL_SPURS_JOB_ERROR_SRCH = 0x80410A05,
|
||||
CELL_SPURS_JOB_ERROR_NOENT = 0x80410A06,
|
||||
CELL_SPURS_JOB_ERROR_NOEXEC = 0x80410A07,
|
||||
CELL_SPURS_JOB_ERROR_DEADLK = 0x80410A08,
|
||||
CELL_SPURS_JOB_ERROR_PERM = 0x80410A09,
|
||||
CELL_SPURS_JOB_ERROR_BUSY = 0x80410A0A,
|
||||
CELL_SPURS_JOB_ERROR_JOB_DESCRIPTOR = 0x80410A0B,
|
||||
CELL_SPURS_JOB_ERROR_JOB_DESCRIPTOR_SIZE = 0x80410A0C,
|
||||
CELL_SPURS_JOB_ERROR_FAULT = 0x80410A0D,
|
||||
CELL_SPURS_JOB_ERROR_CHILD = 0x80410A0E,
|
||||
CELL_SPURS_JOB_ERROR_STAT = 0x80410A0F,
|
||||
CELL_SPURS_JOB_ERROR_ALIGN = 0x80410A10,
|
||||
CELL_SPURS_JOB_ERROR_NULL_POINTER = 0x80410A11,
|
||||
CELL_SPURS_JOB_ERROR_MEMORY_CORRUPTED = 0x80410A12,
|
||||
|
||||
CELL_SPURS_JOB_ERROR_MEMORY_SIZE = 0x80410A17,
|
||||
CELL_SPURS_JOB_ERROR_UNKNOWN_COMMAND = 0x80410A18,
|
||||
CELL_SPURS_JOB_ERROR_JOBLIST_ALIGNMENT = 0x80410A19,
|
||||
CELL_SPURS_JOB_ERROR_JOB_ALIGNMENT = 0x80410A1a,
|
||||
CELL_SPURS_JOB_ERROR_CALL_OVERFLOW = 0x80410A1b,
|
||||
CELL_SPURS_JOB_ERROR_ABORT = 0x80410A1c,
|
||||
CELL_SPURS_JOB_ERROR_DMALIST_ELEMENT = 0x80410A1d,
|
||||
CELL_SPURS_JOB_ERROR_NUM_CACHE = 0x80410A1e,
|
||||
CELL_SPURS_JOB_ERROR_INVALID_BINARY = 0x80410A1f,
|
||||
};
|
||||
|
||||
// SPURS defines.
|
||||
enum SPURSKernelInterfaces
|
||||
{
|
||||
|
@ -80,26 +136,32 @@ class SPURSManager;
|
|||
class SPURSManagerEventFlag;
|
||||
class SPURSManagerTaskset;
|
||||
|
||||
// Core CellSpurs structures.
|
||||
struct CellSpurs
|
||||
{
|
||||
SPURSManager *spurs;
|
||||
};
|
||||
|
||||
struct CellSpurs2
|
||||
{
|
||||
SPURSManager *spurs;
|
||||
};
|
||||
struct CellSpurs;
|
||||
|
||||
enum SpursAttrFlags : u32
|
||||
{
|
||||
SAF_NONE = 0x0,
|
||||
SAF_EXIT_IF_NO_WORK = 0x1,
|
||||
SAF_NONE = 0x0,
|
||||
|
||||
SAF_EXIT_IF_NO_WORK = 0x1,
|
||||
SAF_UNKNOWN_FLAG_30 = 0x2,
|
||||
SAF_SECOND_VERSION = 0x4,
|
||||
|
||||
SAF_UNKNOWN_FLAG_9 = 0x00400000,
|
||||
SAF_UNKNOWN_FLAG_8 = 0x00800000,
|
||||
SAF_UNKNOWN_FLAG_7 = 0x01000000,
|
||||
SAF_SYSTEM_WORKLOAD_ENABLED = 0x02000000,
|
||||
SAF_SPU_PRINTF_ENABLED = 0x10000000,
|
||||
SAF_SPU_TGT_EXCLUSIVE_NON_CONTEXT = 0x20000000,
|
||||
SAF_SPU_MEMORY_CONTAINER_SET = 0x40000000,
|
||||
SAF_UNKNOWN_FLAG_0 = 0x80000000,
|
||||
};
|
||||
|
||||
enum SpursFlags1 : u8
|
||||
{
|
||||
SF1_NONE = 0x0,
|
||||
|
||||
SF1_IS_SECOND = 0x40,
|
||||
SF1_EXIT_IF_NO_WORK = 0x80,
|
||||
};
|
||||
|
||||
struct CellSpursAttribute
|
||||
|
@ -113,7 +175,7 @@ struct CellSpursAttribute
|
|||
u8 _u8[size];
|
||||
struct { be_t<u32> _u32[size / sizeof(u32)]; };
|
||||
|
||||
// real structure
|
||||
// real data
|
||||
struct
|
||||
{
|
||||
be_t<u32> revision; // 0x0
|
||||
|
@ -140,6 +202,214 @@ struct CellSpursAttribute
|
|||
};
|
||||
};
|
||||
|
||||
struct CellSpursWorkloadFlag
|
||||
{
|
||||
be_t<u64> unused0;
|
||||
be_t<u32> unused1;
|
||||
atomic_t<u32> flag;
|
||||
};
|
||||
|
||||
typedef void(*CellSpursShutdownCompletionEventHook)(vm::ptr<CellSpurs>, u32 wid, vm::ptr<void> arg);
|
||||
|
||||
// Core CellSpurs structures
|
||||
struct CellSpurs
|
||||
{
|
||||
static const uint align = 128;
|
||||
static const uint size = 0x2000; // size of CellSpurs2
|
||||
static const uint size1 = 0x1000; // size of CellSpurs
|
||||
static const uint size2 = 0x1000;
|
||||
|
||||
struct _sub_str1
|
||||
{
|
||||
u8 unk0[0x20];
|
||||
be_t<u64> sem; // 0x20
|
||||
u8 unk1[0x8];
|
||||
vm::bptr<CellSpursShutdownCompletionEventHook, 1, u64> hook; // 0x30
|
||||
vm::bptr<void, 1, u64> hookArg; // 0x38
|
||||
u8 unk2[0x40];
|
||||
};
|
||||
|
||||
static_assert(sizeof(_sub_str1) == 0x80, "Wrong _sub_str1 size");
|
||||
|
||||
struct _sub_str2
|
||||
{
|
||||
be_t<u32> unk0;
|
||||
be_t<u32> unk1;
|
||||
be_t<u32> unk2;
|
||||
be_t<u32> unk3;
|
||||
be_t<u64> port; // 0x10
|
||||
u8 unk_[0x68];
|
||||
};
|
||||
|
||||
static_assert(sizeof(_sub_str2) == 0x80, "Wrong _sub_str2 size");
|
||||
|
||||
struct _sub_str3
|
||||
{
|
||||
vm::bptr<const void, 1, u64> pm; // policy module
|
||||
be_t<u64> data; // spu argument
|
||||
be_t<u32> size;
|
||||
atomic_t<u8> copy;
|
||||
be_t<u64> priority;
|
||||
};
|
||||
|
||||
static_assert(sizeof(_sub_str3) == 0x20, "Wrong _sub_str3 size");
|
||||
|
||||
struct _sub_str4
|
||||
{
|
||||
static const uint size = 0x10;
|
||||
|
||||
vm::bptr<const char, 1, u64> nameClass;
|
||||
vm::bptr<const char, 1, u64> nameInstance;
|
||||
};
|
||||
|
||||
union
|
||||
{
|
||||
// raw data
|
||||
u8 _u8[size];
|
||||
std::array<be_t<u32>, size / sizeof(u32)> _u32;
|
||||
|
||||
// real data
|
||||
struct
|
||||
{
|
||||
atomic_t<u8> wklReadyCount[0x20]; // 0x0 (index = wid)
|
||||
u8 wklA[0x10]; // 0x20 (packed 4-bit data, index = wid % 16, internal index = wid / 16)
|
||||
u8 wklB[0x10]; // 0x30 (packed 4-bit data, index = wid % 16, internal index = wid / 16)
|
||||
u8 wklMinCnt[0x10]; // 0x40 (seems only for first 0..15 wids)
|
||||
atomic_t<u8> wklMaxCnt[0x10]; // 0x50 (packed 4-bit data, index = wid % 16, internal index = wid / 16)
|
||||
CellSpursWorkloadFlag wklFlag; // 0x60
|
||||
atomic_t<u16> wklSet1; // 0x70 (bitset for 0..15 wids)
|
||||
atomic_t<u8> x72; // 0x72
|
||||
u8 x73; // 0x73
|
||||
u8 flags1; // 0x74
|
||||
u8 x75; // 0x75
|
||||
u8 nSpus; // 0x76
|
||||
atomic_t<u8> flagRecv; // 0x77
|
||||
atomic_t<u16> wklSet2; // 0x78 (bitset for 16..32 wids)
|
||||
u8 x7A[6]; // 0x7A
|
||||
atomic_t<u8> wklStat1[0x10]; // 0x80
|
||||
u8 wklD1[0x10]; // 0x90
|
||||
u8 wklE1[0x10]; // 0xA0
|
||||
atomic_t<u32> wklMskA; // 0xB0
|
||||
atomic_t<u32> wklMskB; // 0xB4
|
||||
u8 xB8[5]; // 0xB8
|
||||
atomic_t<u8> xBD; // 0xBD
|
||||
u8 xBE[2]; // 0xBE
|
||||
u8 xC0[8]; // 0xC0
|
||||
u8 xC8; // 0xC8
|
||||
u8 spuPort; // 0xC9
|
||||
u8 xCA; // 0xCA
|
||||
u8 xCB; // 0xCB
|
||||
u8 xCC; // 0xCC
|
||||
u8 xCD; // 0xCD
|
||||
u8 xCE; // 0xCE
|
||||
u8 xCF; // 0xCF
|
||||
atomic_t<u8> wklStat2[0x10]; // 0xD0
|
||||
u8 wklD2[0x10]; // 0xE0
|
||||
u8 wklE2[0x10]; // 0xF0
|
||||
_sub_str1 wklF1[0x10]; // 0x100
|
||||
be_t<u64> unk22; // 0x900
|
||||
u8 unknown7[0x980 - 0x908];
|
||||
be_t<u64> semPrv; // 0x980
|
||||
be_t<u32> unk11; // 0x988
|
||||
be_t<u32> unk12; // 0x98C
|
||||
be_t<u64> unk13; // 0x990
|
||||
u8 unknown4[0xB00 - 0x998];
|
||||
_sub_str3 wklG1[0x10]; // 0xB00
|
||||
_sub_str3 wklSysG; // 0xD00
|
||||
be_t<u64> ppu0; // 0xD20
|
||||
be_t<u64> ppu1; // 0xD28
|
||||
be_t<u32> spuTG; // 0xD30
|
||||
be_t<u32> spus[8]; // 0xD34
|
||||
u8 unknown3[0xD5C - 0xD54];
|
||||
be_t<u32> queue; // 0xD5C
|
||||
be_t<u32> port; // 0xD60
|
||||
atomic_t<u8> xD64; // 0xD64
|
||||
atomic_t<u8> xD65; // 0xD65
|
||||
atomic_t<u8> xD66; // 0xD66
|
||||
atomic_t<u32> enableEH; // 0xD68
|
||||
be_t<u32> exception; // 0xD6C
|
||||
sys_spu_image spuImg; // 0xD70
|
||||
be_t<u32> flags; // 0xD80
|
||||
be_t<s32> spuPriority; // 0xD84
|
||||
be_t<u32> ppuPriority; // 0xD88
|
||||
char prefix[0x0f]; // 0xD8C
|
||||
u8 prefixSize; // 0xD9B
|
||||
be_t<u32> unk5; // 0xD9C
|
||||
be_t<u32> revision; // 0xDA0
|
||||
be_t<u32> sdkVersion; // 0xDA4
|
||||
atomic_t<u64> spups; // 0xDA8
|
||||
sys_lwmutex_t mutex; // 0xDB0
|
||||
sys_lwcond_t cond; // 0xDC8
|
||||
u8 unknown9[0xE00 - 0xDD0];
|
||||
_sub_str4 wklH1[0x10]; // 0xE00
|
||||
_sub_str2 sub3; // 0xF00
|
||||
u8 unknown6[0x1000 - 0xF80];
|
||||
_sub_str3 wklG2[0x10]; // 0x1000
|
||||
_sub_str1 wklF2[0x10]; // 0x1200
|
||||
_sub_str4 wklH2[0x10]; // 0x1A00
|
||||
} m;
|
||||
|
||||
// alternative implementation
|
||||
struct
|
||||
{
|
||||
SPURSManager *spurs;
|
||||
} c;
|
||||
};
|
||||
|
||||
__forceinline atomic_t<u8>& wklStat(const u32 wid)
|
||||
{
|
||||
if (wid & 0x10)
|
||||
{
|
||||
return m.wklStat2[wid & 0xf];
|
||||
}
|
||||
else
|
||||
{
|
||||
return m.wklStat1[wid & 0xf];
|
||||
}
|
||||
}
|
||||
|
||||
__forceinline vm::ptr<sys_lwmutex_t> get_lwmutex()
|
||||
{
|
||||
return vm::ptr<sys_lwmutex_t>::make(Memory.RealToVirtualAddr(&m.mutex));
|
||||
}
|
||||
|
||||
__forceinline vm::ptr<sys_lwcond_t> get_lwcond()
|
||||
{
|
||||
return vm::ptr<sys_lwcond_t>::make(Memory.RealToVirtualAddr(&m.cond));
|
||||
}
|
||||
};
|
||||
|
||||
typedef CellSpurs CellSpurs2;
|
||||
|
||||
struct CellSpursWorkloadAttribute
|
||||
{
|
||||
static const uint align = 8;
|
||||
static const uint size = 512;
|
||||
|
||||
union
|
||||
{
|
||||
// raw data
|
||||
u8 _u8[size];
|
||||
|
||||
// real data
|
||||
struct
|
||||
{
|
||||
be_t<u32> revision;
|
||||
be_t<u32> sdkVersion;
|
||||
vm::bptr<const void> pm;
|
||||
be_t<u32> size;
|
||||
be_t<u64> data;
|
||||
u8 priority[8];
|
||||
be_t<u32> minContention;
|
||||
be_t<u32> maxContention;
|
||||
vm::bptr<const char> nameClass;
|
||||
vm::bptr<const char> nameInstance;
|
||||
vm::bptr<CellSpursShutdownCompletionEventHook> hook;
|
||||
vm::bptr<void> hookArg;
|
||||
} m;
|
||||
};
|
||||
};
|
||||
|
||||
struct CellSpursEventFlag
|
||||
{
|
||||
SPURSManagerEventFlag *eventFlag;
|
||||
|
@ -336,3 +606,6 @@ struct CellSpursTaskBinInfo
|
|||
be_t<u32> __reserved__;
|
||||
CellSpursTaskLsPattern lsPattern;
|
||||
};
|
||||
|
||||
s64 spursAttachLv2EventQueue(vm::ptr<CellSpurs> spurs, u32 queue, vm::ptr<u8> port, s32 isDynamic, bool wasCreated);
|
||||
s64 spursWakeUp(vm::ptr<CellSpurs> spurs);
|
||||
|
|
File diff suppressed because it is too large
|
@@ -31,58 +31,58 @@ enum

struct CellSyncMutex
{
	be_t<u16> m_freed;
	be_t<u16> m_order;

	volatile u32& m_data()
	struct data_t
	{
		return *reinterpret_cast<u32*>(this);
		be_t<u16> m_rel; // release order (increased when mutex is unlocked)
		be_t<u16> m_acq; // acquire order (increased when mutex is locked)
	};

	atomic_t<data_t> data;
};

static_assert(sizeof(CellSyncMutex) == 4, "CellSyncMutex: wrong size");

struct CellSyncBarrier
{
	be_t<s16> m_value;
	be_t<s16> m_count;

	volatile u32& m_data()
	struct data_t
	{
		return *reinterpret_cast<u32*>(this);
		be_t<s16> m_value;
		be_t<s16> m_count;
	};

	atomic_t<data_t> data;
};

static_assert(sizeof(CellSyncBarrier) == 4, "CellSyncBarrier: wrong size");

struct CellSyncRwm
{
	be_t<u16> m_readers;
	be_t<u16> m_writers;
	struct data_t
	{
		be_t<u16> m_readers;
		be_t<u16> m_writers;
	};

	atomic_t<data_t> data;
	be_t<u32> m_size;
	vm::bptr<void, 1, u64> m_buffer;

	volatile u32& m_data()
	{
		return *reinterpret_cast<u32*>(this);
	};
};

static_assert(sizeof(CellSyncRwm) == 16, "CellSyncRwm: wrong size");

struct CellSyncQueue
{
	be_t<u32> m_v1;
	be_t<u32> m_v2;
	struct data_t
	{
		be_t<u32> m_v1;
		be_t<u32> m_v2;
	};

	atomic_t<data_t> data;
	be_t<u32> m_size;
	be_t<u32> m_depth;
	vm::bptr<u8, 1, u64> m_buffer;
	be_t<u64> reserved;

	volatile u64& m_data()
	{
		return *reinterpret_cast<u64*>(this);
	};
};

static_assert(sizeof(CellSyncQueue) == 32, "CellSyncQueue: wrong size");
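CellSyncMutex now exposes its two 16-bit order counters as one atomic_t<data_t>, which suggests a ticket-style mutex updated whole-word via compare-and-swap. A standalone sketch of that idea with std::atomic; the field layout and semantics are an assumption drawn from the comments above, not from the SDK:

#include <atomic>
#include <cstdint>
#include <thread>

// Illustration only: a packed ticket mutex, high 16 bits = acquire order, low 16 bits = release order.
struct ticket_mutex
{
	std::atomic<uint32_t> data{ 0 };

	void lock()
	{
		uint32_t old_value = data.load();
		uint16_t ticket;
		for (;;)
		{
			ticket = static_cast<uint16_t>(old_value >> 16);
			const uint32_t new_value = (static_cast<uint32_t>(ticket + 1) << 16) | (old_value & 0xffff);
			if (data.compare_exchange_weak(old_value, new_value)) break; // took a ticket
		}
		while (static_cast<uint16_t>(data.load() & 0xffff) != ticket) std::this_thread::yield();
	}

	void unlock()
	{
		uint32_t old_value = data.load();
		for (;;)
		{
			const uint32_t new_value = (old_value & 0xffff0000) | static_cast<uint16_t>((old_value & 0xffff) + 1);
			if (data.compare_exchange_weak(old_value, new_value)) return; // next ticket may run
		}
	}
};

int main()
{
	ticket_mutex m;
	m.lock();
	m.unlock();
}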
|
@ -97,60 +97,68 @@ enum CellSyncQueueDirection : u32 // CellSyncLFQueueDirection
|
|||
|
||||
struct CellSyncLFQueue
|
||||
{
|
||||
be_t<u16> m_h1; // 0x0
|
||||
be_t<u16> m_h2; // 0x2
|
||||
be_t<u16> m_h3; // 0x4
|
||||
be_t<u16> m_h4; // 0x6
|
||||
be_t<u16> m_h5; // 0x8
|
||||
be_t<u16> m_h6; // 0xA
|
||||
be_t<u16> m_h7; // 0xC
|
||||
be_t<u16> m_h8; // 0xE
|
||||
be_t<u32> m_size; // 0x10
|
||||
be_t<u32> m_depth; // 0x14
|
||||
struct pop1_t
|
||||
{
|
||||
be_t<u16> m_h1;
|
||||
be_t<u16> m_h2;
|
||||
be_t<u16> m_h3;
|
||||
be_t<u16> m_h4;
|
||||
};
|
||||
|
||||
struct pop2_t
|
||||
{
|
||||
be_t<u16> pack;
|
||||
};
|
||||
|
||||
struct pop3_t
|
||||
{
|
||||
be_t<u16> m_h1;
|
||||
be_t<u16> m_h2;
|
||||
};
|
||||
|
||||
struct push1_t
|
||||
{
|
||||
be_t<u16> m_h5;
|
||||
be_t<u16> m_h6;
|
||||
be_t<u16> m_h7;
|
||||
be_t<u16> m_h8;
|
||||
};
|
||||
|
||||
struct push2_t
|
||||
{
|
||||
be_t<u16> pack;
|
||||
};
|
||||
|
||||
struct push3_t
|
||||
{
|
||||
be_t<u16> m_h5;
|
||||
be_t<u16> m_h6;
|
||||
};
|
||||
|
||||
union
|
||||
{
|
||||
atomic_t<pop1_t> pop1; // 0x0
|
||||
atomic_t<pop3_t> pop3;
|
||||
};
|
||||
union
|
||||
{
|
||||
atomic_t<push1_t> push1; // 0x8
|
||||
atomic_t<push3_t> push3;
|
||||
};
|
||||
be_t<u32> m_size; // 0x10
|
||||
be_t<u32> m_depth; // 0x14
|
||||
vm::bptr<u8, 1, u64> m_buffer; // 0x18
|
||||
u8 m_bs[4]; // 0x20
|
||||
u8 m_bs[4]; // 0x20
|
||||
be_t<CellSyncQueueDirection> m_direction; // 0x24
|
||||
be_t<u32> m_v1; // 0x28
|
||||
be_t<u32> m_sync; // 0x2C
|
||||
be_t<u16> m_hs[32]; // 0x30
|
||||
be_t<u32> m_v1; // 0x28
|
||||
atomic_t<u32> init; // 0x2C
|
||||
atomic_t<push2_t> push2; // 0x30
|
||||
be_t<u16> m_hs1[15]; // 0x32
|
||||
atomic_t<pop2_t> pop2; // 0x50
|
||||
be_t<u16> m_hs2[15]; // 0x52
|
||||
vm::bptr<void, 1, u64> m_eaSignal; // 0x70
|
||||
be_t<u32> m_v2; // 0x78
|
||||
be_t<u32> m_v3; // 0x7C
|
||||
|
||||
volatile u32& m_data()
|
||||
{
|
||||
return *reinterpret_cast<u32*>((u8*)this + 0x2c);
|
||||
}
|
||||
|
||||
volatile u64& m_push1()
|
||||
{
|
||||
return *reinterpret_cast<u64*>((u8*)this + 0x8);
|
||||
}
|
||||
|
||||
volatile u32& m_push2()
|
||||
{
|
||||
return *reinterpret_cast<u32*>((u8*)this + 0x30);
|
||||
}
|
||||
|
||||
volatile u32& m_push3()
|
||||
{
|
||||
return *reinterpret_cast<u32*>((u8*)this + 0x8);
|
||||
}
|
||||
|
||||
volatile u64& m_pop1()
|
||||
{
|
||||
return *reinterpret_cast<u64*>((u8*)this + 0x0);
|
||||
}
|
||||
|
||||
volatile u32& m_pop2()
|
||||
{
|
||||
return *reinterpret_cast<u32*>((u8*)this + 0x50);
|
||||
}
|
||||
|
||||
volatile u32& m_pop3()
|
||||
{
|
||||
return *reinterpret_cast<u32*>((u8*)this + 0x0);
|
||||
}
|
||||
be_t<u32> m_v2; // 0x78
|
||||
be_t<u32> m_eq_id; // 0x7C
|
||||
};
|
||||
|
||||
static_assert(sizeof(CellSyncLFQueue) == 128, "CellSyncLFQueue: wrong size");
|
||||
|
@ -172,5 +180,5 @@ s32 syncLFQueueGetPopPointer(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32 i
|
|||
s32 syncLFQueueGetPopPointer2(vm::ptr<CellSyncLFQueue> queue, s32& pointer, u32 isBlocking, u32 useEventQueue);
|
||||
s32 syncLFQueueCompletePopPointer(vm::ptr<CellSyncLFQueue> queue, s32 pointer, const std::function<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull);
|
||||
s32 syncLFQueueCompletePopPointer2(vm::ptr<CellSyncLFQueue> queue, s32 pointer, const std::function<s32(u32 addr, u32 arg)> fpSendSignal, u32 noQueueFull);
|
||||
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
|
||||
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<be_t<u32>> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
|
||||
s32 syncLFQueueAttachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
|
||||
s32 syncLFQueueDetachLv2EventQueue(vm::ptr<u32> spus, u32 num, vm::ptr<CellSyncLFQueue> queue);
|
||||
|
|
|
@ -5,7 +5,6 @@
|
|||
#include "Emu/SysCalls/Callback.h"
|
||||
|
||||
#include "Emu/FS/vfsFile.h"
|
||||
#include "Emu/FS/vfsStreamMemory.h"
|
||||
#include "Emu/SysCalls/lv2/sys_spu.h"
|
||||
#include "Emu/SysCalls/lv2/sys_lwmutex.h"
|
||||
#include "Emu/SysCalls/lv2/sys_spinlock.h"
|
||||
|
@ -21,8 +20,6 @@
|
|||
|
||||
Module *sysPrxForUser = nullptr;
|
||||
|
||||
extern u32 LoadSpuImage(vfsStream& stream, u32& spu_ep);
|
||||
|
||||
int _sys_heap_create_heap(const u32 heap_addr, const u32 align, const u32 size)
|
||||
{
|
||||
sysPrxForUser->Warning("_sys_heap_create_heap(heap_addr=0x%x, align=0x%x, size=0x%x)", heap_addr, align, size);
|
||||
|
@ -97,18 +94,9 @@ int sys_spu_elf_get_segments(u32 elf_img, vm::ptr<sys_spu_segment> segments, int
|
|||
|
||||
int sys_spu_image_import(vm::ptr<sys_spu_image> img, u32 src, u32 type)
|
||||
{
|
||||
sysPrxForUser->Warning("sys_spu_image_import(img=0x%x, src=0x%x, type=0x%x)", img.addr(), src, type);
|
||||
sysPrxForUser->Warning("sys_spu_image_import(img=0x%x, src=0x%x, type=%d)", img.addr(), src, type);
|
||||
|
||||
vfsStreamMemory f(src);
|
||||
u32 entry;
|
||||
u32 offset = LoadSpuImage(f, entry);
|
||||
|
||||
img->type = type;
|
||||
img->entry_point = entry;
|
||||
img->segs_addr = offset;
|
||||
img->nsegs = 0;
|
||||
|
||||
return CELL_OK;
|
||||
return spu_image_import(*img, src, type);
|
||||
}
|
||||
|
||||
int sys_spu_image_close(vm::ptr<sys_spu_image> img)
|
||||
|
@ -143,7 +131,7 @@ int sys_raw_spu_image_load(int id, vm::ptr<sys_spu_image> img)
|
|||
sysPrxForUser->Warning("sys_raw_spu_image_load(id=0x%x, img_addr=0x%x)", id, img.addr());
|
||||
|
||||
// TODO: use segment info
|
||||
memcpy(vm::get_ptr<void>(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id), vm::get_ptr<void>(img->segs_addr), 256 * 1024);
|
||||
memcpy(vm::get_ptr<void>(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id), vm::get_ptr<void>(img->addr), 256 * 1024);
|
||||
vm::write32(RAW_SPU_BASE_ADDR + RAW_SPU_OFFSET * id + RAW_SPU_PROB_OFFSET + SPU_NPC_offs, (u32)img->entry_point);
|
||||
|
||||
return CELL_OK;
|
||||
|
@ -238,8 +226,6 @@ vm::ptr<char> _sys_strncpy(vm::ptr<char> dest, vm::ptr<const char> source, u32 l
|
|||
return dest;
|
||||
}
|
||||
|
||||
typedef s32(*spu_printf_cb_t)(u32 arg);
|
||||
|
||||
vm::ptr<spu_printf_cb_t> spu_printf_agcb;
|
||||
vm::ptr<spu_printf_cb_t> spu_printf_dgcb;
|
||||
vm::ptr<spu_printf_cb_t> spu_printf_atcb;
|
||||
|
@ -273,68 +259,63 @@ s32 _sys_spu_printf_finalize()
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s64 _sys_spu_printf_attach_group(u32 arg)
|
||||
s64 _sys_spu_printf_attach_group(u32 group)
|
||||
{
|
||||
sysPrxForUser->Warning("_sys_spu_printf_attach_group(arg=0x%x)", arg);
|
||||
sysPrxForUser->Warning("_sys_spu_printf_attach_group(group=%d)", group);
|
||||
|
||||
if (!spu_printf_agcb)
|
||||
{
|
||||
return CELL_ESTAT;
|
||||
}
|
||||
|
||||
return spu_printf_agcb(arg);
|
||||
return spu_printf_agcb(group);
|
||||
}
|
||||
|
||||
s64 _sys_spu_printf_detach_group(u32 arg)
|
||||
s64 _sys_spu_printf_detach_group(u32 group)
|
||||
{
|
||||
sysPrxForUser->Warning("_sys_spu_printf_detach_group(arg=0x%x)", arg);
|
||||
sysPrxForUser->Warning("_sys_spu_printf_detach_group(group=%d)", group);
|
||||
|
||||
if (!spu_printf_dgcb)
|
||||
{
|
||||
return CELL_ESTAT;
|
||||
}
|
||||
|
||||
return spu_printf_dgcb(arg);
|
||||
return spu_printf_dgcb(group);
|
||||
}
|
||||
|
||||
s64 _sys_spu_printf_attach_thread(u32 arg)
|
||||
s64 _sys_spu_printf_attach_thread(u32 thread)
|
||||
{
|
||||
sysPrxForUser->Warning("_sys_spu_printf_attach_thread(arg=0x%x)", arg);
|
||||
sysPrxForUser->Warning("_sys_spu_printf_attach_thread(thread=%d)", thread);
|
||||
|
||||
if (!spu_printf_atcb)
|
||||
{
|
||||
return CELL_ESTAT;
|
||||
}
|
||||
|
||||
return spu_printf_atcb(arg);
|
||||
return spu_printf_atcb(thread);
|
||||
}
|
||||
|
||||
s64 _sys_spu_printf_detach_thread(u32 arg)
|
||||
s64 _sys_spu_printf_detach_thread(u32 thread)
|
||||
{
|
||||
sysPrxForUser->Warning("_sys_spu_printf_detach_thread(arg=0x%x)", arg);
|
||||
sysPrxForUser->Warning("_sys_spu_printf_detach_thread(thread=%d)", thread);
|
||||
|
||||
if (!spu_printf_dtcb)
|
||||
{
|
||||
return CELL_ESTAT;
|
||||
}
|
||||
|
||||
return spu_printf_dtcb(arg);
|
||||
return spu_printf_dtcb(thread);
|
||||
}
|
||||
|
||||
s32 _sys_snprintf(vm::ptr<char> dst, u32 count, vm::ptr<const char> fmt, u32 a1, u32 a2) // va_args...
|
||||
s32 _sys_snprintf(vm::ptr<char> dst, u32 count, vm::ptr<const char> fmt) // va_args...
|
||||
{
|
||||
sysPrxForUser->Todo("_sys_snprintf(dst_addr=0x%x, count=%d, fmt_addr=0x%x['%s'], ...)", dst.addr(), count, fmt.addr(), fmt.get_ptr());
|
||||
|
||||
if (std::string(fmt.get_ptr()) == "%s_%08x")
|
||||
{
|
||||
return snprintf(dst.get_ptr(), count, fmt.get_ptr(), vm::get_ptr<char>(a1), a2);
|
||||
}
|
||||
|
||||
Emu.Pause();
|
||||
return 0;
|
||||
}
|
||||
|
||||
s32 _sys_printf(vm::ptr<const char> fmt)
|
||||
s32 _sys_printf(vm::ptr<const char> fmt) // va_args...
|
||||
{
|
||||
sysPrxForUser->Todo("_sys_printf(fmt_addr=0x%x, ...)", fmt.addr());
|
||||
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
# pragma once
|
||||
#pragma once
|
||||
|
||||
struct HeapInfo
|
||||
{
|
||||
|
@ -14,5 +14,13 @@ struct HeapInfo
|
|||
}
|
||||
};
|
||||
|
||||
typedef s32(*spu_printf_cb_t)(u32 arg);
|
||||
|
||||
// Aux
|
||||
extern vm::ptr<spu_printf_cb_t> spu_printf_agcb;
|
||||
extern vm::ptr<spu_printf_cb_t> spu_printf_dgcb;
|
||||
extern vm::ptr<spu_printf_cb_t> spu_printf_atcb;
|
||||
extern vm::ptr<spu_printf_cb_t> spu_printf_dtcb;
|
||||
|
||||
// SysCalls
|
||||
vm::ptr<void> _sys_memset(vm::ptr<void> dst, s32 value, u32 size);
|
||||
|
|
|
@ -57,14 +57,14 @@ namespace detail
|
|||
template<typename T, int g_count, int f_count, int v_count>
|
||||
struct bind_arg<T, ARG_STACK, g_count, f_count, v_count>
|
||||
{
|
||||
static_assert(f_count <= 12, "TODO: Unsupported stack argument type (float)");
|
||||
static_assert(f_count <= 13, "TODO: Unsupported stack argument type (float)");
|
||||
static_assert(v_count <= 12, "TODO: Unsupported stack argument type (vector)");
|
||||
static_assert(sizeof(T) <= 8, "Invalid function argument type for ARG_STACK");
|
||||
|
||||
static __forceinline T func(PPUThread& CPU)
|
||||
{
|
||||
// TODO: check stack argument displacement
|
||||
const u64 res = CPU.GetStackArg(8 + std::max(g_count - 8, 0) + std::max(f_count - 12, 0) + std::max(v_count - 12, 0));
|
||||
const u64 res = CPU.GetStackArg(8 + std::max(g_count - 8, 0) + std::max(f_count - 13, 0) + std::max(v_count - 12, 0));
|
||||
return (T&)res;
|
||||
}
|
||||
};
|
||||
|
@ -144,7 +144,7 @@ namespace detail
|
|||
const bool is_float = std::is_floating_point<T>::value;
|
||||
const bool is_vector = std::is_same<T, u128>::value;
|
||||
const bind_arg_type t = is_float
|
||||
? ((f_count >= 12) ? ARG_STACK : ARG_FLOAT)
|
||||
? ((f_count >= 13) ? ARG_STACK : ARG_FLOAT)
|
||||
: (is_vector ? ((v_count >= 12) ? ARG_STACK : ARG_VECTOR) : ((g_count >= 8) ? ARG_STACK : ARG_GENERAL));
|
||||
const int g = g_count + (is_float || is_vector ? 0 : 1);
|
||||
const int f = f_count + (is_float ? 1 : 0);
|
||||
|
|
|
@ -78,6 +78,7 @@ public:
|
|||
case TYPE_MUTEX: m_mutex_name[id] = name; break;
|
||||
case TYPE_COND: m_cond_name[id] = name; break;
|
||||
|
||||
default: LOG_ERROR(GENERAL, "Unknown IDType = %d", type);
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -93,13 +94,17 @@ public:
|
|||
}
|
||||
}
|
||||
|
||||
std::string& GetSyncPrimName(const IDType type, const u32 id)
|
||||
const std::string& GetSyncPrimName(const IDType type, const u32 id)
|
||||
{
|
||||
static const std::string empty = "";
|
||||
|
||||
switch (type)
|
||||
{
|
||||
case TYPE_LWCOND: return m_lw_cond_name[id];
|
||||
case TYPE_MUTEX: return m_mutex_name[id];
|
||||
case TYPE_COND: return m_cond_name[id];
|
||||
|
||||
default: LOG_ERROR(GENERAL, "Unknown IDType = %d", type); return empty;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -14,6 +14,8 @@ s32 sys_cond_create(vm::ptr<be_t<u32>> cond_id, u32 mutex_id, vm::ptr<sys_cond_a
|
|||
sys_cond.Log("sys_cond_create(cond_id_addr=0x%x, mutex_id=%d, attr_addr=0x%x)",
|
||||
cond_id.addr(), mutex_id, attr.addr());
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
if (attr->pshared.ToBE() != se32(0x200))
|
||||
{
|
||||
sys_cond.Error("Invalid pshared attribute(0x%x)", (u32)attr->pshared);
|
||||
|
@ -45,6 +47,8 @@ s32 sys_cond_destroy(u32 cond_id)
|
|||
{
|
||||
sys_cond.Warning("sys_cond_destroy(cond_id=%d)", cond_id);
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
Cond* cond;
|
||||
if (!Emu.GetIdManager().GetIDData(cond_id, cond))
|
||||
{
|
||||
|
|
|
@ -27,6 +27,7 @@ struct Cond
|
|||
, m_queue(name)
|
||||
, signaler(0)
|
||||
{
|
||||
signal.initialize();
|
||||
}
|
||||
};
|
||||
|
||||
|
|
|
@ -10,7 +10,24 @@
|
|||
|
||||
SysCallBase sys_event("sys_event");
|
||||
|
||||
s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, int size)
|
||||
u32 event_queue_create(u32 protocol, s32 type, u64 name_u64, u64 event_queue_key, s32 size)
|
||||
{
|
||||
EventQueue* eq = new EventQueue(protocol, type, name_u64, event_queue_key, size);
|
||||
|
||||
if (event_queue_key && !Emu.GetEventManager().RegisterKey(eq, event_queue_key))
|
||||
{
|
||||
delete eq;
|
||||
return 0;
|
||||
}
|
||||
|
||||
std::string name((const char*)&name_u64, 8);
|
||||
u32 id = sys_event.GetNewId(eq, TYPE_EVENT_QUEUE);
|
||||
sys_event.Warning("*** event_queue created [%s] (protocol=0x%x, type=0x%x, key=0x%llx, size=0x%x): id = %d",
|
||||
name.c_str(), protocol, type, event_queue_key, size, id);
|
||||
return id;
|
||||
}
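
Note: event_queue_create() above owns the raw EventQueue pointer until RegisterKey() or GetNewId() takes it, deleting it by hand on the failure path. A sketch of the same flow with std::unique_ptr, illustrative only and not part of this commit; it assumes <memory> plus the same manager calls shown above:

#include <memory>

// Same logic as event_queue_create above, with explicit ownership (sketch only).
u32 event_queue_create_sketch(u32 protocol, s32 type, u64 name_u64, u64 key, s32 size)
{
    std::unique_ptr<EventQueue> eq(new EventQueue(protocol, type, name_u64, key, size));

    if (key && !Emu.GetEventManager().RegisterKey(eq.get(), key))
    {
        return 0; // eq is freed automatically; the caller maps 0 to CELL_EAGAIN
    }

    return sys_event.GetNewId(eq.release(), TYPE_EVENT_QUEUE); // the id manager takes ownership
}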
|
||||
s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, s32 size)
|
||||
{
|
||||
sys_event.Warning("sys_event_queue_create(equeue_id_addr=0x%x, attr_addr=0x%x, event_queue_key=0x%llx, size=%d)",
|
||||
equeue_id.addr(), attr.addr(), event_queue_key, size);
|
||||
|
@ -33,7 +50,7 @@ s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue
|
|||
{
|
||||
case se32(SYS_PPU_QUEUE): break;
|
||||
case se32(SYS_SPU_QUEUE): break;
|
||||
default: sys_event.Error("Unknown 0x%x type attr", (u32)attr->type); return CELL_EINVAL;
|
||||
default: sys_event.Error("Unknown 0x%x type attr", (s32)attr->type); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (event_queue_key && Emu.GetEventManager().CheckKey(event_queue_key))
|
||||
|
@ -41,20 +58,13 @@ s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue
|
|||
return CELL_EEXIST;
|
||||
}
|
||||
|
||||
EventQueue* eq = new EventQueue((u32)attr->protocol, (int)attr->type, attr->name_u64, event_queue_key, size);
|
||||
|
||||
if (event_queue_key && !Emu.GetEventManager().RegisterKey(eq, event_queue_key))
|
||||
if (u32 id = event_queue_create(attr->protocol, attr->type, attr->name_u64, event_queue_key, size))
|
||||
{
|
||||
delete eq;
|
||||
return CELL_EAGAIN;
|
||||
*equeue_id = id;
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
u32 id = sys_event.GetNewId(eq, TYPE_EVENT_QUEUE);
|
||||
*equeue_id = id;
|
||||
sys_event.Warning("*** event_queue created [%s] (protocol=0x%x, type=0x%x): id = %d",
|
||||
std::string(attr->name, 8).c_str(), (u32)attr->protocol, (int)attr->type, id);
|
||||
|
||||
return CELL_OK;
|
||||
return CELL_EAGAIN;
|
||||
}
|
||||
|
||||
s32 sys_event_queue_destroy(u32 equeue_id, int mode)
|
||||
|
@ -102,7 +112,7 @@ s32 sys_event_queue_destroy(u32 equeue_id, int mode)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_array, int size, vm::ptr<be_t<u32>> number)
|
||||
s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_array, s32 size, vm::ptr<be_t<u32>> number)
|
||||
{
|
||||
sys_event.Todo("sys_event_queue_tryreceive(equeue_id=%d, event_array_addr=0x%x, size=%d, number_addr=0x%x)",
|
||||
equeue_id, event_array.addr(), size, number.addr());
|
||||
|
@ -141,10 +151,11 @@ s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_arra
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 timeout)
|
||||
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event, u64 timeout)
|
||||
{
|
||||
sys_event.Log("sys_event_queue_receive(equeue_id=%d, event_addr=0x%x, timeout=%lld)",
|
||||
equeue_id, event.addr(), timeout);
|
||||
// dummy_event argument is ignored, data returned in registers
|
||||
sys_event.Log("sys_event_queue_receive(equeue_id=%d, dummy_event_addr=0x%x, timeout=%lld)",
|
||||
equeue_id, dummy_event.addr(), timeout);
|
||||
|
||||
EventQueue* eq;
|
||||
if (!Emu.GetIdManager().GetIDData(equeue_id, eq))
|
||||
|
@ -183,19 +194,20 @@ s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 ti
|
|||
}
|
||||
}
|
||||
case SMR_SIGNAL:
|
||||
{
|
||||
eq->events.pop(*event);
|
||||
eq->owner.unlock(tid);
|
||||
sys_event.Log(" *** event received: source=0x%llx, d1=0x%llx, d2=0x%llx, d3=0x%llx",
|
||||
(u64)event->source, (u64)event->data1, (u64)event->data2, (u64)event->data3);
|
||||
/* passing event data in registers */
|
||||
PPUThread& t = GetCurrentPPUThread();
|
||||
t.GPR[4] = event->source;
|
||||
t.GPR[5] = event->data1;
|
||||
t.GPR[6] = event->data2;
|
||||
t.GPR[7] = event->data3;
|
||||
return CELL_OK;
|
||||
}
|
||||
{
|
||||
sys_event_data event;
|
||||
eq->events.pop(event);
|
||||
eq->owner.unlock(tid);
|
||||
sys_event.Log(" *** event received: source=0x%llx, d1=0x%llx, d2=0x%llx, d3=0x%llx",
|
||||
(u64)event.source, (u64)event.data1, (u64)event.data2, (u64)event.data3);
|
||||
/* passing event data in registers */
|
||||
PPUThread& t = GetCurrentPPUThread();
|
||||
t.GPR[4] = event.source;
|
||||
t.GPR[5] = event.data1;
|
||||
t.GPR[6] = event.data2;
|
||||
t.GPR[7] = event.data3;
|
||||
return CELL_OK;
|
||||
}
|
||||
case SMR_FAILED: break;
|
||||
default: eq->sq.invalidate(tid); return CELL_ECANCELED;
|
||||
}
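
Note: with the received event now handed back in GPR[4..7] (the dummy_event pointer is ignored), the register hand-off could live in a tiny helper. A sketch mirroring the SMR_SIGNAL branch above; return_event_in_regs is hypothetical and not part of this commit:

// Hypothetical helper: copy a popped event into the registers that the
// guest-side sys_event_queue_receive stub reads its result from.
static void return_event_in_regs(PPUThread& t, const sys_event_data& event)
{
    t.GPR[4] = event.source;
    t.GPR[5] = event.data1;
    t.GPR[6] = event.data2;
    t.GPR[7] = event.data3;
}

The SMR_SIGNAL case would then pop into a local sys_event_data and call return_event_in_regs(GetCurrentPPUThread(), event).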
|
@ -225,7 +237,16 @@ s32 sys_event_queue_drain(u32 equeue_id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_event_port_create(vm::ptr<be_t<u32>> eport_id, int port_type, u64 name)
|
||||
u32 event_port_create(u64 name)
|
||||
{
|
||||
EventPort* eport = new EventPort();
|
||||
u32 id = sys_event.GetNewId(eport, TYPE_EVENT_PORT);
|
||||
eport->name = name ? name : ((u64)process_getpid() << 32) | (u64)id;
|
||||
sys_event.Warning("*** sys_event_port created: id = %d", id);
|
||||
return id;
|
||||
}
|
||||
|
||||
s32 sys_event_port_create(vm::ptr<be_t<u32>> eport_id, s32 port_type, u64 name)
|
||||
{
|
||||
sys_event.Warning("sys_event_port_create(eport_id_addr=0x%x, port_type=0x%x, name=0x%llx)",
|
||||
eport_id.addr(), port_type, name);
|
||||
|
@ -236,12 +257,7 @@ s32 sys_event_port_create(vm::ptr<be_t<u32>> eport_id, int port_type, u64 name)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
EventPort* eport = new EventPort();
|
||||
u32 id = sys_event.GetNewId(eport, TYPE_EVENT_PORT);
|
||||
eport->name = name ? name : ((u64)process_getpid() << 32) | (u64)id;
|
||||
*eport_id = id;
|
||||
sys_event.Warning("*** sys_event_port created: id = %d", id);
|
||||
|
||||
*eport_id = event_port_create(name);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
|
|
@ -37,7 +37,7 @@ enum EventSourceKey : u64
|
|||
struct sys_event_queue_attr
|
||||
{
|
||||
be_t<u32> protocol; // SYS_SYNC_PRIORITY or SYS_SYNC_FIFO
|
||||
be_t<int> type; // SYS_PPU_QUEUE or SYS_SPU_QUEUE
|
||||
be_t<s32> type; // SYS_PPU_QUEUE or SYS_SPU_QUEUE
|
||||
union
|
||||
{
|
||||
char name[8];
|
||||
|
@ -212,17 +212,22 @@ struct EventQueue
|
|||
, key(key)
|
||||
, events(size) // size: max event count this queue can hold
|
||||
{
|
||||
owner.initialize();
|
||||
}
|
||||
};
|
||||
|
||||
// Aux
|
||||
u32 event_port_create(u64 name);
|
||||
u32 event_queue_create(u32 protocol, s32 type, u64 name_u64, u64 event_queue_key, s32 size);
|
||||
|
||||
// SysCalls
|
||||
s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, int size);
|
||||
s32 sys_event_queue_destroy(u32 equeue_id, int mode);
|
||||
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> event, u64 timeout);
|
||||
s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_array, int size, vm::ptr<be_t<u32>> number);
|
||||
s32 sys_event_queue_create(vm::ptr<be_t<u32>> equeue_id, vm::ptr<sys_event_queue_attr> attr, u64 event_queue_key, s32 size);
|
||||
s32 sys_event_queue_destroy(u32 equeue_id, s32 mode);
|
||||
s32 sys_event_queue_receive(u32 equeue_id, vm::ptr<sys_event_data> dummy_event, u64 timeout);
|
||||
s32 sys_event_queue_tryreceive(u32 equeue_id, vm::ptr<sys_event_data> event_array, s32 size, vm::ptr<be_t<u32>> number);
|
||||
s32 sys_event_queue_drain(u32 event_queue_id);
|
||||
|
||||
s32 sys_event_port_create(vm::ptr<be_t<u32>> eport_id, int port_type, u64 name);
|
||||
s32 sys_event_port_create(vm::ptr<be_t<u32>> eport_id, s32 port_type, u64 name);
|
||||
s32 sys_event_port_destroy(u32 eport_id);
|
||||
s32 sys_event_port_connect_local(u32 event_port_id, u32 event_queue_id);
|
||||
s32 sys_event_port_disconnect(u32 eport_id);
|
||||
|
|
|
@ -45,6 +45,8 @@ struct EventFlag
|
|||
, m_protocol(protocol)
|
||||
, m_type(type)
|
||||
{
|
||||
m_mutex.initialize();
|
||||
signal.initialize();
|
||||
}
|
||||
|
||||
u32 check();
|
||||
|
|
|
@ -9,27 +9,39 @@
|
|||
|
||||
SysCallBase sys_lwcond("sys_lwcond");
|
||||
|
||||
s32 lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name_u64)
|
||||
{
|
||||
LV2_LOCK(0);
|
||||
|
||||
u32 id = sys_lwcond.GetNewId(new Lwcond(name_u64), TYPE_LWCOND);
|
||||
u32 addr = Memory.RealToVirtualAddr(&lwmutex);
|
||||
lwcond.lwmutex.set(be_t<u32>::make(addr));
|
||||
lwcond.lwcond_queue = id;
|
||||
|
||||
std::string name((const char*)&name_u64, 8);
|
||||
|
||||
sys_lwcond.Warning("*** lwcond created [%s] (lwmutex_addr=0x%x): id = %d",
|
||||
name.c_str(), addr, id);
|
||||
|
||||
Emu.GetSyncPrimManager().AddSyncPrimData(TYPE_LWCOND, id, name);
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
|
||||
{
|
||||
sys_lwcond.Log("sys_lwcond_create(lwcond_addr=0x%x, lwmutex_addr=0x%x, attr_addr=0x%x)",
|
||||
lwcond.addr(), lwmutex.addr(), attr.addr());
|
||||
|
||||
u32 id = sys_lwcond.GetNewId(new Lwcond(attr->name_u64), TYPE_LWCOND);
|
||||
lwcond->lwmutex = lwmutex.addr();
|
||||
lwcond->lwcond_queue = id;
|
||||
|
||||
sys_lwcond.Warning("*** lwcond created [%s] (lwmutex_addr=0x%x): id = %d",
|
||||
std::string(attr->name, 8).c_str(), lwmutex.addr(), (u32) lwcond->lwcond_queue);
|
||||
|
||||
Emu.GetSyncPrimManager().AddSyncPrimData(TYPE_LWCOND, id, std::string(attr->name, 8));
|
||||
|
||||
return CELL_OK;
|
||||
return lwcond_create(*lwcond, *lwmutex, attr->name_u64);
|
||||
}
|
||||
|
||||
s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
|
||||
{
|
||||
sys_lwcond.Warning("sys_lwcond_destroy(lwcond_addr=0x%x)", lwcond.addr());
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
u32 id = lwcond->lwcond_queue;
|
||||
|
||||
Lwcond* lw;
|
||||
|
@ -58,7 +70,7 @@ s32 sys_lwcond_signal(vm::ptr<sys_lwcond_t> lwcond)
|
|||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex);
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());
|
||||
|
||||
if (u32 target = (mutex->attribute.ToBE() == se32(SYS_SYNC_PRIORITY) ? lw->m_queue.pop_prio() : lw->m_queue.pop()))
|
||||
{
|
||||
|
@ -84,7 +96,7 @@ s32 sys_lwcond_signal_all(vm::ptr<sys_lwcond_t> lwcond)
|
|||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex);
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());
|
||||
|
||||
while (u32 target = (mutex->attribute.ToBE() == se32(SYS_SYNC_PRIORITY) ? lw->m_queue.pop_prio() : lw->m_queue.pop()))
|
||||
{
|
||||
|
@ -144,9 +156,9 @@ s32 sys_lwcond_wait(vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
|
|||
return CELL_ESRCH;
|
||||
}
|
||||
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex);
|
||||
auto mutex = vm::ptr<sys_lwmutex_t>::make(lwcond->lwmutex.addr());
|
||||
u32 tid_le = GetCurrentPPUThread().GetId();
|
||||
be_t<u32> tid = be_t<u32>::MakeFromLE(tid_le);
|
||||
be_t<u32> tid = be_t<u32>::make(tid_le);
|
||||
|
||||
SleepQueue* sq = nullptr;
|
||||
Emu.GetIdManager().GetIDData((u32)mutex->sleep_queue, sq);
|
||||
|
@ -168,7 +180,7 @@ s32 sys_lwcond_wait(vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
|
|||
|
||||
if (sq)
|
||||
{
|
||||
mutex->mutex.unlock(tid, be_t<u32>::MakeFromLE(mutex->attribute.ToBE() == se32(SYS_SYNC_PRIORITY) ? sq->pop_prio() : sq->pop()));
|
||||
mutex->mutex.unlock(tid, be_t<u32>::make(mutex->attribute.ToBE() == se32(SYS_SYNC_PRIORITY) ? sq->pop_prio() : sq->pop()));
|
||||
}
|
||||
else if (mutex->attribute.ToBE() == se32(SYS_SYNC_RETRY))
|
||||
{
|
||||
|
|
|
@ -1,5 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
struct sys_lwmutex_t;
|
||||
|
||||
struct sys_lwcond_attribute_t
|
||||
{
|
||||
union
|
||||
|
@ -11,7 +13,7 @@ struct sys_lwcond_attribute_t
|
|||
|
||||
struct sys_lwcond_t
|
||||
{
|
||||
be_t<u32> lwmutex;
|
||||
vm::bptr<sys_lwmutex_t> lwmutex;
|
||||
be_t<u32> lwcond_queue;
|
||||
};
|
||||
|
||||
|
@ -23,9 +25,13 @@ struct Lwcond
|
|||
Lwcond(u64 name)
|
||||
: m_queue(name)
|
||||
{
|
||||
signal.initialize();
|
||||
}
|
||||
};
|
||||
|
||||
// Aux
|
||||
s32 lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name_u64);
|
||||
|
||||
// SysCalls
|
||||
s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr);
|
||||
s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond);
|
||||
|
|
|
@ -11,50 +11,54 @@ SysCallBase sys_lwmutex("sys_lwmutex");
|
|||
|
||||
// TODO: move SleepQueue somewhere
|
||||
|
||||
s32 lwmutex_create(sys_lwmutex_t& lwmutex, u32 protocol, u32 recursive, u64 name_u64)
|
||||
{
|
||||
LV2_LOCK(0);
|
||||
|
||||
lwmutex.waiter = ~0;
|
||||
lwmutex.mutex.initialize();
|
||||
lwmutex.attribute = protocol | recursive;
|
||||
lwmutex.recursive_count = 0;
|
||||
u32 sq_id = sys_lwmutex.GetNewId(new SleepQueue(name_u64), TYPE_LWMUTEX);
|
||||
lwmutex.sleep_queue = sq_id;
|
||||
|
||||
std::string name((const char*)&name_u64, 8);
|
||||
sys_lwmutex.Notice("*** lwmutex created [%s] (attribute=0x%x): sq_id = %d", name.c_str(), protocol | recursive, sq_id);
|
||||
|
||||
Emu.GetSyncPrimManager().AddLwMutexData(sq_id, name, GetCurrentPPUThread().GetId());
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr)
|
||||
{
|
||||
sys_lwmutex.Log("sys_lwmutex_create(lwmutex_addr=0x%x, lwmutex_attr_addr=0x%x)",
|
||||
lwmutex.addr(), attr.addr());
|
||||
sys_lwmutex.Warning("sys_lwmutex_create(lwmutex_addr=0x%x, attr_addr=0x%x)", lwmutex.addr(), attr.addr());
|
||||
|
||||
switch (attr->attr_recursive.ToBE())
|
||||
switch (attr->recursive.ToBE())
|
||||
{
|
||||
case se32(SYS_SYNC_RECURSIVE): break;
|
||||
case se32(SYS_SYNC_NOT_RECURSIVE): break;
|
||||
default: sys_lwmutex.Error("Unknown recursive attribute(0x%x)", (u32)attr->attr_recursive); return CELL_EINVAL;
|
||||
default: sys_lwmutex.Error("Unknown recursive attribute(0x%x)", (u32)attr->recursive); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
switch (attr->attr_protocol.ToBE())
|
||||
switch (attr->protocol.ToBE())
|
||||
{
|
||||
case se32(SYS_SYNC_PRIORITY): break;
|
||||
case se32(SYS_SYNC_RETRY): break;
|
||||
case se32(SYS_SYNC_PRIORITY_INHERIT): sys_lwmutex.Error("Invalid SYS_SYNC_PRIORITY_INHERIT protocol attr"); return CELL_EINVAL;
|
||||
case se32(SYS_SYNC_FIFO): break;
|
||||
default: sys_lwmutex.Error("Unknown protocol attribute(0x%x)", (u32)attr->attr_protocol); return CELL_EINVAL;
|
||||
default: sys_lwmutex.Error("Unknown protocol attribute(0x%x)", (u32)attr->protocol); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
lwmutex->attribute = attr->attr_protocol | attr->attr_recursive;
|
||||
//waiter is currently unused by the emulator but some games apparently directly read this value
|
||||
lwmutex->waiter = ~0;
|
||||
lwmutex->mutex.initialize();
|
||||
//lwmutex->waiter = lwmutex->owner.GetOwner();
|
||||
lwmutex->pad = 0;
|
||||
lwmutex->recursive_count = 0;
|
||||
|
||||
u32 sq_id = sys_lwmutex.GetNewId(new SleepQueue(attr->name_u64), TYPE_LWMUTEX);
|
||||
lwmutex->sleep_queue = sq_id;
|
||||
|
||||
sys_lwmutex.Warning("*** lwmutex created [%s] (attribute=0x%x): sq_id = %d",
|
||||
std::string(attr->name, 8).c_str(), (u32) lwmutex->attribute, sq_id);
|
||||
|
||||
Emu.GetSyncPrimManager().AddLwMutexData(sq_id, std::string(attr->name, 8), GetCurrentPPUThread().GetId());
|
||||
|
||||
return CELL_OK;
|
||||
return lwmutex_create(*lwmutex, attr->protocol, attr->recursive, attr->name_u64);
|
||||
}
|
||||
|
||||
s32 sys_lwmutex_destroy(vm::ptr<sys_lwmutex_t> lwmutex)
|
||||
{
|
||||
sys_lwmutex.Warning("sys_lwmutex_destroy(lwmutex_addr=0x%x)", lwmutex.addr());
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
u32 sq_id = lwmutex->sleep_queue;
|
||||
if (!Emu.GetIdManager().CheckID(sq_id)) return CELL_ESRCH;
|
||||
|
||||
|
@ -78,14 +82,14 @@ s32 sys_lwmutex_lock(vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout)
|
|||
//ConLog.Write("*** lock mutex (addr=0x%x, attr=0x%x, Nrec=%d, owner=%d, waiter=%d)",
|
||||
//lwmutex.addr(), (u32)lwmutex->attribute, (u32)lwmutex->recursive_count, lwmutex->vars.parts.owner.GetOwner(), (u32)lwmutex->waiter);
|
||||
|
||||
return lwmutex->lock(be_t<u32>::MakeFromLE(GetCurrentPPUThread().GetId()), timeout ? ((timeout < 1000) ? 1 : (timeout / 1000)) : 0);
|
||||
return lwmutex->lock(be_t<u32>::make(GetCurrentPPUThread().GetId()), timeout ? ((timeout < 1000) ? 1 : (timeout / 1000)) : 0);
|
||||
}
|
||||
|
||||
s32 sys_lwmutex_trylock(vm::ptr<sys_lwmutex_t> lwmutex)
|
||||
{
|
||||
sys_lwmutex.Log("sys_lwmutex_trylock(lwmutex_addr=0x%x)", lwmutex.addr());
|
||||
|
||||
return lwmutex->trylock(be_t<u32>::MakeFromLE(GetCurrentPPUThread().GetId()));
|
||||
return lwmutex->trylock(be_t<u32>::make(GetCurrentPPUThread().GetId()));
|
||||
}
|
||||
|
||||
s32 sys_lwmutex_unlock(vm::ptr<sys_lwmutex_t> lwmutex)
|
||||
|
@ -95,7 +99,7 @@ s32 sys_lwmutex_unlock(vm::ptr<sys_lwmutex_t> lwmutex)
|
|||
//ConLog.Write("*** unlocking mutex (addr=0x%x, attr=0x%x, Nrec=%d, owner=%d, waiter=%d)",
|
||||
//lwmutex.addr(), (u32)lwmutex->attribute, (u32)lwmutex->recursive_count, (u32)lwmutex->vars.parts.owner.GetOwner(), (u32)lwmutex->waiter);
|
||||
|
||||
return lwmutex->unlock(be_t<u32>::MakeFromLE(GetCurrentPPUThread().GetId()));
|
||||
return lwmutex->unlock(be_t<u32>::make(GetCurrentPPUThread().GetId()));
|
||||
}
|
||||
|
||||
void SleepQueue::push(u32 tid)
|
||||
|
@ -291,7 +295,7 @@ int sys_lwmutex_t::unlock(be_t<u32> tid)
|
|||
recursive_count -= 1;
|
||||
if (!recursive_count.ToBE())
|
||||
{
|
||||
be_t<u32> target = be_t<u32>::MakeFromBE(se32(0));
|
||||
be_t<u32> target = be_t<u32>::make(0);
|
||||
switch (attribute.ToBE() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK))
|
||||
{
|
||||
case se32(SYS_SYNC_FIFO):
|
||||
|
|
|
@ -29,8 +29,8 @@ enum
|
|||
|
||||
struct sys_lwmutex_attribute_t
|
||||
{
|
||||
be_t<u32> attr_protocol;
|
||||
be_t<u32> attr_recursive;
|
||||
be_t<u32> protocol;
|
||||
be_t<u32> recursive;
|
||||
union
|
||||
{
|
||||
char name[8];
|
||||
|
@ -66,19 +66,26 @@ struct SleepQueue
|
|||
|
||||
struct sys_lwmutex_t
|
||||
{
|
||||
/* volatile */ SMutexBase<be_t<u32>> mutex;
|
||||
/* volatile */ be_t<u32> waiter; // not used
|
||||
u64 &all_info(){return *(reinterpret_cast<u64*>(this));}
|
||||
SMutexBase<be_t<u32>> mutex;
|
||||
be_t<u32> waiter; // currently not used
|
||||
be_t<u32> attribute;
|
||||
be_t<u32> recursive_count;
|
||||
be_t<u32> sleep_queue;
|
||||
be_t<u32> pad;
|
||||
|
||||
u64& all_info()
|
||||
{
|
||||
return *(reinterpret_cast<u64*>(this));
|
||||
}
|
||||
|
||||
int trylock(be_t<u32> tid);
|
||||
int unlock(be_t<u32> tid);
|
||||
int lock(be_t<u32> tid, u64 timeout);
|
||||
};
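
Note: all_info() reinterprets the first eight bytes of sys_lwmutex_t as a single u64, i.e. the {mutex owner, waiter} pair, so both can be inspected or swapped in one access. That only works under the layout sketched below (an assumption: SMutexBase<be_t<u32>> is just the 32-bit owner word and the compiler inserts no padding between the listed fields):

// Assumed layout behind all_info() (sketch; nothing in this commit asserts these offsets):
//   0x00  mutex            SMutexBase<be_t<u32>>   owner thread id
//   0x04  waiter           be_t<u32>               currently not used
//         ^-- all_info() covers exactly these first two words
//   0x08  attribute   0x0C recursive_count   0x10 sleep_queue   0x14 pad
// Example use inside any of the sys_lwmutex_* handlers above:
const u64 packed = lwmutex->all_info(); // owner and waiter, as stored in guest memory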
|
||||
// Aux
|
||||
s32 lwmutex_create(sys_lwmutex_t& lwmutex, u32 protocol, u32 recursive, u64 name_u64);
|
||||
|
||||
// SysCalls
|
||||
s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attribute_t> attr);
|
||||
s32 sys_lwmutex_destroy(vm::ptr<sys_lwmutex_t> lwmutex);
|
||||
|
|
|
@ -30,6 +30,8 @@ s32 sys_mutex_create(vm::ptr<be_t<u32>> mutex_id, vm::ptr<sys_mutex_attribute> a
|
|||
{
|
||||
sys_mutex.Log("sys_mutex_create(mutex_id_addr=0x%x, attr_addr=0x%x)", mutex_id.addr(), attr.addr());
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
switch (attr->protocol.ToBE())
|
||||
{
|
||||
case se32(SYS_SYNC_FIFO): break;
|
||||
|
@ -73,6 +75,8 @@ s32 sys_mutex_destroy(u32 mutex_id)
|
|||
{
|
||||
sys_mutex.Warning("sys_mutex_destroy(mutex_id=%d)", mutex_id);
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
Mutex* mutex;
|
||||
if (!Emu.GetIdManager().GetIDData(mutex_id, mutex))
|
||||
{
|
||||
|
|
|
@ -34,6 +34,7 @@ struct Mutex
|
|||
, m_queue(name)
|
||||
, cond_count(0)
|
||||
{
|
||||
m_mutex.initialize();
|
||||
}
|
||||
|
||||
~Mutex();
|
||||
|
|
|
@ -3,6 +3,7 @@
|
|||
#include "Emu/System.h"
|
||||
#include "Emu/SysCalls/SysCalls.h"
|
||||
#include "Emu/SysCalls/Callback.h"
|
||||
#include "Emu/Memory/atomic_type.h"
|
||||
|
||||
#include "Emu/CPU/CPUThreadManager.h"
|
||||
#include "Emu/Cell/PPUThread.h"
|
||||
|
@ -147,6 +148,40 @@ s32 sys_ppu_thread_restart(u64 thread_id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
PPUThread* ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, const std::string& name, std::function<void(PPUThread&)> task)
|
||||
{
|
||||
PPUThread& new_thread = *(PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
|
||||
|
||||
u32 id = new_thread.GetId();
|
||||
new_thread.SetEntry(entry);
|
||||
new_thread.SetPrio(prio);
|
||||
new_thread.SetStackSize(stacksize);
|
||||
//new_thread.flags = flags;
|
||||
new_thread.m_has_interrupt = false;
|
||||
new_thread.m_is_interrupt = is_interrupt;
|
||||
new_thread.SetName(name);
|
||||
new_thread.m_custom_task = task;
|
||||
|
||||
sys_ppu_thread.Notice("*** New PPU Thread [%s] (%s, entry=0x%x): id = %d", name.c_str(),
|
||||
is_interrupt ? "interrupt" :
|
||||
(is_joinable ? "joinable" : "non-joinable"), entry, id);
|
||||
|
||||
if (!is_interrupt)
|
||||
{
|
||||
new_thread.Run();
|
||||
new_thread.GPR[3] = arg;
|
||||
new_thread.Exec();
|
||||
}
|
||||
else
|
||||
{
|
||||
new_thread.InitStack();
|
||||
new_thread.InitRegs();
|
||||
new_thread.DoRun();
|
||||
}
|
||||
|
||||
return &new_thread;
|
||||
}
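
Note: ppu_thread_create() is the new shared path for spawning PPU threads from HLE code, and the trailing std::function lets callers attach a host-side task. A usage sketch; the name, priority, stack size and lambda body are invented for illustration, and whether the custom task replaces or wraps normal execution depends on CPUThread code outside this diff:

// Sketch: spawn a joinable PPU thread with a host-side task attached.
PPUThread* t = ppu_thread_create(/*entry*/ 0, /*arg*/ 0, /*prio*/ 1001, /*stacksize*/ 0x10000,
    /*is_joinable*/ true, /*is_interrupt*/ false, "hle_example_thread",
    [](PPUThread& CPU)
    {
        CPU.GPR[3] = 0; // example task body running on the emulated thread
    });
const u32 id = t->GetId();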
|
||||
s32 sys_ppu_thread_create(vm::ptr<be_t<u64>> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::ptr<const char> threadname)
|
||||
{
|
||||
sys_ppu_thread.Log("sys_ppu_thread_create(thread_id_addr=0x%x, entry=0x%x, arg=0x%llx, prio=%d, stacksize=0x%x, flags=0x%llx, threadname_addr=0x%x('%s'))",
|
||||
|
@ -171,41 +206,18 @@ s32 sys_ppu_thread_create(vm::ptr<be_t<u64>> thread_id, u32 entry, u64 arg, s32
|
|||
default: sys_ppu_thread.Error("sys_ppu_thread_create(): unknown flags value (0x%llx)", flags); return CELL_EPERM;
|
||||
}
|
||||
|
||||
PPUThread& new_thread = *(PPUThread*)&Emu.GetCPU().AddThread(CPU_THREAD_PPU);
|
||||
|
||||
*thread_id = new_thread.GetId();
|
||||
new_thread.SetEntry(entry);
|
||||
new_thread.SetPrio(prio);
|
||||
new_thread.SetStackSize(stacksize);
|
||||
//new_thread.flags = flags;
|
||||
new_thread.m_has_interrupt = false;
|
||||
new_thread.m_is_interrupt = is_interrupt;
|
||||
new_thread.SetName(threadname ? threadname.get_ptr() : "");
|
||||
|
||||
sys_ppu_thread.Notice("*** New PPU Thread [%s] (flags=0x%llx, entry=0x%x): id = %d", new_thread.GetName().c_str(), flags, entry, new_thread.GetId());
|
||||
|
||||
if (!is_interrupt)
|
||||
{
|
||||
new_thread.Run();
|
||||
new_thread.GPR[3] = arg;
|
||||
new_thread.Exec();
|
||||
}
|
||||
else
|
||||
{
|
||||
new_thread.InitStack();
|
||||
new_thread.InitRegs();
|
||||
new_thread.DoRun();
|
||||
}
|
||||
std::string name = threadname ? threadname.get_ptr() : "";
|
||||
|
||||
*thread_id = ppu_thread_create(entry, arg, prio, stacksize, is_joinable, is_interrupt, name)->GetId();
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<std::atomic<be_t<u32>>> once_ctrl, vm::ptr<void(*)()> init)
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void(*)()> init)
|
||||
{
|
||||
sys_ppu_thread.Warning("sys_ppu_thread_once(once_ctrl_addr=0x%x, init_addr=0x%x)", once_ctrl.addr(), init.addr());
|
||||
|
||||
be_t<u32> old = be_t<u32>::MakeFromBE(se32(SYS_PPU_THREAD_ONCE_INIT));
|
||||
if (once_ctrl->compare_exchange_weak(old, be_t<u32>::MakeFromBE(se32(SYS_PPU_THREAD_DONE_INIT))))
|
||||
be_t<u32> cmp = be_t<u32>::make(SYS_PPU_THREAD_ONCE_INIT);
|
||||
if (once_ctrl->compare_and_swap(cmp, be_t<u32>::make(SYS_PPU_THREAD_DONE_INIT)) == cmp)
|
||||
{
|
||||
init.call(CPU);
|
||||
}
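
Note: the once-guard now goes through atomic_t<u32>::compare_and_swap, so init.call() runs only for the thread that moves the guest word from SYS_PPU_THREAD_ONCE_INIT to SYS_PPU_THREAD_DONE_INIT. The same idea in plain host C++, purely for reference (guest memory is of course not accessed this way):

#include <atomic>

std::atomic<uint32_t> once_flag{0};              // 0 = SYS_PPU_THREAD_ONCE_INIT

void call_once_sketch(void (*init)())
{
    uint32_t expected = 0;
    if (once_flag.compare_exchange_strong(expected, 1 /* SYS_PPU_THREAD_DONE_INIT */))
    {
        init();                                  // exactly one caller gets here
    }
}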
|
|
|
@ -1,9 +1,11 @@
|
|||
#pragma once
|
||||
|
||||
enum
|
||||
class PPUThread;
|
||||
|
||||
enum : u32
|
||||
{
|
||||
SYS_PPU_THREAD_ONCE_INIT,
|
||||
SYS_PPU_THREAD_DONE_INIT,
|
||||
SYS_PPU_THREAD_ONCE_INIT = 0,
|
||||
SYS_PPU_THREAD_DONE_INIT = 1,
|
||||
};
|
||||
|
||||
enum ppu_thread_flags : u64
|
||||
|
@ -12,6 +14,9 @@ enum ppu_thread_flags : u64
|
|||
SYS_PPU_THREAD_CREATE_INTERRUPT = 0x2,
|
||||
};
|
||||
|
||||
// Aux
|
||||
PPUThread* ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, const std::string& name, std::function<void(PPUThread&)> task = nullptr);
|
||||
|
||||
// SysCalls
|
||||
void sys_ppu_thread_exit(PPUThread& CPU, u64 errorcode);
|
||||
void sys_internal_ppu_thread_exit(PPUThread& CPU, u64 errorcode);
|
||||
|
@ -25,6 +30,6 @@ s32 sys_ppu_thread_get_stack_information(PPUThread& CPU, u32 info_addr);
|
|||
s32 sys_ppu_thread_stop(u64 thread_id);
|
||||
s32 sys_ppu_thread_restart(u64 thread_id);
|
||||
s32 sys_ppu_thread_create(vm::ptr<be_t<u64>> thread_id, u32 entry, u64 arg, s32 prio, u32 stacksize, u64 flags, vm::ptr<const char> threadname);
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<std::atomic<be_t<u32>>> once_ctrl, vm::ptr<void(*)()> init);
|
||||
void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void(*)()> init);
|
||||
s32 sys_ppu_thread_get_id(PPUThread& CPU, vm::ptr<be_t<u64>> thread_id);
|
||||
s32 sys_ppu_thread_rename(u64 thread_id, vm::ptr<const char> name);
|
||||
|
|
|
@ -5,13 +5,23 @@
|
|||
|
||||
#include "Emu/CPU/CPUThreadManager.h"
|
||||
#include "Emu/Cell/PPUThread.h"
|
||||
#include "sys_semaphore.h"
|
||||
#include "sys_time.h"
|
||||
//#include "Utilities/SMutex.h"
|
||||
#include "sys_semaphore.h"
|
||||
|
||||
SysCallBase sys_semaphore("sys_semaphore");
|
||||
|
||||
s32 sys_semaphore_create(vm::ptr<be_t<u32>> sem, vm::ptr<sys_semaphore_attribute> attr, int initial_count, int max_count)
|
||||
u32 semaphore_create(s32 initial_count, s32 max_count, u32 protocol, u64 name_u64)
|
||||
{
|
||||
LV2_LOCK(0);
|
||||
|
||||
const std::string name((const char*)&name_u64, 8);
|
||||
const u32 id = sys_semaphore.GetNewId(new Semaphore(initial_count, max_count, protocol, name_u64), TYPE_SEMAPHORE);
|
||||
sys_semaphore.Notice("*** semaphore created [%s] (protocol=0x%x): id = %d", name.c_str(), protocol, id);
|
||||
Emu.GetSyncPrimManager().AddSemaphoreData(id, name, initial_count, max_count);
|
||||
return id;
|
||||
}
|
||||
|
||||
s32 sys_semaphore_create(vm::ptr<be_t<u32>> sem, vm::ptr<sys_semaphore_attribute> attr, s32 initial_count, s32 max_count)
|
||||
{
|
||||
sys_semaphore.Warning("sys_semaphore_create(sem_addr=0x%x, attr_addr=0x%x, initial_count=%d, max_count=%d)",
|
||||
sem.addr(), attr.addr(), initial_count, max_count);
|
||||
|
@ -37,13 +47,7 @@ s32 sys_semaphore_create(vm::ptr<be_t<u32>> sem, vm::ptr<sys_semaphore_attribute
|
|||
default: sys_semaphore.Error("Unknown protocol attribute(0x%x)", (u32)attr->protocol); return CELL_EINVAL;
|
||||
}
|
||||
|
||||
u32 id = sys_semaphore.GetNewId(new Semaphore(initial_count, max_count, attr->protocol, attr->name_u64), TYPE_SEMAPHORE);
|
||||
*sem = id;
|
||||
sys_semaphore.Notice("*** semaphore created [%s] (protocol=0x%x): id = %d",
|
||||
std::string(attr->name, 8).c_str(), (u32)attr->protocol, id);
|
||||
|
||||
Emu.GetSyncPrimManager().AddSemaphoreData(id, std::string(attr->name, 8), initial_count, max_count);
|
||||
|
||||
*sem = semaphore_create(initial_count, max_count, attr->protocol, attr->name_u64);
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
|
@ -51,6 +55,8 @@ s32 sys_semaphore_destroy(u32 sem_id)
|
|||
{
|
||||
sys_semaphore.Warning("sys_semaphore_destroy(sem_id=%d)", sem_id);
|
||||
|
||||
LV2_LOCK(0);
|
||||
|
||||
Semaphore* sem;
|
||||
if (!Emu.GetIdManager().GetIDData(sem_id, sem))
|
||||
{
|
||||
|
@ -144,7 +150,7 @@ s32 sys_semaphore_trywait(u32 sem_id)
|
|||
}
|
||||
}
|
||||
|
||||
s32 sys_semaphore_post(u32 sem_id, int count)
|
||||
s32 sys_semaphore_post(u32 sem_id, s32 count)
|
||||
{
|
||||
sys_semaphore.Log("sys_semaphore_post(sem_id=%d, count=%d)", sem_id, count);
|
||||
|
||||
|
@ -159,7 +165,7 @@ s32 sys_semaphore_post(u32 sem_id, int count)
|
|||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
if (count + sem->m_value - (int)sem->m_queue.count() > sem->max)
|
||||
if (count + sem->m_value - (s32)sem->m_queue.count() > sem->max)
|
||||
{
|
||||
return CELL_EBUSY;
|
||||
}
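
Note: the rejection test above nets queued waiters against the posted count, so a post only fails with CELL_EBUSY when count + m_value - m_queue.count() would exceed max. A worked example with invented numbers:

// Suppose max = 4, m_value = 0, and 2 threads are queued waiting.
//   post(count = 5):  5 + 0 - 2 = 3  <= 4  -> accepted
//   post(count = 7):  7 + 0 - 2 = 5  >  4  -> CELL_EBUSY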
|
|
|
@ -7,7 +7,7 @@ struct sys_semaphore_attribute
|
|||
be_t<u32> protocol;
|
||||
be_t<u32> pshared; // undefined
|
||||
be_t<u64> ipc_key; // undefined
|
||||
be_t<int> flags; // undefined
|
||||
be_t<s32> flags; // undefined
|
||||
be_t<u32> pad; // not used
|
||||
union
|
||||
{
|
||||
|
@ -20,14 +20,14 @@ struct Semaphore
|
|||
{
|
||||
std::mutex m_mutex;
|
||||
SleepQueue m_queue;
|
||||
int m_value;
|
||||
s32 m_value;
|
||||
u32 signal;
|
||||
|
||||
const int max;
|
||||
const s32 max;
|
||||
const u32 protocol;
|
||||
const u64 name;
|
||||
|
||||
Semaphore(int initial_count, int max_count, u32 protocol, u64 name)
|
||||
Semaphore(s32 initial_count, s32 max_count, u32 protocol, u64 name)
|
||||
: m_value(initial_count)
|
||||
, signal(0)
|
||||
, max(max_count)
|
||||
|
@ -37,10 +37,13 @@ struct Semaphore
|
|||
}
|
||||
};
|
||||
|
||||
// Aux
|
||||
u32 semaphore_create(s32 initial_count, s32 max_count, u32 protocol, u64 name_u64);
|
||||
|
||||
// SysCalls
|
||||
s32 sys_semaphore_create(vm::ptr<be_t<u32>> sem, vm::ptr<sys_semaphore_attribute> attr, int initial_count, int max_count);
|
||||
s32 sys_semaphore_create(vm::ptr<be_t<u32>> sem, vm::ptr<sys_semaphore_attribute> attr, s32 initial_count, s32 max_count);
|
||||
s32 sys_semaphore_destroy(u32 sem_id);
|
||||
s32 sys_semaphore_wait(u32 sem_id, u64 timeout);
|
||||
s32 sys_semaphore_trywait(u32 sem_id);
|
||||
s32 sys_semaphore_post(u32 sem_id, int count);
|
||||
s32 sys_semaphore_post(u32 sem_id, s32 count);
|
||||
s32 sys_semaphore_get_value(u32 sem_id, vm::ptr<be_t<s32>> count);
|
||||
|
|
|
@ -2,27 +2,28 @@
|
|||
#include "Emu/Memory/Memory.h"
|
||||
#include "Emu/System.h"
|
||||
#include "Emu/SysCalls/SysCalls.h"
|
||||
#include "Emu/Memory/atomic_type.h"
|
||||
|
||||
#include "sys_spinlock.h"
|
||||
|
||||
SysCallBase sys_spinlock("sys_spinlock");
|
||||
|
||||
void sys_spinlock_initialize(vm::ptr<std::atomic<be_t<u32>>> lock)
|
||||
void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock)
|
||||
{
|
||||
sys_spinlock.Log("sys_spinlock_initialize(lock_addr=0x%x)", lock.addr());
|
||||
|
||||
// prx: set 0 and sync
|
||||
*lock = be_t<u32>::MakeFromBE(0);
|
||||
lock->exchange(be_t<u32>::make(0));
|
||||
}
|
||||
|
||||
void sys_spinlock_lock(vm::ptr<std::atomic<be_t<u32>>> lock)
|
||||
void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock)
|
||||
{
|
||||
sys_spinlock.Log("sys_spinlock_lock(lock_addr=0x%x)", lock.addr());
|
||||
|
||||
// prx: exchange with 0xabadcafe, repeat until exchanged with 0
|
||||
while (lock->exchange(be_t<u32>::MakeFromBE(se32(0xabadcafe))).ToBE())
|
||||
while (lock->exchange(be_t<u32>::make(0xabadcafe)).ToBE())
|
||||
{
|
||||
while (lock->load(std::memory_order_relaxed).ToBE())
|
||||
while (lock->read_relaxed().ToBE())
|
||||
{
|
||||
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
|
||||
if (Emu.IsStopped())
|
||||
|
@ -39,12 +40,12 @@ void sys_spinlock_lock(vm::ptr<std::atomic<be_t<u32>>> lock)
|
|||
}
|
||||
}
|
||||
|
||||
s32 sys_spinlock_trylock(vm::ptr<std::atomic<be_t<u32>>> lock)
|
||||
s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock)
|
||||
{
|
||||
sys_spinlock.Log("sys_spinlock_trylock(lock_addr=0x%x)", lock.addr());
|
||||
|
||||
// prx: exchange with 0xabadcafe, translate exchanged value
|
||||
if (lock->exchange(be_t<u32>::MakeFromBE(se32(0xabadcafe))).ToBE())
|
||||
if (lock->exchange(be_t<u32>::make(0xabadcafe)).ToBE())
|
||||
{
|
||||
return CELL_EBUSY;
|
||||
}
|
||||
|
@ -52,10 +53,10 @@ s32 sys_spinlock_trylock(vm::ptr<std::atomic<be_t<u32>>> lock)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
void sys_spinlock_unlock(vm::ptr<std::atomic<be_t<u32>>> lock)
|
||||
void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock)
|
||||
{
|
||||
sys_spinlock.Log("sys_spinlock_unlock(lock_addr=0x%x)", lock.addr());
|
||||
|
||||
// prx: sync and set 0
|
||||
*lock = be_t<u32>::MakeFromBE(0);
|
||||
lock->exchange(be_t<u32>::make(0));
|
||||
}
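
Note: the lock-word protocol used by the three functions above is: 0 means free, 0xabadcafe means held, acquisition is a plain exchange retried until the previous value was 0, and the inner loop just watches the word with relaxed reads. The same protocol on a host-side std::atomic, as a reference sketch only:

#include <atomic>
#include <thread>

std::atomic<uint32_t> lock_word{0}; // 0 = free

void spin_lock_sketch()
{
    while (lock_word.exchange(0xabadcafe) != 0) // grab, then check what was there before
    {
        while (lock_word.load(std::memory_order_relaxed) != 0)
        {
            std::this_thread::yield(); // the HLE version sleeps 1 ms instead
        }
    }
}

void spin_unlock_sketch()
{
    lock_word.exchange(0); // release by writing 0 back
}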
@ -1,7 +1,7 @@
|
|||
#pragma once
|
||||
|
||||
// SysCalls
|
||||
void sys_spinlock_initialize(vm::ptr<std::atomic<be_t<u32>>> lock);
|
||||
void sys_spinlock_lock(vm::ptr<std::atomic<be_t<u32>>> lock);
|
||||
s32 sys_spinlock_trylock(vm::ptr<std::atomic<be_t<u32>>> lock);
|
||||
void sys_spinlock_unlock(vm::ptr<std::atomic<be_t<u32>>> lock);
|
||||
void sys_spinlock_initialize(vm::ptr<atomic_t<u32>> lock);
|
||||
void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock);
|
||||
s32 sys_spinlock_trylock(vm::ptr<atomic_t<u32>> lock);
|
||||
void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock);
|
||||
|
|
|
@ -5,6 +5,7 @@
|
|||
|
||||
#include "Emu/CPU/CPUThreadManager.h"
|
||||
#include "Emu/Cell/RawSPUThread.h"
|
||||
#include "Emu/FS/vfsStreamMemory.h"
|
||||
#include "Emu/FS/vfsFile.h"
|
||||
#include "Loader/ELF.h"
|
||||
#include "sys_spu.h"
|
||||
|
@ -22,7 +23,20 @@ u32 LoadSpuImage(vfsStream& stream, u32& spu_ep)
|
|||
return spu_offset;
|
||||
}
|
||||
|
||||
//156
|
||||
s32 spu_image_import(sys_spu_image& img, u32 src, u32 type)
|
||||
{
|
||||
vfsStreamMemory f(src);
|
||||
u32 entry;
|
||||
u32 offset = LoadSpuImage(f, entry);
|
||||
|
||||
img.type = SYS_SPU_IMAGE_TYPE_USER;
|
||||
img.entry_point = entry;
|
||||
img.addr = offset; // TODO: writing actual segment info
|
||||
img.nsegs = 1; // wrong value
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
s32 sys_spu_image_open(vm::ptr<sys_spu_image> img, vm::ptr<const char> path)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_image_open(img_addr=0x%x, path_addr=0x%x [%s])", img.addr(), path.addr(), path.get_ptr());
|
||||
|
@ -37,15 +51,48 @@ s32 sys_spu_image_open(vm::ptr<sys_spu_image> img, vm::ptr<const char> path)
|
|||
u32 entry;
|
||||
u32 offset = LoadSpuImage(f, entry);
|
||||
|
||||
img->type = 1;
|
||||
img->type = SYS_SPU_IMAGE_TYPE_USER;
|
||||
img->entry_point = entry;
|
||||
img->segs_addr = offset;
|
||||
img->nsegs = 0;
|
||||
img->addr = offset; // TODO: writing actual segment info
|
||||
img->nsegs = 1; // wrong value
|
||||
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
//172
|
||||
SPUThread* spu_thread_initialize(SpuGroupInfo* group, u32 spu_num, sys_spu_image& img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task)
|
||||
{
|
||||
if (option)
|
||||
{
|
||||
sys_spu.Todo("Unsupported SPU Thread options (0x%x)", option);
|
||||
}
|
||||
|
||||
u32 spu_ep = (u32)img.entry_point;
|
||||
// Copy SPU image:
|
||||
// TODO: use segment info
|
||||
u32 spu_offset = (u32)Memory.Alloc(256 * 1024, 4096);
|
||||
memcpy(vm::get_ptr<void>(spu_offset), vm::get_ptr<void>(img.addr), 256 * 1024);
|
||||
|
||||
SPUThread& new_thread = static_cast<SPUThread&>(Emu.GetCPU().AddThread(CPU_THREAD_SPU));
|
||||
//initialize from new place:
|
||||
new_thread.SetOffset(spu_offset);
|
||||
new_thread.SetEntry(spu_ep);
|
||||
new_thread.SetName(name);
|
||||
new_thread.m_custom_task = task;
|
||||
new_thread.Run();
|
||||
new_thread.GPR[3] = u128::from64(0, a1);
|
||||
new_thread.GPR[4] = u128::from64(0, a2);
|
||||
new_thread.GPR[5] = u128::from64(0, a3);
|
||||
new_thread.GPR[6] = u128::from64(0, a4);
|
||||
|
||||
const u32 id = new_thread.GetId();
|
||||
if (group) group->list[spu_num] = id;
|
||||
new_thread.group = group;
|
||||
|
||||
sys_spu.Warning("*** New SPU Thread [%s] (ep=0x%x, opt=0x%x, a1=0x%llx, a2=0x%llx, a3=0x%llx, a4=0x%llx): id=%d, spu_offset=0x%x",
|
||||
name.c_str(), spu_ep, option, a1, a2, a3, a4, id, spu_offset);
|
||||
return &new_thread;
|
||||
}
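
Note: the copy above is still a blind 256 KB memcpy of the whole LS image; the TODO asks for segment-driven loading. A sketch of what that could look like once sys_spu_image carries a real segment table (hypothetical, not part of this commit; it leans on the sys_spu_segment layout introduced further down, where addr doubles as the fill value for FILL segments):

// Hypothetical segment-driven image load (sketch): assumes img.segs points at
// img.nsegs valid sys_spu_segment entries instead of a raw LS blob.
static void copy_spu_image_segments(u32 ls_base, sys_spu_image& img)
{
    const sys_spu_segment* segs = vm::get_ptr<sys_spu_segment>(img.segs.addr());

    for (s32 i = 0; i < (s32)img.nsegs; i++)
    {
        const sys_spu_segment& seg = segs[i];

        switch ((s32)seg.type)
        {
        case SYS_SPU_SEGMENT_TYPE_COPY:
            memcpy(vm::get_ptr<void>(ls_base + (u32)seg.ls), vm::get_ptr<void>((u32)seg.addr), (u32)seg.size);
            break;

        case SYS_SPU_SEGMENT_TYPE_FILL:
            for (s32 pos = 0; pos < (s32)seg.size; pos += 4)
            {
                vm::write32(ls_base + (u32)seg.ls + pos, (u32)seg.addr); // addr is the 32-bit fill pattern here
            }
            break;

        default:
            break; // SYS_SPU_SEGMENT_TYPE_INFO and anything unknown is skipped in this sketch
        }
    }
}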
|
||||
s32 sys_spu_thread_initialize(vm::ptr<be_t<u32>> thread, u32 group, u32 spu_num, vm::ptr<sys_spu_image> img, vm::ptr<sys_spu_thread_attribute> attr, vm::ptr<sys_spu_thread_argument> arg)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_initialize(thread_addr=0x%x, group=0x%x, spu_num=%d, img_addr=0x%x, attr_addr=0x%x, arg_addr=0x%x)",
|
||||
|
@ -67,49 +114,23 @@ s32 sys_spu_thread_initialize(vm::ptr<be_t<u32>> thread, u32 group, u32 spu_num,
|
|||
return CELL_EBUSY;
|
||||
}
|
||||
|
||||
u32 spu_ep = (u32)img->entry_point;
|
||||
|
||||
std::string name = "SPUThread";
|
||||
if (attr->name)
|
||||
{
|
||||
name = std::string(attr->name.get_ptr(), attr->name_len);
|
||||
}
|
||||
|
||||
u64 a1 = arg->arg1;
|
||||
u64 a2 = arg->arg2;
|
||||
u64 a3 = arg->arg3;
|
||||
u64 a4 = arg->arg4;
|
||||
|
||||
// Copy SPU image:
|
||||
// TODO: use correct segment info
|
||||
u32 spu_offset = (u32)Memory.Alloc(256 * 1024, 4096);
|
||||
memcpy(vm::get_ptr<void>(spu_offset), vm::get_ptr<void>(img->segs_addr), 256 * 1024);
|
||||
|
||||
CPUThread& new_thread = Emu.GetCPU().AddThread(CPU_THREAD_SPU);
|
||||
//initialize from new place:
|
||||
new_thread.SetOffset(spu_offset);
|
||||
new_thread.SetEntry(spu_ep);
|
||||
new_thread.SetName(name);
|
||||
new_thread.Run();
|
||||
static_cast<SPUThread&>(new_thread).GPR[3] = u128::from64(0, a1);
|
||||
static_cast<SPUThread&>(new_thread).GPR[4] = u128::from64(0, a2);
|
||||
static_cast<SPUThread&>(new_thread).GPR[5] = u128::from64(0, a3);
|
||||
static_cast<SPUThread&>(new_thread).GPR[6] = u128::from64(0, a4);
|
||||
|
||||
u32 id = new_thread.GetId();
|
||||
*thread = group_info->list[spu_num] = id;
|
||||
static_cast<SPUThread&>(new_thread).group = group_info;
|
||||
|
||||
sys_spu.Warning("*** New SPU Thread [%s] (img_offset=0x%x, ls_offset=0x%x, ep=0x%x, a1=0x%llx, a2=0x%llx, a3=0x%llx, a4=0x%llx): id=%d",
|
||||
(attr->name ? attr->name.get_ptr() : ""), (u32)img->segs_addr, ((SPUThread&)new_thread).dmac.ls_offset, spu_ep, a1, a2, a3, a4, id);
|
||||
|
||||
*thread = spu_thread_initialize(
|
||||
group_info,
|
||||
spu_num,
|
||||
*img,
|
||||
attr->name ? std::string(attr->name.get_ptr(), attr->name_len) : "SPUThread",
|
||||
attr->option,
|
||||
arg->arg1,
|
||||
arg->arg2,
|
||||
arg->arg3,
|
||||
arg->arg4)->GetId();
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
//166
|
||||
s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_set_argument(id=%d, arg_addr=0x%x)", id, arg.addr());
|
||||
|
||||
CPUThread* thr = Emu.GetCPU().GetThread(id);
|
||||
|
||||
if(!thr || thr->GetType() != CPU_THREAD_SPU)
|
||||
|
@ -127,7 +148,6 @@ s32 sys_spu_thread_set_argument(u32 id, vm::ptr<sys_spu_thread_argument> arg)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//165
|
||||
s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<be_t<u32>> status)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_get_exit_status(id=%d, status_addr=0x%x)", id, status.addr());
|
||||
|
@ -149,7 +169,6 @@ s32 sys_spu_thread_get_exit_status(u32 id, vm::ptr<be_t<u32>> status)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//171
|
||||
s32 sys_spu_thread_group_destroy(u32 id)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_group_destroy(id=%d)", id);
|
||||
|
@ -192,7 +211,6 @@ s32 sys_spu_thread_group_destroy(u32 id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//173
|
||||
s32 sys_spu_thread_group_start(u32 id)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_group_start(id=%d)", id);
|
||||
|
@ -218,6 +236,7 @@ s32 sys_spu_thread_group_start(u32 id)
|
|||
CPUThread* t = Emu.GetCPU().GetThread(group_info->list[i]);
|
||||
if (t)
|
||||
{
|
||||
((SPUThread*)t)->SPU.Status.SetValue(SPU_STATUS_RUNNING);
|
||||
t->Exec();
|
||||
}
|
||||
}
|
||||
|
@ -228,7 +247,6 @@ s32 sys_spu_thread_group_start(u32 id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//174
|
||||
s32 sys_spu_thread_group_suspend(u32 id)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_group_suspend(id=%d)", id);
|
||||
|
@ -275,7 +293,6 @@ s32 sys_spu_thread_group_suspend(u32 id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//175
|
||||
s32 sys_spu_thread_group_resume(u32 id)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_group_resume(id=%d)", id);
|
||||
|
@ -323,7 +340,6 @@ s32 sys_spu_thread_group_resume(u32 id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//176: Left doing nothing, indeed
|
||||
s32 sys_spu_thread_group_yield(u32 id)
|
||||
{
|
||||
sys_spu.Error("sys_spu_thread_group_yield(id=%d)", id);
|
||||
|
@ -366,7 +382,6 @@ s32 sys_spu_thread_group_yield(u32 id)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//177: Left omit the EPERM check.
|
||||
s32 sys_spu_thread_group_terminate(u32 id, int value)
|
||||
{
|
||||
sys_spu.Error("sys_spu_thread_group_terminate(id=%d, value=%d)", id, value);
|
||||
|
@ -406,27 +421,37 @@ s32 sys_spu_thread_group_terminate(u32 id, int value)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//170
|
||||
s32 sys_spu_thread_group_create(vm::ptr<be_t<u32>> id, u32 num, int prio, vm::ptr<sys_spu_thread_group_attribute> attr)
|
||||
SpuGroupInfo* spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container)
|
||||
{
|
||||
LV2_LOCK(0);
|
||||
|
||||
if (type)
|
||||
{
|
||||
sys_spu.Todo("Unsupported SPU Thread Group type (0x%x)", type);
|
||||
}
|
||||
|
||||
auto group = new SpuGroupInfo(name, num, prio, type, container);
|
||||
const u32 _id = sys_spu.GetNewId(group);
|
||||
group->m_id = _id;
|
||||
sys_spu.Notice("*** SPU Thread Group created [%s] (num=%d, prio=%d, type=0x%x, container=%d): id=%d",
|
||||
name.c_str(), num, prio, type, container, _id);
|
||||
return group;
|
||||
}
|
||||
|
||||
s32 sys_spu_thread_group_create(vm::ptr<be_t<u32>> id, u32 num, s32 prio, vm::ptr<sys_spu_thread_group_attribute> attr)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_group_create(id_addr=0x%x, num=%d, prio=%d, attr_addr=0x%x)",
|
||||
id.addr(), num, prio, attr.addr());
|
||||
|
||||
if (num > 256) return CELL_EINVAL;
|
||||
|
||||
if (prio < 16 || prio > 255) return CELL_EINVAL;
|
||||
|
||||
const std::string name(attr->name.get_ptr(), attr->nsize);
|
||||
|
||||
*id = sys_spu.GetNewId(new SpuGroupInfo(name, num, prio, attr->type, attr->ct));
|
||||
|
||||
sys_spu.Warning("*** SPU Thread Group created [%s] (type=0x%x, option.ct=0x%x): id=%d",
|
||||
name.c_str(), (int)attr->type, (u32)attr->ct, (u32)*id);
|
||||
if (!num || num > 6 || prio < 16 || prio > 255)
|
||||
{
|
||||
return CELL_EINVAL;
|
||||
}
|
||||
|
||||
*id = spu_thread_group_create(std::string(attr->name.get_ptr(), attr->nsize - 1), num, prio, attr->type, attr->ct)->m_id;
|
||||
return CELL_OK;
|
||||
}
|
||||
|
||||
//178
|
||||
s32 sys_spu_thread_group_join(u32 id, vm::ptr<be_t<u32>> cause, vm::ptr<be_t<u32>> status)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_group_join(id=%d, cause_addr=0x%x, status_addr=0x%x)", id, cause.addr(), status.addr());
|
||||
|
@ -447,7 +472,7 @@ s32 sys_spu_thread_group_join(u32 id, vm::ptr<be_t<u32>> cause, vm::ptr<be_t<u32
|
|||
{
|
||||
while (CPUThread* t = Emu.GetCPU().GetThread(group_info->list[i]))
|
||||
{
|
||||
if (!t->IsRunning())
|
||||
if (!t->IsAlive())
|
||||
{
|
||||
if (((SPUThread*)t)->SPU.Status.GetValue() != SPU_STATUS_STOPPED_BY_STOP)
|
||||
{
|
||||
|
@ -487,7 +512,6 @@ s32 sys_spu_thread_create(vm::ptr<be_t<u32>> thread_id, vm::ptr<be_t<u32>> entry
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//169
|
||||
s32 sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_initialize(max_usable_spu=%d, max_raw_spu=%d)", max_usable_spu, max_raw_spu);
|
||||
|
@ -500,7 +524,6 @@ s32 sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//181
|
||||
s32 sys_spu_thread_write_ls(u32 id, u32 address, u64 value, u32 type)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_write_ls(id=%d, address=0x%x, value=0x%llx, type=0x%x)",
|
||||
|
@ -533,7 +556,6 @@ s32 sys_spu_thread_write_ls(u32 id, u32 address, u64 value, u32 type)
|
|||
}
|
||||
}
|
||||
|
||||
//182
|
||||
s32 sys_spu_thread_read_ls(u32 id, u32 address, vm::ptr<be_t<u64>> value, u32 type)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_read_ls(id=%d, address=0x%x, value_addr=0x%x, type=0x%x)",
|
||||
|
@ -566,10 +588,9 @@ s32 sys_spu_thread_read_ls(u32 id, u32 address, vm::ptr<be_t<u64>> value, u32 ty
|
|||
}
|
||||
}
|
||||
|
||||
//190
|
||||
s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_write_spu_mb(id=%d, value=0x%x)", id, value);
|
||||
sys_spu.Warning("sys_spu_thread_write_spu_mb(id=%d, value=0x%x)", id, value);
|
||||
|
||||
CPUThread* thr = Emu.GetCPU().GetThread(id);
|
||||
|
||||
|
@ -583,7 +604,6 @@ s32 sys_spu_thread_write_spu_mb(u32 id, u32 value)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//187
|
||||
s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_set_spu_cfg(id=%d, value=0x%x)", id, value);
|
||||
|
@ -605,7 +625,6 @@ s32 sys_spu_thread_set_spu_cfg(u32 id, u64 value)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//188
|
||||
s32 sys_spu_thread_get_spu_cfg(u32 id, vm::ptr<be_t<u64>> value)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_get_spu_cfg(id=%d, value_addr=0x%x)", id, value.addr());
|
||||
|
@ -622,7 +641,6 @@ s32 sys_spu_thread_get_spu_cfg(u32 id, vm::ptr<be_t<u64>> value)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//184
|
||||
s32 sys_spu_thread_write_snr(u32 id, u32 number, u32 value)
|
||||
{
|
||||
sys_spu.Log("sys_spu_thread_write_snr(id=%d, number=%d, value=0x%x)", id, number, value);
|
||||
|
@ -713,7 +731,6 @@ s32 sys_spu_thread_connect_event(u32 id, u32 eq_id, u32 et, u8 spup)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//
|
||||
s32 sys_spu_thread_disconnect_event(u32 id, u32 et, u8 spup)
|
||||
{
|
||||
sys_spu.Warning("sys_spu_thread_disconnect_event(id=%d, event_type=0x%x, spup=%d)", id, et, spup);
|
||||
|
@ -884,7 +901,6 @@ s32 sys_spu_thread_group_disconnect_event_all_threads(u32 id, u8 spup)
|
|||
return CELL_OK;
|
||||
}
|
||||
|
||||
//160
|
||||
s32 sys_raw_spu_create(vm::ptr<be_t<u32>> id, u32 attr_addr)
|
||||
{
|
||||
sys_spu.Warning("sys_raw_spu_create(id_addr=0x%x, attr_addr=0x%x)", id.addr(), attr_addr);
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
#pragma once
|
||||
|
||||
enum
|
||||
enum : s32
|
||||
{
|
||||
SYS_SPU_THREAD_GROUP_TYPE_NORMAL = 0x00,
|
||||
SYS_SPU_THREAD_GROUP_TYPE_SEQUENTIAL = 0x01,
|
||||
|
@ -8,7 +8,7 @@ enum
|
|||
SYS_SPU_THREAD_GROUP_TYPE_MEMORY_FROM_CONTAINER = 0x04,
|
||||
SYS_SPU_THREAD_GROUP_TYPE_NON_CONTEXT = 0x08,
|
||||
SYS_SPU_THREAD_GROUP_TYPE_EXCLUSIVE_NON_CONTEXT = 0x18,
|
||||
SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM = 0x20
|
||||
SYS_SPU_THREAD_GROUP_TYPE_COOPERATE_WITH_SYSTEM = 0x20,
|
||||
};
|
||||
|
||||
enum
|
||||
|
@ -18,7 +18,22 @@ enum
|
|||
SYS_SPU_THREAD_GROUP_JOIN_TERMINATED = 0x0004
|
||||
};
|
||||
|
||||
enum {
|
||||
enum
|
||||
{
|
||||
SYS_SPU_THREAD_GROUP_EVENT_RUN = 1,
|
||||
SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION = 2,
|
||||
SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE = 4,
|
||||
};
|
||||
|
||||
enum : u64
|
||||
{
|
||||
SYS_SPU_THREAD_GROUP_EVENT_RUN_KEY = 0xFFFFFFFF53505500ull,
|
||||
SYS_SPU_THREAD_GROUP_EVENT_EXCEPTION_KEY = 0xFFFFFFFF53505503ull,
|
||||
SYS_SPU_THREAD_GROUP_EVENT_SYSTEM_MODULE_KEY = 0xFFFFFFFF53505504ull,
|
||||
};
|
||||
|
||||
enum
|
||||
{
|
||||
SPU_THREAD_GROUP_STATUS_NOT_INITIALIZED,
|
||||
SPU_THREAD_GROUP_STATUS_INITIALIZED,
|
||||
SPU_THREAD_GROUP_STATUS_READY,
|
||||
|
@ -30,11 +45,11 @@ enum {
|
|||
SPU_THREAD_GROUP_STATUS_UNKNOWN
|
||||
};
|
||||
|
||||
enum
|
||||
enum : s32
|
||||
{
|
||||
SYS_SPU_SEGMENT_TYPE_COPY = 0x0001,
|
||||
SYS_SPU_SEGMENT_TYPE_FILL = 0x0002,
|
||||
SYS_SPU_SEGMENT_TYPE_INFO = 0x0004,
|
||||
SYS_SPU_SEGMENT_TYPE_COPY = 1,
|
||||
SYS_SPU_SEGMENT_TYPE_FILL = 2,
|
||||
SYS_SPU_SEGMENT_TYPE_INFO = 4,
|
||||
};
|
||||
|
||||
struct sys_spu_thread_group_attribute
|
||||
|
@ -45,6 +60,13 @@ struct sys_spu_thread_group_attribute
|
|||
be_t<u32> ct; // memory container id
|
||||
};
|
||||
|
||||
enum : u32
|
||||
{
|
||||
SYS_SPU_THREAD_OPTION_NONE = 0,
|
||||
SYS_SPU_THREAD_OPTION_ASYNC_INTR_ENABLE = 1,
|
||||
SYS_SPU_THREAD_OPTION_DEC_SYNC_TB_ENABLE = 2,
|
||||
};
|
||||
|
||||
struct sys_spu_thread_attribute
|
||||
{
|
||||
vm::bptr<const char> name;
|
||||
|
@ -60,20 +82,43 @@ struct sys_spu_thread_argument
|
|||
be_t<u64> arg4;
|
||||
};
|
||||
|
||||
struct sys_spu_image
|
||||
{
|
||||
be_t<u32> type;
|
||||
be_t<u32> entry_point;
|
||||
be_t<u32> segs_addr; //temporarily used as offset of LS image after elf loading
|
||||
be_t<int> nsegs;
|
||||
};
|
||||
|
||||
struct sys_spu_segment
|
||||
{
|
||||
be_t<int> type;
|
||||
be_t<u32> ls_start;
|
||||
be_t<int> size;
|
||||
be_t<u64> src;
|
||||
be_t<s32> type; // copy, fill, info
|
||||
be_t<u32> ls; // local storage address
|
||||
be_t<s32> size;
|
||||
|
||||
union
|
||||
{
|
||||
be_t<u32> addr; // address or fill value
|
||||
u64 pad;
|
||||
};
|
||||
};
|
||||
|
||||
static_assert(sizeof(sys_spu_segment) == 0x18, "Wrong sys_spu_segment size");
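
Note: the 0x18 size asserted above follows from natural alignment: three 32-bit fields, then a union pushed to 8-byte alignment by its u64 member. Spelled out (this is the host-compiler layout the static_assert is there to guard):

// offset 0x00  be_t<s32> type
// offset 0x04  be_t<u32> ls
// offset 0x08  be_t<s32> size
// offset 0x0C  4 bytes of padding (the union below is 8-byte aligned because of u64 pad)
// offset 0x10  union { be_t<u32> addr; u64 pad; }   -> 8 bytes
// total        0x18, matching the static_assert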
|
||||
enum : u32
|
||||
{
|
||||
SYS_SPU_IMAGE_TYPE_USER = 0,
|
||||
SYS_SPU_IMAGE_TYPE_KERNEL = 1,
|
||||
};
|
||||
|
||||
struct sys_spu_image
|
||||
{
|
||||
be_t<u32> type; // user, kernel
|
||||
be_t<u32> entry_point;
|
||||
union
|
||||
{
|
||||
be_t<u32> addr; // temporarily used as offset of the whole LS image (should be removed)
|
||||
vm::bptr<sys_spu_segment> segs;
|
||||
};
|
||||
be_t<s32> nsegs;
|
||||
};
|
||||
|
||||
enum : u32
|
||||
{
|
||||
SYS_SPU_IMAGE_PROTECT = 0,
|
||||
SYS_SPU_IMAGE_DIRECT = 1,
|
||||
};
|
||||
|
||||
struct SpuGroupInfo
|
||||
|
@ -81,15 +126,16 @@ struct SpuGroupInfo
|
|||
std::vector<u32> list;
|
||||
std::atomic<u32> lock;
|
||||
std::string m_name;
|
||||
int m_prio;
|
||||
int m_type;
|
||||
int m_ct;
|
||||
u32 m_id;
|
||||
s32 m_prio;
|
||||
s32 m_type;
|
||||
u32 m_ct;
|
||||
u32 m_count;
|
||||
int m_state; //SPU Thread Group State.
|
||||
s32 m_state; //SPU Thread Group State.
|
||||
u32 m_exit_status;
|
||||
bool m_group_exit;
|
||||
|
||||
SpuGroupInfo(const std::string& name, u32 num, int prio, int type, u32 ct)
|
||||
SpuGroupInfo(const std::string& name, u32 num, s32 prio, s32 type, u32 ct)
|
||||
: m_name(name)
|
||||
, m_prio(prio)
|
||||
, m_type(type)
|
||||
|
@ -107,6 +153,13 @@ struct SpuGroupInfo
|
|||
}
|
||||
};
|
||||
|
||||
class SPUThread;
|
||||
|
||||
// Aux
|
||||
s32 spu_image_import(sys_spu_image& img, u32 src, u32 type);
|
||||
SpuGroupInfo* spu_thread_group_create(const std::string& name, u32 num, s32 prio, s32 type, u32 container);
|
||||
SPUThread* spu_thread_initialize(SpuGroupInfo* group, u32 spu_num, sys_spu_image& img, const std::string& name, u32 option, u64 a1, u64 a2, u64 a3, u64 a4, std::function<void(SPUThread&)> task = nullptr);
|
||||
|
||||
// SysCalls
|
||||
s32 sys_spu_initialize(u32 max_usable_spu, u32 max_raw_spu);
|
||||
s32 sys_spu_image_open(vm::ptr<sys_spu_image> img, vm::ptr<const char> path);
|
||||
|
|
|
@@ -6,12 +6,12 @@
#include "sys_memory.h"
#include "sys_vm.h"

SysCallBase sys_vm("vm");
SysCallBase sys_vm("sys_vm");
MemoryContainerInfo* current_ct;

s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 addr)
{
sys_vm.Todo("sys_vm_memory_map(vsize=0x%x,psize=0x%x,cidr=0x%x,flags=0x%llx,policy=0x%llx,addr=0x%x)",
sys_vm.Error("sys_vm_memory_map(vsize=0x%x, psize=0x%x, cidr=0x%x, flags=0x%llx, policy=0x%llx, addr_addr=0x%x)",
vsize, psize, cid, flag, policy, addr);

// Check virtual size.

@@ -26,25 +26,12 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 addr)
return CELL_ENOMEM;
}

// Use fixed address (TODO: search and use some free address instead)
u32 new_addr = Memory.IsGoodAddr(0x60000000) ? 0x70000000 : 0x60000000;

// If container ID is SYS_MEMORY_CONTAINER_ID_INVALID, allocate directly.
if(cid == SYS_MEMORY_CONTAINER_ID_INVALID)
{
u32 new_addr;
switch(flag)
{
case SYS_MEMORY_PAGE_SIZE_1M:
new_addr = (u32)Memory.Alloc(psize, 0x100000);
break;

case SYS_MEMORY_PAGE_SIZE_64K:
new_addr = (u32)Memory.Alloc(psize, 0x10000);
break;

default: return CELL_EINVAL;
}

if(!new_addr) return CELL_ENOMEM;

// Create a new MemoryContainerInfo to act as default container with vsize.
current_ct = new MemoryContainerInfo(new_addr, vsize);
}

@@ -57,18 +44,22 @@ s32 sys_vm_memory_map(u32 vsize, u32 psize, u32 cid, u64 flag, u64 policy, u32 addr)
current_ct = ct;
}

// Allocate actual memory using virtual size (physical size is ignored)
assert(Memory.Map(new_addr, vsize));

// Write a pointer for the allocated memory.
vm::write32(addr, current_ct->addr);
vm::write32(addr, new_addr);

return CELL_OK;
}

s32 sys_vm_unmap(u32 addr)
{
sys_vm.Todo("sys_vm_unmap(addr=0x%x)", addr);
sys_vm.Error("sys_vm_unmap(addr=0x%x)", addr);

// Simply free the memory to unmap.
if(!Memory.Free(addr)) return CELL_EINVAL;
// Unmap memory.
assert(addr == 0x60000000 || addr == 0x70000000);
if(!Memory.Unmap(addr)) return CELL_EINVAL;

return CELL_OK;
}
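The rewritten sys_vm_memory_map above no longer allocates by page size; it maps one of two fixed virtual bases and reports that base back to the guest. A standalone sketch of just the base-selection logic follows; the is_mapped set is an illustrative stand-in for Memory.IsGoodAddr and the emulator's memory manager.

#include <cstdint>
#include <cstdio>
#include <set>

// Hypothetical record of bases already mapped, standing in for Memory.IsGoodAddr().
static std::set<std::uint32_t> g_mapped;

static bool is_mapped(std::uint32_t addr) { return g_mapped.count(addr) != 0; }

// Mirrors the fixed-address choice in the patched syscall:
// prefer 0x60000000 and fall back to 0x70000000 if the first base is taken.
static std::uint32_t pick_vm_base()
{
	const std::uint32_t base = is_mapped(0x60000000u) ? 0x70000000u : 0x60000000u;
	g_mapped.insert(base); // pretend the mapping succeeded
	return base;
}

int main()
{
	std::printf("0x%x\n", pick_vm_base()); // 0x60000000
	std::printf("0x%x\n", pick_vm_base()); // 0x70000000
}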
@@ -155,16 +155,16 @@ void Elf32_Shdr::LoadLE(vfsStream& f)
void Elf32_Shdr::Show()
{
#ifdef LOADER_DEBUG
LOG_NOTICE(LOADER, "Name offset: %x", sh_name);
LOG_NOTICE(LOADER, "Type: %d", sh_type);
LOG_NOTICE(LOADER, "Addr: %x", sh_addr);
LOG_NOTICE(LOADER, "Offset: %x", sh_offset);
LOG_NOTICE(LOADER, "Size: %x", sh_size);
LOG_NOTICE(LOADER, "Name offset: 0x%x", sh_name);
LOG_NOTICE(LOADER, "Type: 0x%d", sh_type);
LOG_NOTICE(LOADER, "Addr: 0x%x", sh_addr);
LOG_NOTICE(LOADER, "Offset: 0x%x", sh_offset);
LOG_NOTICE(LOADER, "Size: 0x%x", sh_size);
LOG_NOTICE(LOADER, "EntSize: %d", sh_entsize);
LOG_NOTICE(LOADER, "Flags: %x", sh_flags);
LOG_NOTICE(LOADER, "Link: %x", sh_link);
LOG_NOTICE(LOADER, "Flags: 0x%x", sh_flags);
LOG_NOTICE(LOADER, "Link: 0x%x", sh_link);
LOG_NOTICE(LOADER, "Info: %d", sh_info);
LOG_NOTICE(LOADER, "Address align: %x", sh_addralign);
LOG_NOTICE(LOADER, "Address align: 0x%x", sh_addralign);
#endif
}
@@ -77,16 +77,16 @@ void Elf64_Shdr::Load(vfsStream& f)
void Elf64_Shdr::Show()
{
#ifdef LOADER_DEBUG
LOG_NOTICE(LOADER, "Name offset: %x", sh_name);
LOG_NOTICE(LOADER, "Name offset: 0x%x", sh_name);
LOG_NOTICE(LOADER, "Type: %d", sh_type);
LOG_NOTICE(LOADER, "Addr: %llx", sh_addr);
LOG_NOTICE(LOADER, "Offset: %llx", sh_offset);
LOG_NOTICE(LOADER, "Size: %llx", sh_size);
LOG_NOTICE(LOADER, "Addr: 0x%llx", sh_addr);
LOG_NOTICE(LOADER, "Offset: 0x%llx", sh_offset);
LOG_NOTICE(LOADER, "Size: 0x%llx", sh_size);
LOG_NOTICE(LOADER, "EntSize: %lld", sh_entsize);
LOG_NOTICE(LOADER, "Flags: %llx", sh_flags);
LOG_NOTICE(LOADER, "Link: %x", sh_link);
LOG_NOTICE(LOADER, "Info: %x", sh_info);
LOG_NOTICE(LOADER, "Address align: %llx", sh_addralign);
LOG_NOTICE(LOADER, "Flags: 0x%llx", sh_flags);
LOG_NOTICE(LOADER, "Link: 0x%x", sh_link);
LOG_NOTICE(LOADER, "Info: 0x%x", sh_info);
LOG_NOTICE(LOADER, "Address align: 0x%llx", sh_addralign);
#endif
}
@@ -455,7 +455,7 @@ bool ELF64Loader::LoadPhdrData(u64 offset)
const u32 nid = vm::read32(stub.s_nid + i * 4);
const u32 text = vm::read32(stub.s_text + i * 4);

if (module && !module->Load(nid))
if (!module || !module->Load(nid))
{
LOG_WARNING(LOADER, "Unimplemented function '%s' in '%s' module", SysCalls::GetHLEFuncName(nid).c_str(), module_name.c_str());
}
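The one-line condition change above widens the warning: previously a missing module object silenced the message, and only a present module whose Load(nid) failed would warn; now either case warns. A plain-boolean sketch of the two conditions (not the loader's actual types):

#include <cstdio>

int main()
{
	for (int has_module = 0; has_module <= 1; ++has_module)
	{
		for (int load_ok = 0; load_ok <= 1; ++load_ok)
		{
			const bool old_warn = has_module && !load_ok;  // old condition
			const bool new_warn = !has_module || !load_ok; // new condition
			std::printf("module=%d load_ok=%d old=%d new=%d\n", has_module, load_ok, old_warn, new_warn);
		}
	}
}

Only the module=0 rows differ, which is exactly the case the old code skipped.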
@@ -35,7 +35,7 @@ const std::string Ehdr_OS_ABIToString(const u8 os_abi)
case 0x66: return "Cell OS LV-2";
};

return fmt::Format("Unknown (%x)", os_abi);
return fmt::Format("Unknown (0x%x)", os_abi);
}

const std::string Ehdr_MachineToString(const u16 machine)

@@ -48,7 +48,7 @@ const std::string Ehdr_MachineToString(const u16 machine)
case MACHINE_ARM: return "ARM";
};

return fmt::Format("Unknown (%x)", machine);
return fmt::Format("Unknown (0x%x)", machine);
}

const std::string Phdr_FlagsToString(u32 flags)

@@ -73,7 +73,7 @@ const std::string Phdr_FlagsToString(u32 flags)
flags &= ~spu << 0x14;
flags &= ~rsx << 0x18;

if(flags != 0) return fmt::Format("Unknown %s PPU[%x] SPU[%x] RSX[%x]", ret.c_str(), ppu, spu, rsx);
if(flags != 0) return fmt::Format("Unknown %s PPU[0x%x] SPU[0x%x] RSX[0x%x]", ret.c_str(), ppu, spu, rsx);

ret += "PPU[" + FLAGS_TO_STRING(ppu) + "] ";
ret += "SPU[" + FLAGS_TO_STRING(spu) + "] ";

@@ -93,7 +93,7 @@ const std::string Phdr_TypeToString(const u32 type)
case 0x60000002: return "LOOS+2";
};

return fmt::Format("Unknown (%x)", type);
return fmt::Format("Unknown (0x%x)", type);
}

Loader::Loader()
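The format-string edits in this file, like the ones in the loader and syscall sections above, follow one convention: hexadecimal values gain an explicit 0x prefix so they cannot be misread as decimal. A minimal illustration with plain printf (the strings above use printf-style specifiers, so the effect is the same):

#include <cstdio>

int main()
{
	const unsigned type = 16;
	std::printf("Unknown (%x)\n", type);   // old style: "Unknown (10)" - easily read as ten
	std::printf("Unknown (0x%x)\n", type); // new style: "Unknown (0x10)" - unambiguously sixteen
}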
@@ -44,7 +44,7 @@ bool PSFLoader::LoadHeader()
if(!m_header.CheckMagic())
return false;

if(m_show_log) LOG_NOTICE(LOADER, "PSF version: %x", m_header.psf_version);
if(m_show_log) LOG_NOTICE(LOADER, "PSF version: 0x%x", m_header.psf_version);

m_psfindxs.clear();
m_entries.clear();
@@ -144,11 +144,20 @@ bool TROPUSRLoader::Generate(const std::string& filepath, const std::string& con
default: trophy_grade = 0;
}

TROPUSREntry4 entry4 = { be_t<u32>::MakeFromBE(se32(4)), be_t<u32>::MakeFromBE(se32(sizeof(TROPUSREntry4) - 0x10)),
be_t<u32>::MakeFromLE((u32)m_table4.size()), be_t<u32>::MakeFromBE(se32(0)), be_t<u32>::MakeFromLE(trophy_id),
be_t<u32>::MakeFromLE(trophy_grade), be_t<u32>::MakeFromBE(se32(0xFFFFFFFF)) };
TROPUSREntry6 entry6 = { be_t<u32>::MakeFromBE(se32(6)), be_t<u32>::MakeFromBE(se32(sizeof(TROPUSREntry6) - 0x10)),
be_t<u32>::MakeFromLE((u32)m_table6.size()), be_t<u32>::MakeFromBE(0), be_t<u32>::MakeFromLE(trophy_id) };
TROPUSREntry4 entry4 = {
be_t<u32>::make(4),
be_t<u32>::make(sizeof(TROPUSREntry4) - 0x10),
be_t<u32>::make((u32)m_table4.size()),
be_t<u32>::make(0),
be_t<u32>::make(trophy_id),
be_t<u32>::make(trophy_grade),
be_t<u32>::make(0xFFFFFFFF) };
TROPUSREntry6 entry6 = {
be_t<u32>::make(6),
be_t<u32>::make(sizeof(TROPUSREntry6) - 0x10),
be_t<u32>::make((u32)m_table6.size()),
be_t<u32>::make(0),
be_t<u32>::make(trophy_id) };

m_table4.push_back(entry4);
m_table6.push_back(entry6);

@@ -156,11 +165,19 @@ bool TROPUSRLoader::Generate(const std::string& filepath, const std::string& con
}

u64 offset = sizeof(TROPUSRHeader) + 2 * sizeof(TROPUSRTableHeader);
TROPUSRTableHeader table4header = { be_t<u32>::MakeFromBE(se32(4)), be_t<u32>::MakeFromBE(se32(sizeof(TROPUSREntry4)-0x10)),
be_t<u32>::MakeFromBE(se32(1)), be_t<u32>::MakeFromLE((u32)m_table4.size()), be_t<u64>::MakeFromLE(offset) };
TROPUSRTableHeader table4header = {
be_t<u32>::make(4),
be_t<u32>::make(sizeof(TROPUSREntry4) - 0x10),
be_t<u32>::make(1),
be_t<u32>::make((u32)m_table4.size()),
be_t<u64>::make(offset) };
offset += m_table4.size() * sizeof(TROPUSREntry4);
TROPUSRTableHeader table6header = { be_t<u32>::MakeFromBE(se32(6)), be_t<u32>::MakeFromBE(se32(sizeof(TROPUSREntry6)-0x10)),
be_t<u32>::MakeFromBE(se32(1)), be_t<u32>::MakeFromLE((u32)m_table6.size()), be_t<u64>::MakeFromLE(offset) };
TROPUSRTableHeader table6header = {
be_t<u32>::make(6),
be_t<u32>::make(sizeof(TROPUSREntry6) - 0x10),
be_t<u32>::make(1),
be_t<u32>::make((u32)m_table6.size()),
be_t<u64>::make(offset) };
offset += m_table6.size() * sizeof(TROPUSREntry6);

m_tableHeaders.clear();
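The initializer rewrites above replace the MakeFromBE(se32(...)) / MakeFromLE(...) pair with a single make factory that always takes a native value and stores it byte-swapped. A simplified standalone sketch of that pattern follows; this be32 is an illustrative stand-in for RPCS3's be_t and assumes a little-endian host.

#include <cstdint>
#include <cstdio>

// Illustrative 32-bit big-endian holder; the real be_t is templated and more general.
struct be32
{
	std::uint32_t raw; // bytes kept in big-endian order (on a little-endian host)

	static std::uint32_t swap(std::uint32_t v)
	{
		return (v >> 24) | ((v >> 8) & 0x0000FF00u) | ((v << 8) & 0x00FF0000u) | (v << 24);
	}

	// One factory: callers always pass a native value, so there is no longer
	// a choice between "from LE" and "from BE" (and no se32 pre-swap) to get wrong.
	static be32 make(std::uint32_t v) { return { swap(v) }; }

	std::uint32_t value() const { return swap(raw); } // back to native for reading
};

int main()
{
	const be32 magic = be32::make(4);
	std::printf("stored=0x%08x value=%u\n", magic.raw, magic.value()); // stored=0x04000000 value=4
}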
@@ -51,7 +51,7 @@ bool TRPLoader::LoadHeader(bool show)
return false;

if (show)
LOG_NOTICE(LOADER, "TRP version: %x", m_header.trp_version);
LOG_NOTICE(LOADER, "TRP version: 0x%x", m_header.trp_version);

m_entries.clear();
m_entries.resize(m_header.trp_files_count);
@@ -314,6 +314,7 @@
<ClInclude Include="Emu\Io\PadHandler.h" />
<ClInclude Include="Emu\Memory\Memory.h" />
<ClInclude Include="Emu\Memory\MemoryBlock.h" />
<ClInclude Include="Emu\Memory\atomic_type.h" />
<ClInclude Include="Emu\RSX\GCM.h" />
<ClInclude Include="Emu\RSX\GL\GLBuffers.h" />
<ClInclude Include="Emu\RSX\GL\GLFragmentProgram.h" />

@@ -1225,6 +1225,9 @@
<ClInclude Include="Emu\SysCalls\SyncPrimitivesManager.h">
<Filter>Emu\SysCalls</Filter>
</ClInclude>
<ClInclude Include="Emu\Memory\atomic_type.h">
<Filter>Emu\Memory</Filter>
</ClInclude>
<ClInclude Include="Crypto\ec.h">
<Filter>Crypto</Filter>
</ClInclude>