Mirror of https://github.com/RPCS3/rpcs3.git (synced 2025-04-21 20:15:27 +00:00)
sys_lwcond* funcs moved and rewritten
This commit is contained in:
parent 2709dc2e36
commit 3cf80b0831
18 changed files with 433 additions and 288 deletions
@@ -175,8 +175,6 @@ namespace sce_libc_func
void exit(ARMv7Context& context)
{
sceLibc.Warning("exit()");

LV2_LOCK;

for (auto func : g_atexit)
{
@@ -197,6 +197,42 @@ public:
}
};

__forceinline static u64 operator ++(_atomic_base<be_t<u64>>& left, int)
{
u64 result;

left.atomic_op([&result](be_t<u64>& value)
{
result = value++;
});

return result;
}

__forceinline static u64 operator --(_atomic_base<be_t<u64>>& left, int)
{
u64 result;

left.atomic_op([&result](be_t<u64>& value)
{
result = value--;
});

return result;
}

__forceinline static u64 operator +=(_atomic_base<be_t<u64>>& left, u64 right)
{
u64 result;

left.atomic_op([&result, right](be_t<u64>& value)
{
result = (value += right);
});

return result;
}

template<typename T> using atomic_le_t = _atomic_base<T>;

template<typename T> using atomic_be_t = _atomic_base<typename to_be_t<T>::type>;
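Note: the postfix and compound operators added above return the value produced inside the wrapped atomic_op, which is what lets call sites later in this commit, such as lwmutex->all_info++, stay single atomic read-modify-write expressions on a big-endian counter. A minimal, self-contained sketch of the same pattern follows (a toy wrapper standing in for _atomic_base<be_t<u64>>, with atomic_op emulated by a plain CAS loop; all names here are illustrative, not RPCS3's):

// Toy stand-in for the emulator's atomic wrapper; illustration only.
#include <atomic>
#include <cassert>
#include <cstdint>

struct toy_atomic
{
    std::atomic<uint64_t> storage{0};

    // Apply an arbitrary read-modify-write functor via a CAS retry loop.
    template<typename F>
    void atomic_op(F func)
    {
        uint64_t old_value = storage.load();
        for (;;)
        {
            uint64_t new_value = old_value;
            func(new_value);
            if (storage.compare_exchange_weak(old_value, new_value)) return;
        }
    }
};

// Postfix increment: returns the value observed before the increment.
static uint64_t operator ++(toy_atomic& left, int)
{
    uint64_t result;
    left.atomic_op([&result](uint64_t& value) { result = value++; });
    return result;
}

int main()
{
    toy_atomic counter;
    assert(counter++ == 0); // old value returned, counter now holds 1
    assert(counter++ == 1);
}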
@@ -78,10 +78,10 @@ namespace vm
}

template<typename AT2>
operator const _ptr_base<T, lvl, AT2>() const
operator _ptr_base<T, lvl, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<T, lvl, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<T, lvl, AT2>&>(addr);
}

AT addr() const
@@ -94,9 +94,9 @@ namespace vm
m_addr = value;
}

static const _ptr_base make(const AT& addr)
static _ptr_base make(const AT& addr)
{
return reinterpret_cast<const _ptr_base&>(addr);
return reinterpret_cast<_ptr_base&>(addr);
}

_ptr_base& operator = (const _ptr_base& right) = default;
@@ -203,10 +203,10 @@ namespace vm
}

template<typename AT2>
operator const _ptr_base<T, 1, AT2>() const
operator _ptr_base<T, 1, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<T, 1, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<T, 1, AT2>&>(addr);
}

T* get_ptr() const
@@ -269,17 +269,17 @@ namespace vm
explicit operator bool() const { return m_addr != 0; }

template<typename AT2>
operator const _ptr_base<void, 1, AT2>() const
operator _ptr_base<void, 1, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<void, 1, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<void, 1, AT2>&>(addr);
}

template<typename AT2>
operator const _ptr_base<const void, 1, AT2>() const
operator _ptr_base<const void, 1, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<const void, 1, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<const void, 1, AT2>&>(addr);
}

static const _ptr_base make(const AT& addr)
@@ -332,10 +332,10 @@ namespace vm
explicit operator bool() const { return m_addr != 0; }

template<typename AT2>
operator const _ptr_base<const void, 1, AT2>() const
operator _ptr_base<const void, 1, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<const void, 1, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<const void, 1, AT2>&>(addr);
}

static const _ptr_base make(const AT& addr)
@@ -381,10 +381,10 @@ namespace vm
explicit operator bool() const { return m_addr != 0; }

template<typename AT2>
operator const _ptr_base<type, 1, AT2>() const
operator _ptr_base<type, 1, AT2>() const
{
const AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<const _ptr_base<type, 1, AT2>&>(addr);
AT2 addr = convert_le_be<AT2>(m_addr);
return reinterpret_cast<_ptr_base<type, 1, AT2>&>(addr);
}

static const _ptr_base make(const AT& addr)
@@ -467,7 +467,7 @@ struct cast_ppu_gpr<vm::_ptr_base<T, lvl, AT>, false>
{
__forceinline static u64 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
{
return value.addr();
return cast_ppu_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
}

__forceinline static vm::_ptr_base<T, lvl, AT> from_gpr(const u64 reg)
@@ -486,7 +486,7 @@ struct cast_armv7_gpr<vm::_ptr_base<T, lvl, AT>, false>
{
__forceinline static u32 to_gpr(const vm::_ptr_base<T, lvl, AT>& value)
{
return value.addr();
return cast_armv7_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
}

__forceinline static vm::_ptr_base<T, lvl, AT> from_gpr(const u32 reg)
@@ -117,7 +117,7 @@ struct cast_ppu_gpr<vm::_ref_base<T, AT>, false>
{
__forceinline static u64 to_gpr(const vm::_ref_base<T, AT>& value)
{
return value.addr();
return cast_ppu_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
}

__forceinline static vm::_ref_base<T, AT> from_gpr(const u64 reg)
@@ -136,7 +136,7 @@ struct cast_armv7_gpr<vm::_ref_base<T, AT>, false>
{
__forceinline static u32 to_gpr(const vm::_ref_base<T, AT>& value)
{
return value.addr();
return cast_armv7_gpr<AT, std::is_enum<AT>::value>::to_gpr(value.addr());
}

__forceinline static vm::_ref_base<T, AT> from_gpr(const u32 reg)
@@ -168,11 +168,7 @@ s32 spursInit(
}

lwmutex_create(spurs->m.mutex, false, SYS_SYNC_PRIORITY, *(u64*)"_spuPrv");

if (s32 res = lwcond_create(spurs->m.cond, spurs->m.mutex, *(u64*)"_spuPrv"))
{
assert(!"lwcond_create() failed");
}
lwcond_create(spurs->m.cond, spurs->m.mutex, *(u64*)"_spuPrv");

spurs->m.flags1 = (flags & SAF_EXIT_IF_NO_WORK ? SF1_EXIT_IF_NO_WORK : 0) | (isSecond ? SF1_32_WORKLOADS : 0);
spurs->m.wklFlagReceiver.write_relaxed(0xff);
@@ -848,7 +844,7 @@ s32 spursWakeUp(PPUThread& CPU, vm::ptr<CellSpurs> spurs)
{
assert(!"sys_lwmutex_lock() failed");
}
if (s32 res = sys_lwcond_signal(spurs->get_lwcond()))
if (s32 res = sys_lwcond_signal(CPU, spurs->get_lwcond()))
{
assert(!"sys_lwcond_signal() failed");
}
@@ -124,7 +124,7 @@ s32 sys_lwmutex_create(vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwmutex_attri

s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
{
sysPrxForUser.Warning("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);
sysPrxForUser.Log("sys_lwmutex_destroy(lwmutex=*0x%x)", lwmutex);

// check to prevent recursive locking in the next call
if (lwmutex->lock_var.read_relaxed().owner == CPU.GetId())
@@ -210,12 +210,12 @@ s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout
}

// atomically increment waiter value using 64 bit op
lwmutex->all_info.atomic_op([](be_t<u64>& value){ value++; });
lwmutex->all_info++;

if (lwmutex->owner.compare_and_swap_test(lwmutex::free, tid))
{
// locking succeeded
lwmutex->all_info.atomic_op([](be_t<u64>& value){ value--; });
lwmutex->all_info--;

return CELL_OK;
}
@@ -223,7 +223,7 @@ s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout
// lock using the syscall
const s32 res = _sys_lwmutex_lock(lwmutex->sleep_queue, timeout);

lwmutex->all_info.atomic_op([](be_t<u64>& value){ value--; });
lwmutex->all_info--;

if (res == CELL_OK)
{
@@ -348,6 +348,275 @@ s32 sys_lwmutex_unlock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex)
return CELL_OK;
}

s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
{
sysPrxForUser.Warning("sys_lwcond_create(lwcond=*0x%x, lwmutex=*0x%x, attr=*0x%x)", lwcond, lwmutex, attr);

std::shared_ptr<lwcond_t> lwc(new lwcond_t(attr->name_u64));

lwcond->lwcond_queue = Emu.GetIdManager().GetNewID(lwc, TYPE_LWCOND);
lwcond->lwmutex = lwmutex;

return CELL_OK;
}

s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
{
sysPrxForUser.Log("sys_lwcond_destroy(lwcond=*0x%x)", lwcond);

const s32 res = _sys_lwcond_destroy(lwcond->lwcond_queue);

if (res == CELL_OK)
{
lwcond->lwcond_queue = lwmutex::dead;
}

return res;
}

s32 sys_lwcond_signal(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
{
sysPrxForUser.Log("sys_lwcond_signal(lwcond=*0x%x)", lwcond);

const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
{
// TODO (protocol ignored)
}

if (lwmutex->owner.read_relaxed() == CPU.GetId())
{
// if owns the mutex
lwmutex->all_info++;

// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 1))
{
lwmutex->all_info--;

return res == CELL_EPERM ? CELL_OK : res;
}

return CELL_OK;
}

if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
{
// if locking failed

if (res != CELL_EBUSY)
{
return CELL_ESRCH;
}

// call the syscall
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, -1, 2);
}

// if locking succeeded
lwmutex->all_info++;

// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, -1, 3))
{
lwmutex->all_info--;

// unlock the lightweight mutex
sys_lwmutex_unlock(CPU, lwmutex);

return res == CELL_ENOENT ? CELL_OK : res;
}

return CELL_OK;
}

s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond)
{
sysPrxForUser.Log("sys_lwcond_signal_all(lwcond=*0x%x)", lwcond);

const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
{
// TODO (protocol ignored)
}

if (lwmutex->owner.read_relaxed() == CPU.GetId())
{
// if owns the mutex, call the syscall
const s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

if (res <= 0)
{
// return error or CELL_OK
return res;
}

lwmutex->all_info += res;

return CELL_OK;
}

if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
{
// if locking failed

if (res != CELL_EBUSY)
{
return CELL_ESRCH;
}

// call the syscall
return _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 2);
}

// if locking succeeded, call the syscall
s32 res = _sys_lwcond_signal_all(lwcond->lwcond_queue, lwmutex->sleep_queue, 1);

if (res > 0)
{
lwmutex->all_info += res;

res = CELL_OK;
}

// unlock mutex
sys_lwmutex_unlock(CPU, lwmutex);

return res;
}

s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
{
sysPrxForUser.Log("sys_lwcond_signal_to(lwcond=*0x%x, ppu_thread_id=%d)", lwcond, ppu_thread_id);

const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

if ((lwmutex->attribute.data() & se32(SYS_SYNC_ATTR_PROTOCOL_MASK)) == se32(SYS_SYNC_RETRY))
{
// TODO (protocol ignored)
}

if (lwmutex->owner.read_relaxed() == CPU.GetId())
{
// if owns the mutex
lwmutex->all_info++;

// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 1))
{
lwmutex->all_info--;

return res;
}

return CELL_OK;
}

if (s32 res = sys_lwmutex_trylock(CPU, lwmutex))
{
// if locking failed

if (res != CELL_EBUSY)
{
return CELL_ESRCH;
}

// call the syscall
return _sys_lwcond_signal(lwcond->lwcond_queue, 0, ppu_thread_id, 2);
}

// if locking succeeded
lwmutex->all_info++;

// call the syscall
if (s32 res = _sys_lwcond_signal(lwcond->lwcond_queue, lwmutex->sleep_queue, ppu_thread_id, 3))
{
lwmutex->all_info--;

// unlock the lightweight mutex
sys_lwmutex_unlock(CPU, lwmutex);

return res;
}

return CELL_OK;
}

s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
sysPrxForUser.Log("sys_lwcond_wait(lwcond=*0x%x, timeout=0x%llx)", lwcond, timeout);

const be_t<u32> tid = be_t<u32>::make(CPU.GetId());

const vm::ptr<sys_lwmutex_t> lwmutex = lwcond->lwmutex;

if (lwmutex->owner.read_relaxed() != tid)
{
// if not owner of the mutex
return CELL_EPERM;
}

// save old recursive value
const be_t<u32> recursive_value = lwmutex->recursive_count;

// set special value
lwmutex->owner.write_relaxed(lwmutex::reserved);
lwmutex->recursive_count = 0;

// call the syscall
s32 res = _sys_lwcond_queue_wait(lwcond->lwcond_queue, lwmutex->sleep_queue, timeout);

if (res == CELL_OK || res == CELL_ESRCH)
{
if (res == CELL_OK)
{
lwmutex->all_info--;
}

// restore owner and recursive value
lwmutex->owner.exchange(tid);
lwmutex->recursive_count = recursive_value;

return res;
}

if (res == CELL_EBUSY || res == CELL_ETIMEDOUT)
{
const s32 res2 = sys_lwmutex_lock(CPU, lwmutex, 0);

if (res2 == CELL_OK)
{
// if successfully locked, restore recursive value
lwmutex->recursive_count = recursive_value;

return res == CELL_EBUSY ? CELL_OK : res;
}

return res2;
}

if (res == CELL_EDEADLK)
{
const auto owner = lwmutex->owner.read_relaxed();

if (owner.data() != se32(lwmutex_reserved))
{
sysPrxForUser.Fatal("sys_lwcond_wait(lwcond=*0x%x): unexpected lwmutex->owner (0x%x)", lwcond, owner);
}

// restore owner and recursive value
lwmutex->owner.exchange(tid);
lwmutex->recursive_count = recursive_value;

return CELL_ETIMEDOUT;
}

sysPrxForUser.Fatal("sys_lwconde_wait(lwcond=*0x%x): unexpected syscall result (0x%x)", lwcond, res);
return res;
}

std::string ps3_fmt(PPUThread& context, vm::ptr<const char> fmt, u32 g_count, u32 f_count, u32 v_count)
{
std::string result;
@@ -849,14 +1118,7 @@ void sys_spinlock_lock(vm::ptr<atomic_t<u32>> lock)
// prx: exchange with 0xabadcafe, repeat until exchanged with 0
while (lock->exchange(be_t<u32>::make(0xabadcafe)).data())
{
while (lock->read_relaxed().data())
{
std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack
if (Emu.IsStopped())
{
break;
}
}
g_sys_spinlock_wm.wait_op(lock.addr(), [lock](){ return lock->read_relaxed().data() == 0; });

if (Emu.IsStopped())
{
@@ -885,6 +1147,8 @@ void sys_spinlock_unlock(vm::ptr<atomic_t<u32>> lock)

// prx: sync and set 0
lock->exchange(be_t<u32>::make(0));

g_sys_spinlock_wm.notify(lock.addr());
}

Module sysPrxForUser("sysPrxForUser", []()
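Note: sys_spinlock_lock/sys_spinlock_unlock above now park waiters through g_sys_spinlock_wm.wait_op() and wake them with notify() instead of sleep-polling. A rough, self-contained sketch of an address-keyed waiter map in standard C++ follows; the toy_waiter_map type, its members, and the demo address are assumptions for illustration only, not the emulator's waiter_map implementation or signatures:

// Toy address-keyed waiter map: wait_op() blocks until the predicate holds,
// notify() wakes the waiters registered for that address. Illustration only.
#include <atomic>
#include <condition_variable>
#include <cstdint>
#include <mutex>
#include <thread>
#include <unordered_map>

class toy_waiter_map
{
    struct entry { std::mutex m; std::condition_variable cv; };
    std::mutex map_mutex;
    std::unordered_map<std::uint32_t, entry> entries;

    entry& get(std::uint32_t addr)
    {
        std::lock_guard<std::mutex> lock(map_mutex);
        return entries[addr]; // node-based map: reference stays valid
    }

public:
    template<typename Pred>
    void wait_op(std::uint32_t addr, Pred pred)
    {
        entry& e = get(addr);
        std::unique_lock<std::mutex> lock(e.m);
        e.cv.wait(lock, pred); // sleep until notified and pred() is true
    }

    void notify(std::uint32_t addr)
    {
        entry& e = get(addr);
        std::lock_guard<std::mutex> lock(e.m);
        e.cv.notify_all();
    }
};

int main()
{
    toy_waiter_map wm;
    std::atomic<std::uint32_t> lock_word{1}; // "held" spinlock word

    std::thread waiter([&]
    {
        // block (instead of spinning) until the lock word reads 0
        wm.wait_op(0x1000, [&] { return lock_word.load() == 0; });
    });

    lock_word.store(0); // unlock
    wm.notify(0x1000);  // wake waiters registered on this address
    waiter.join();
}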
@@ -911,6 +1175,13 @@ Module sysPrxForUser("sysPrxForUser", []()
REG_FUNC(sysPrxForUser, sys_lwmutex_trylock);
REG_FUNC(sysPrxForUser, sys_lwmutex_unlock);

REG_FUNC(sysPrxForUser, sys_lwcond_create);
REG_FUNC(sysPrxForUser, sys_lwcond_destroy);
REG_FUNC(sysPrxForUser, sys_lwcond_signal);
REG_FUNC(sysPrxForUser, sys_lwcond_signal_all);
REG_FUNC(sysPrxForUser, sys_lwcond_signal_to);
REG_FUNC(sysPrxForUser, sys_lwcond_wait);

REG_FUNC(sysPrxForUser, sys_time_get_system_time);

REG_FUNC(sysPrxForUser, sys_process_exit);
@@ -957,13 +1228,6 @@ Module sysPrxForUser("sysPrxForUser", []()
REG_FUNC(sysPrxForUser, sys_raw_spu_load);
REG_FUNC(sysPrxForUser, sys_raw_spu_image_load);

REG_FUNC(sysPrxForUser, sys_lwcond_create);
REG_FUNC(sysPrxForUser, sys_lwcond_destroy);
REG_FUNC(sysPrxForUser, sys_lwcond_signal);
REG_FUNC(sysPrxForUser, sys_lwcond_signal_all);
REG_FUNC(sysPrxForUser, sys_lwcond_signal_to);
REG_FUNC(sysPrxForUser, sys_lwcond_wait);

REG_FUNC(sysPrxForUser, sys_get_random_number);

REG_FUNC(sysPrxForUser, sys_spinlock_initialize);
@@ -33,3 +33,13 @@ s32 sys_lwmutex_lock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex, u64 timeout
s32 sys_lwmutex_trylock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
s32 sys_lwmutex_unlock(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);
s32 sys_lwmutex_destroy(PPUThread& CPU, vm::ptr<sys_lwmutex_t> lwmutex);

struct sys_lwcond_t;
struct sys_lwcond_attribute_t;

s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr);
s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal_all(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal_to(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id);
s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout);
@@ -40,10 +40,10 @@ std::string SyncPrimManager::GetSyncPrimName(u32 id, IDType type)
{
case TYPE_LWCOND:
{
std::shared_ptr<Lwcond> lw;
std::shared_ptr<lwcond_t> lw;
if (Emu.GetIdManager().GetIDData(id, lw))
{
return std::string((const char*)&lw->queue.name, 8);
return std::string((const char*)&lw->name, 8);
}
break;
}
@@ -9,6 +9,7 @@
#include "lv2/cellFs.h"
#include "lv2/sleep_queue.h"
#include "lv2/sys_lwmutex.h"
#include "lv2/sys_lwcond.h"
#include "lv2/sys_mutex.h"
#include "lv2/sys_cond.h"
#include "lv2/sys_event.h"
@@ -146,12 +147,12 @@ const ppu_func_caller sc_table[1024] =
bind_func(sys_cond_signal), //108 (0x06C)
bind_func(sys_cond_signal_all), //109 (0x06D)
bind_func(sys_cond_signal_to), //110 (0x06E)
null_func,//bind_func(_sys_lwcond_create) //111 (0x06F) // internal, used by sys_lwcond_create
null_func,//bind_func(_sys_lwcond_destroy) //112 (0x070) // internal, used by sys_lwcond_destroy
null_func,//bind_func(_sys_lwcond_queue_wait) //113 (0x071) // internal, used by sys_lwcond_wait
bind_func(_sys_lwcond_create), //111 (0x06F)
bind_func(_sys_lwcond_destroy), //112 (0x070)
bind_func(_sys_lwcond_queue_wait), //113 (0x071)
bind_func(sys_semaphore_get_value), //114 (0x072)
null_func,//bind_func(sys_semaphore_...) //115 (0x073) // internal, used by sys_lwcond_signal, sys_lwcond_signal_to
null_func,//bind_func(sys_semaphore_...) //116 (0x074) // internal, used by sys_lwcond_signal_all
bind_func(_sys_lwcond_signal), //115 (0x073)
bind_func(_sys_lwcond_signal_all), //116 (0x074)
null_func,//bind_func(sys_semaphore_...) //117 (0x075) // internal, used by sys_lwmutex_unlock
bind_func(sys_event_flag_clear), //118 (0x076)
null_func,//bind_func(sys_event_...) //119 (0x077) ROOT
@@ -89,7 +89,7 @@ s32 sys_cond_signal(u32 cond_id)
{
cond->signaled++;
cond->waiters--;
cond->mutex->cv.notify_one();
cond->cv.notify_one();
}

return CELL_OK;
@@ -111,7 +111,7 @@ s32 sys_cond_signal_all(u32 cond_id)
if (cond->waiters)
{
cond->signaled += cond->waiters.exchange(0);
cond->mutex->cv.notify_all();
cond->cv.notify_all();
}

return CELL_OK;
@@ -142,7 +142,8 @@ s32 sys_cond_signal_to(u32 cond_id, u32 thread_id)

cond->signaled++;
cond->waiters--;
cond->mutex->cv.notify_one();
cond->cv.notify_one();

return CELL_OK;
}

@@ -174,7 +175,7 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
// unlock mutex
cond->mutex->owner.reset();

// not sure whether the recursive value is precisely saved
// save recursive value
const u32 recursive_value = cond->mutex->recursive_count.exchange(0);

while (!cond->mutex->owner.expired() || !cond->signaled)
@@ -192,10 +193,11 @@ s32 sys_cond_wait(PPUThread& CPU, u32 cond_id, u64 timeout)
return CELL_OK;
}

cond->mutex->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
// wait on appropriate condition variable
(cond->signaled ? cond->mutex->cv : cond->cv).wait_for(lv2_lock, std::chrono::milliseconds(1));
}

// restore mutex owner
// reown the mutex and restore recursive value
cond->mutex->owner = thread;
cond->mutex->recursive_count = recursive_value;

@@ -20,7 +20,8 @@ struct cond_t
const u64 name;
const std::shared_ptr<mutex_t> mutex; // associated mutex

// TODO: use sleep queue
// TODO: use sleep queue, possibly remove condition variable
std::condition_variable cv;
std::atomic<s32> waiters;
std::atomic<s32> signaled;
@@ -13,220 +13,63 @@

SysCallBase sys_lwcond("sys_lwcond");

s32 lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name_u64)
void lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name)
{
const u32 addr = vm::get_addr(&lwmutex);
std::shared_ptr<lwcond_t> lwc(new lwcond_t(name));

std::shared_ptr<Lwcond> lw(new Lwcond(name_u64, addr));
lwcond.lwcond_queue = Emu.GetIdManager().GetNewID(lwc, TYPE_LWCOND);
}

const u32 id = Emu.GetIdManager().GetNewID(lw, TYPE_LWCOND);
s32 _sys_lwcond_create(vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name, u32 arg5)
{
sys_lwcond.Warning("_sys_lwcond_create(lwcond_id=*0x%x, lwmutex_id=%d, control=*0x%x, name=0x%llx, arg5=0x%x)", lwcond_id, lwmutex_id, control, name, arg5);

lw->queue.set_full_name(fmt::Format("Lwcond(%d, addr=0x%x)", id, lw->addr));
lwcond.lwmutex.set(addr);
lwcond.lwcond_queue = id;
std::shared_ptr<lwcond_t> lwc(new lwcond_t(name));

*lwcond_id = Emu.GetIdManager().GetNewID(lwc, TYPE_LWCOND);

sys_lwcond.Warning("*** lwcond created [%s] (lwmutex_addr=0x%x): id = %d", std::string((const char*)&name_u64, 8).c_str(), addr, id);
return CELL_OK;
}

s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr)
s32 _sys_lwcond_destroy(u32 lwcond_id)
{
sys_lwcond.Log("sys_lwcond_create(lwcond_addr=0x%x, lwmutex_addr=0x%x, attr_addr=0x%x)",
lwcond.addr(), lwmutex.addr(), attr.addr());
sys_lwcond.Warning("_sys_lwcond_destroy(lwcond_id=%d)", lwcond_id);

return lwcond_create(*lwcond, *lwmutex, attr->name_u64);
}
LV2_LOCK;

s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond)
{
sys_lwcond.Warning("sys_lwcond_destroy(lwcond_addr=0x%x)", lwcond.addr());

u32 id = lwcond->lwcond_queue;

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData(id, lw))
std::shared_ptr<lwcond_t> lwc;
if (!Emu.GetIdManager().GetIDData(lwcond_id, lwc))
{
return CELL_ESRCH;
}

if (lw->queue.count()) // TODO: safely make object unusable
if (lwc->waiters)
{
return CELL_EBUSY;
}

Emu.GetIdManager().RemoveID(id);
Emu.GetIdManager().RemoveID(lwcond_id);

return CELL_OK;
}

s32 sys_lwcond_signal(vm::ptr<sys_lwcond_t> lwcond)
s32 _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode)
{
sys_lwcond.Log("sys_lwcond_signal(lwcond_addr=0x%x)", lwcond.addr());

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
return CELL_ESRCH;
}

auto mutex = lwcond->lwmutex;

if (u32 target = lw->queue.signal(mutex->attribute))
{
if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal(id=%d) aborted", (u32)lwcond->lwcond_queue);
return CELL_OK;
}
}
sys_lwcond.Fatal("_sys_lwcond_signal(lwcond_id=%d, lwmutex_id=%d, ppu_thread_id=%d, mode=%d)", lwcond_id, lwmutex_id, ppu_thread_id, mode);

return CELL_OK;
}

s32 sys_lwcond_signal_all(vm::ptr<sys_lwcond_t> lwcond)
s32 _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode)
{
sys_lwcond.Log("sys_lwcond_signal_all(lwcond_addr=0x%x)", lwcond.addr());

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
return CELL_ESRCH;
}

auto mutex = lwcond->lwmutex;

while (u32 target = lw->queue.signal(mutex->attribute))
{
if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_signal_all(id=%d) aborted", (u32)lwcond->lwcond_queue);
return CELL_OK;
}
}
sys_lwcond.Fatal("_sys_lwcond_signal_all(lwcond_id=%d, lwmutex_id=%d, mode=%d)", lwcond_id, lwmutex_id, mode);

return CELL_OK;
}

s32 sys_lwcond_signal_to(vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id)
s32 _sys_lwcond_queue_wait(u32 lwcond_id, u32 lwmutex_id, u64 timeout)
{
sys_lwcond.Log("sys_lwcond_signal_to(lwcond_addr=0x%x, ppu_thread_id=%d)", lwcond.addr(), ppu_thread_id);

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
return CELL_ESRCH;
}

if (!Emu.GetIdManager().CheckID(ppu_thread_id))
{
return CELL_ESRCH;
}

if (!lw->queue.signal_selected(ppu_thread_id))
{
return CELL_EPERM;
}
sys_lwcond.Fatal("_sys_lwcond_queue_wait(lwcond_id=%d, lwmutex_id=%d, timeout=0x%llx)", lwcond_id, lwmutex_id, timeout);

return CELL_OK;
}

s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout)
{
sys_lwcond.Log("sys_lwcond_wait(lwcond_addr=0x%x, timeout=%lld)", lwcond.addr(), timeout);

const u64 start_time = get_system_time();

std::shared_ptr<Lwcond> lw;
if (!Emu.GetIdManager().GetIDData((u32)lwcond->lwcond_queue, lw))
{
return CELL_ESRCH;
}

auto mutex = lwcond->lwmutex;
u32 tid_le = CPU.GetId();
auto tid = be_t<u32>::make(tid_le);

std::shared_ptr<sleep_queue_t> sq;
if (!Emu.GetIdManager().GetIDData((u32)mutex->sleep_queue, sq))
{
sys_lwcond.Warning("sys_lwcond_wait(id=%d): associated mutex had invalid sleep queue (%d)",
(u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
return CELL_ESRCH;
}

if (mutex->owner.read_sync() != tid)
{
return CELL_EPERM;
}

lw->queue.push(tid_le, mutex->attribute);

auto old_recursive = mutex->recursive_count;
mutex->recursive_count = 0;

auto target = be_t<u32>::make(sq->signal(mutex->attribute));
if (!mutex->owner.compare_and_swap_test(tid, target))
{
assert(!"sys_lwcond_wait(): mutex unlocking failed");
}

bool signaled = false;
while (true)
{
if ((signaled = signaled || lw->queue.pop(tid, mutex->attribute))) // check signaled threads
{
s32 res = sys_lwmutex_lock(CPU, mutex, timeout ? get_system_time() - start_time : 0); // this is bad
if (res == CELL_OK)
{
break;
}

switch (res)
{
case static_cast<int>(CELL_EDEADLK):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex was locked", (u32)lwcond->lwcond_queue);
return CELL_OK; // mutex not locked (but already locked in the incorrect way)
}
case static_cast<int>(CELL_ESRCH):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): associated mutex not found (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
return CELL_ESRCH; // mutex not locked
}
case static_cast<int>(CELL_ETIMEDOUT):
{
return CELL_ETIMEDOUT; // mutex not locked
}
case static_cast<int>(CELL_EINVAL):
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): invalid associated mutex (%d)", (u32)lwcond->lwcond_queue, (u32)mutex->sleep_queue);
return CELL_EINVAL; // mutex not locked
}
default:
{
sys_lwcond.Error("sys_lwcond_wait(id=%d): mutex->lock() returned 0x%x", (u32)lwcond->lwcond_queue, res);
return CELL_EINVAL; // mutex not locked
}
}
}

std::this_thread::sleep_for(std::chrono::milliseconds(1)); // hack

if (timeout && get_system_time() - start_time > timeout)
{
if (!lw->queue.invalidate(tid_le, mutex->attribute))
{
assert(!"sys_lwcond_wait() failed (timeout)");
}
return CELL_ETIMEDOUT; // mutex not locked
}

if (Emu.IsStopped())
{
sys_lwcond.Warning("sys_lwcond_wait(id=%d) aborted", (u32)lwcond->lwcond_queue);
return CELL_OK;
}
}

mutex->recursive_count = old_recursive;
return CELL_OK;
}
@@ -14,31 +14,31 @@ struct sys_lwcond_attribute_t
struct sys_lwcond_t
{
vm::bptr<sys_lwmutex_t> lwmutex;
be_t<u32> lwcond_queue;
be_t<u32> lwcond_queue; // lwcond pseudo-id
};

struct Lwcond
struct lwcond_t
{
sleep_queue_t queue;
const u64 name;

const u32 addr;
// TODO: use sleep queue
std::condition_variable cv;
std::atomic<s32> waiters;

Lwcond(u64 name, u32 addr)
: queue(name)
, addr(addr)
lwcond_t(u64 name)
: name(name)
{
}
};

// Aux
s32 lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name_u64);
void lwcond_create(sys_lwcond_t& lwcond, sys_lwmutex_t& lwmutex, u64 name);

class PPUThread;

// SysCalls
s32 sys_lwcond_create(vm::ptr<sys_lwcond_t> lwcond, vm::ptr<sys_lwmutex_t> lwmutex, vm::ptr<sys_lwcond_attribute_t> attr);
s32 sys_lwcond_destroy(vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal(vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal_all(vm::ptr<sys_lwcond_t> lwcond);
s32 sys_lwcond_signal_to(vm::ptr<sys_lwcond_t> lwcond, u32 ppu_thread_id);
s32 sys_lwcond_wait(PPUThread& CPU, vm::ptr<sys_lwcond_t> lwcond, u64 timeout);
s32 _sys_lwcond_create(vm::ptr<u32> lwcond_id, u32 lwmutex_id, vm::ptr<sys_lwcond_t> control, u64 name, u32 arg5);
s32 _sys_lwcond_destroy(u32 lwcond_id);
s32 _sys_lwcond_signal(u32 lwcond_id, u32 lwmutex_id, u32 ppu_thread_id, u32 mode);
s32 _sys_lwcond_signal_all(u32 lwcond_id, u32 lwmutex_id, u32 mode);
s32 _sys_lwcond_queue_wait(u32 lwcond_id, u32 lwmutex_id, u64 timeout);
@@ -85,7 +85,7 @@ s32 _sys_lwmutex_lock(u32 lwmutex_id, u64 timeout)
// protocol is ignored in current implementation
mutex->waiters++; assert(mutex->waiters > 0);

while (!mutex->signals)
while (!mutex->signaled)
{
if (timeout && get_system_time() - start_time > timeout)
{
@@ -102,7 +102,7 @@ s32 _sys_lwmutex_lock(u32 lwmutex_id, u64 timeout)
mutex->cv.wait_for(lv2_lock, std::chrono::milliseconds(1));
}

mutex->signals--;
mutex->signaled--;

mutex->waiters--; assert(mutex->waiters >= 0);

@@ -121,12 +121,12 @@ s32 _sys_lwmutex_trylock(u32 lwmutex_id)
return CELL_ESRCH;
}

if (mutex->waiters || !mutex->signals)
if (mutex->waiters || !mutex->signaled)
{
return CELL_EBUSY;
}

mutex->signals--;
mutex->signaled--;

return CELL_OK;
}
@@ -143,7 +143,7 @@ s32 _sys_lwmutex_unlock(u32 lwmutex_id)
return CELL_ESRCH;
}

mutex->signals++;
mutex->signaled++;
mutex->cv.notify_one();

return CELL_OK;
@@ -72,7 +72,7 @@ struct lwmutex_t
const u64 name;

// this object is not truly a mutex and its syscall names are wrong, it's probabably sleep queue or something
std::atomic<u32> signals;
std::atomic<u32> signaled;

// TODO: use sleep queue, possibly remove condition variable
std::condition_variable cv;
@@ -110,10 +110,7 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
return CELL_EKRESOURCE;
}

if (!mutex->recursive_count++)
{
throw __FUNCTION__;
}
mutex->recursive_count++;

return CELL_OK;
}
@@ -142,7 +139,6 @@ s32 sys_mutex_lock(PPUThread& CPU, u32 mutex_id, u64 timeout)
}

mutex->owner = thread;
mutex->recursive_count = 1;
mutex->waiters--; assert(mutex->waiters >= 0);

return CELL_OK;
@@ -172,10 +168,7 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
return CELL_EKRESOURCE;
}

if (!mutex->recursive_count++)
{
throw __FUNCTION__;
}
mutex->recursive_count++;

return CELL_OK;
}
@@ -189,7 +182,6 @@ s32 sys_mutex_trylock(PPUThread& CPU, u32 mutex_id)
}

mutex->owner = thread;
mutex->recursive_count = 1;

return CELL_OK;
}
@@ -214,12 +206,16 @@ s32 sys_mutex_unlock(PPUThread& CPU, u32 mutex_id)
return CELL_EPERM;
}

if (!mutex->recursive_count || (!mutex->recursive && mutex->recursive_count != 1))
if (mutex->recursive_count)
{
throw __FUNCTION__;
if (!mutex->recursive)
{
throw __FUNCTION__;
}

mutex->recursive_count--;
}

if (!--mutex->recursive_count)
else
{
mutex->owner.reset();
mutex->cv.notify_one();
@@ -198,11 +198,13 @@ s32 sys_ppu_thread_create(vm::ptr<u64> thread_id, u32 entry, u64 arg, s32 prio,
return CELL_OK;
}

std::mutex g_once_mutex;

void sys_ppu_thread_once(PPUThread& CPU, vm::ptr<atomic_t<u32>> once_ctrl, vm::ptr<void()> init)
{
sys_ppu_thread.Warning("sys_ppu_thread_once(once_ctrl_addr=0x%x, init_addr=0x%x)", once_ctrl.addr(), init.addr());
sys_ppu_thread.Warning("sys_ppu_thread_once(once_ctrl=*0x%x, init=*0x%x)", once_ctrl, init);

LV2_LOCK;
std::lock_guard<std::mutex> lock(g_once_mutex);

if (once_ctrl->compare_and_swap_test(be_t<u32>::make(SYS_PPU_THREAD_ONCE_INIT), be_t<u32>::make(SYS_PPU_THREAD_DONE_INIT)))
{
@@ -8,17 +8,13 @@ enum : u32
SYS_PPU_THREAD_DONE_INIT = 1,
};

enum ppu_thread_flags : u64
// PPU Thread Flags
enum : u64
{
SYS_PPU_THREAD_CREATE_JOINABLE = 0x1,
SYS_PPU_THREAD_CREATE_INTERRUPT = 0x2,
};

enum stackSize
{
SYS_PPU_THREAD_STACK_MIN = 0x4000,
};

// Aux
u32 ppu_thread_create(u32 entry, u64 arg, s32 prio, u32 stacksize, bool is_joinable, bool is_interrupt, std::string name, std::function<void(PPUThread&)> task = nullptr);