mirror of
https://github.com/RPCS3/rpcs3.git
synced 2025-04-19 19:15:26 +00:00
PPU/cellSpurs: MGS4: Fix cellSpursAddUrgentCommand race condition
cellSpursAddUrgentCommand searches 4 slots for an empty slot to put the command in. At first, it seems to do so unordered. Meanwhile, on the SPU side, an order between all the commands is expected because they are popped in a FIFO manner, without keeping track of how many commands are queued in total. After a second observation of cellSpursAddUrgentCommand, something odd takes place here. Usually, reservation loops are individual and are expected to be closed without any changes from the previous loop affecting the following one. But in this case, after a single failure, the entire operation is reset — a loop of 4 reservation operations is suddenly reset completely. This makes one wonder if the HW expects something else here; perhaps it caches the reservation internally? After some adjustments to LDARX and STDCX to cache the reservation between succeeding loops, Metal Gear Solid 4 no longer freezes!
This commit is contained in:
parent
91b1ec6d5c
commit
047f71b434
3 changed files with 25 additions and 2 deletions
|
@ -484,6 +484,7 @@ auto ppu_feed_data(ppu_thread& ppu, u64 addr)
|
|||
{
|
||||
// Reservation was lost
|
||||
ppu.raddr = 0;
|
||||
ppu.res_cached = 0;
|
||||
}
|
||||
}
|
||||
else
|
||||
|
@ -503,6 +504,7 @@ auto ppu_feed_data(ppu_thread& ppu, u64 addr)
|
|||
if (std::memcmp(buffer + offs, src, size))
|
||||
{
|
||||
ppu.raddr = 0;
|
||||
ppu.res_cached = 0;
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@ -3103,7 +3103,18 @@ static T ppu_load_acquire_reservation(ppu_thread& ppu, u32 addr)
|
|||
ppu.last_faddr = 0;
|
||||
}
|
||||
|
||||
ppu.rtime = vm::reservation_acquire(addr) & -128;
|
||||
const u32 res_cached = ppu.res_cached;
|
||||
|
||||
if ((addr & -128) == (res_cached & -128))
|
||||
{
|
||||
// Reload "cached" reservation of previous succeeded conditional store
|
||||
// This seems like a hardware feature according to cellSpursAddUrgentCommand function
|
||||
ppu.rtime -= 128;
|
||||
}
|
||||
else
|
||||
{
|
||||
ppu.rtime = vm::reservation_acquire(addr) & -128;
|
||||
}
|
||||
|
||||
be_t<u64> rdata;
|
||||
|
||||
|
@ -3376,7 +3387,7 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
}
|
||||
|
||||
// Test if store address is on the same aligned 8-bytes memory as load
|
||||
if (const u32 raddr = std::exchange(ppu.raddr, 0); raddr / 8 != addr / 8)
|
||||
if (const u32 raddr = ppu.raddr; raddr / 8 != addr / 8)
|
||||
{
|
||||
// If not and it is on the same aligned 128-byte memory, proceed only if 128-byte reservations are enabled
|
||||
// In realhw the store address can be at any address of the 128-byte cache line
|
||||
|
@ -3389,12 +3400,16 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
data += 0;
|
||||
}
|
||||
|
||||
ppu.raddr = 0;
|
||||
ppu.res_cached = 0;
|
||||
return false;
|
||||
}
|
||||
}
|
||||
|
||||
if (old_data != data || rtime != (res & -128))
|
||||
{
|
||||
ppu.raddr = 0;
|
||||
ppu.res_cached = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
@ -3650,6 +3665,9 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
}
|
||||
|
||||
ppu.last_faddr = 0;
|
||||
ppu.res_cached = ppu.raddr;
|
||||
ppu.rtime += 128;
|
||||
ppu.raddr = 0;
|
||||
return true;
|
||||
}
|
||||
|
||||
|
@ -3669,6 +3687,8 @@ static bool ppu_store_reservation(ppu_thread& ppu, u32 addr, u64 reg_value)
|
|||
ppu.res_notify = 0;
|
||||
}
|
||||
|
||||
ppu.raddr = 0;
|
||||
ppu.res_cached = 0;
|
||||
return false;
|
||||
}
|
||||
|
||||
|
|
|
@ -264,6 +264,7 @@ public:
|
|||
u64 rtime{0};
|
||||
alignas(64) std::byte rdata[128]{}; // Reservation data
|
||||
bool use_full_rdata{};
|
||||
u32 res_cached{0}; // Reservation "cached" addresss
|
||||
u32 res_notify{0};
|
||||
u64 res_notify_time{0};
|
||||
|
||||
|
|
Loading…
Add table
Reference in a new issue