SPU: make vm::check_addr checks safe under vm::range_lock

Reuse some internal locking mechanisms.
Also fix a missing check in vm::range_lock.

Author: Nekotekina
Date:   2020-10-27 22:25:32 +03:00
Commit: 86785dffa4 (parent c491b73f3a)

2 changed files with 56 additions and 21 deletions
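
For context on the diffs below: vm writers publish the range they are about to modify in g_addr_lock (address in the low 32 bits, size in the high 32 bits), then spin until no reader-side range lock overlaps it, while SPU readers perform the mirror handshake in vm::range_lock. A minimal sketch of the writer side, assuming a hypothetical fixed reader_locks table in place of the real for_all_range_locks iteration:

#include <atomic>
#include <cstdint>

// Writer-side handshake (sketch, not the rpcs3 implementation): publish the
// locked range, then wait until no reader advertises an overlapping access.
std::atomic<uint64_t> g_addr_lock{0};     // addr | (u64{size} << 32)
std::atomic<uint64_t> reader_locks[64]{}; // hypothetical per-thread reader slots

void lock_range_for_write(uint32_t addr, uint32_t size)
{
	// Readers compare against this value before touching memory
	g_addr_lock.store(addr | (uint64_t{size} << 32));

	for (bool busy = true; busy;)
	{
		busy = false;

		for (auto& lock : reader_locks)
		{
			const uint64_t v = lock.load();
			const uint32_t a = static_cast<uint32_t>(v);
			const uint32_t s = static_cast<uint32_t>(v >> 32);

			// Overlap test: reader range [a, a+s) vs writer range [addr, addr+size)
			if (s && a < addr + size && addr < a + s)
			{
				busy = true;
			}
		}
	}
}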

rpcs3/Emu/Memory/vm.cpp

@@ -236,7 +236,7 @@ namespace vm
 		const auto range = utils::address_range::start_length(addr, size);
 
 		// Wait for range locks to clear
-		while (value)
+		while (true)
 		{
 			const u64 bads = for_all_range_locks([&](u32 addr2, u32 size2)
 			{
@@ -416,14 +416,14 @@ namespace vm
 			}
 		}
 
-		g_addr_lock = addr | (u64{128} << 32);
-
 		if (g_shareable[addr >> 16])
 		{
 			// Reservation address in shareable memory range
 			addr = addr & 0xffff;
 		}
 
+		g_addr_lock = addr | (u64{128} << 32);
+
 		const auto range = utils::address_range::start_length(addr, 128);
 
 		while (true)
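
This hunk is the writer half of the range_lock fix: the reservation address is now normalized for shareable memory (masked to its offset within the 64K block) before g_addr_lock is published, matching the new g_shareable check readers perform in vm::range_lock (see the header diff below). A worked example with a hypothetical reservation address, showing how the old publish order let a conflicting reader slip past the check:

#include <cassert>
#include <cstdint>

int main()
{
	const uint32_t resv = 0x30011080;           // hypothetical reservation in shareable memory
	const uint32_t reader_addr = resv & 0xffff; // readers normalize to the 64K offset: 0x1080

	const uint64_t old_lock = resv | (uint64_t{128} << 32);            // published before masking
	const uint64_t new_lock = (resv & 0xffff) | (uint64_t{128} << 32); // published after masking

	auto overlaps = [](uint64_t lock, uint32_t a, uint32_t s)
	{
		const uint64_t la = static_cast<uint32_t>(lock);
		const uint32_t ls = static_cast<uint32_t>(lock >> 32);
		return uint64_t{a} + s > la && a < la + ls;
	};

	assert(!overlaps(old_lock, reader_addr, 128)); // conflict missed: the bug
	assert(overlaps(new_lock, reader_addr, 128));  // conflict detected after the fix
}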
@@ -691,10 +691,11 @@ namespace vm
 		}
 
 		u8 start_value = 0xff;
+		u8 shareable = 0;
 
 		for (u32 start = addr / 4096, end = start + size / 4096, i = start; i < end + 1; i++)
 		{
-			u8 new_val = 0xff;
+			u32 new_val = 0xff;
 
 			if (i < end)
 			{
@@ -702,16 +703,34 @@ namespace vm
 				new_val |= flags_set;
 				new_val &= ~flags_clear;
 
-				g_pages[i].flags.release(new_val);
-				new_val &= (page_readable | page_writable);
+				shareable = g_shareable[i / 16];
 			}
 
-			if (new_val != start_value)
+			if (new_val != start_value || g_shareable[i / 16] != shareable)
 			{
 				if (u32 page_size = (i - start) * 4096)
 				{
-					const auto protection = start_value & page_writable ? utils::protection::rw : (start_value & page_readable ? utils::protection::ro : utils::protection::no);
-					utils::memory_protect(g_base_addr + start * 4096, page_size, protection);
+					// Protect range locks from observing changes in memory protection
+					if (shareable)
+					{
+						// Unoptimized
+						_lock_shareable_cache(2, 0, 0x10000);
+					}
+					else
+					{
+						_lock_shareable_cache(2, start * 4096, page_size);
+					}
+
+					for (u32 j = start; j < i; j++)
+					{
+						g_pages[j].flags.release(new_val);
+					}
+
+					if ((new_val ^ start_value) & (page_readable | page_writable))
+					{
+						const auto protection = start_value & page_writable ? utils::protection::rw : (start_value & page_readable ? utils::protection::ro : utils::protection::no);
+						utils::memory_protect(g_base_addr + start * 4096, page_size, protection);
+					}
 				}
 
 				start_value = new_val;
@@ -719,6 +738,8 @@ namespace vm
 			}
 		}
 
+		g_addr_lock.release(0);
+
 		return true;
 	}
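
page_protect now walks the pages and merges contiguous runs whose new flags (and shareable state) match, holds off readers via _lock_shareable_cache before releasing each run's flags, and only drops g_addr_lock once the whole walk is done. A self-contained sketch of just the run-merging walk, with a sentinel value past the last page forcing the final flush (the real code uses 0xff with a widened u32 new_val for this); flushing here prints instead of calling utils::memory_protect:

#include <cstdint>
#include <cstdio>
#include <vector>

// Walk pages and flush each maximal run of equal flag values in one step.
void protect_runs(const std::vector<uint8_t>& page_flags)
{
	uint32_t start = 0;
	uint32_t start_value = 0x100; // sentinel: cannot collide with a u8 flag value

	for (uint32_t end = static_cast<uint32_t>(page_flags.size()), i = 0; i < end + 1; i++)
	{
		const uint32_t new_val = i < end ? page_flags[i] : 0x100;

		if (new_val != start_value)
		{
			if (i > start && start_value != 0x100)
			{
				// One protection change covers the whole run [start, i)
				std::printf("protect pages [%u, %u) as 0x%x\n", start, i, start_value);
			}

			start_value = new_val;
			start = i;
		}
	}
}

int main()
{
	protect_runs({0x3, 0x3, 0x1, 0x1, 0x1, 0x3}); // prints three runs
}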
@@ -753,26 +774,28 @@ namespace vm
 			size += 4096;
 		}
 
-		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
-		{
-			if (!(g_pages[i].flags.exchange(0) & page_allocated))
-			{
-				fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
-			}
-		}
-
 		if (shm && shm->flags() != 0 && (--shm->info || g_shareable[addr >> 16]))
 		{
 			// Remove mirror from shareable cache
-			_lock_shareable_cache(0, addr, size);
+			_lock_shareable_cache(3, 0, 0x10000);
 
 			for (u32 i = addr / 65536; i < addr / 65536 + size / 65536; i++)
 			{
 				g_shareable[i].release(0);
 			}
 		}
 
-		// Unlock
-		g_addr_lock.release(0);
+		// Protect range locks from actual memory protection changes
+		_lock_shareable_cache(3, addr, size);
+
+		for (u32 i = addr / 4096; i < addr / 4096 + size / 4096; i++)
+		{
+			if (!(g_pages[i].flags & page_allocated))
+			{
+				fmt::throw_exception("Concurrent access (addr=0x%x, size=0x%x, current_addr=0x%x)" HERE, addr, size, i * 4096);
+			}
+
+			g_pages[i].flags.release(0);
+		}
 
 		// Notify rsx to invalidate range
@@ -805,6 +828,9 @@ namespace vm
 			utils::memory_decommit(g_stat_addr + addr, size);
 		}
 
+		// Unlock
+		g_addr_lock.release(0);
+
 		return size;
 	}
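
The unmap path is reordered the same way: the shareable mirror is dropped first, then the whole range is locked against readers before the page flags are verified and cleared, and g_addr_lock is released only after the backing memory has actually been unmapped or decommitted. A condensed sketch of the new ordering, with empty stand-in bodies for the real helpers:

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> g_addr_lock{0};

void lock_range_for_write(uint32_t, uint32_t) {} // stand-in for _lock_shareable_cache(3, addr, size)
void clear_page_flags(uint32_t, uint32_t) {}     // stand-in for the g_pages[i].flags.release(0) loop
void unmap_backing_memory(uint32_t, uint32_t) {} // stand-in for memory_unmap/memory_decommit

void page_unmap_ordered(uint32_t addr, uint32_t size)
{
	// 1) Hold off readers of [addr, addr + size) before touching page state
	lock_range_for_write(addr, size);

	// 2) Invalidate page flags while readers are excluded
	clear_page_flags(addr, size);

	// 3) Change the actual memory mapping while still locked
	unmap_backing_memory(addr, size);

	// 4) Unlock last, so vm::check_addr can never observe a half-unmapped range
	g_addr_lock.store(0);
}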

rpcs3/Emu/Memory/vm_locking.h

@@ -13,6 +13,8 @@ namespace vm
 	extern atomic_t<u64> g_addr_lock;
+	extern atomic_t<u8> g_shareable[];
 
 	// Register reader
 	void passive_lock(cpu_thread& cpu);
@@ -28,7 +30,14 @@ namespace vm
 		const u64 lock_addr = static_cast<u32>(lock_val); // -> u64
 		const u32 lock_size = static_cast<u32>(lock_val >> 32);
 
-		if (u64{begin} + size <= lock_addr || begin >= lock_addr + lock_size) [[likely]]
+		u64 addr = begin;
+
+		if (g_shareable[begin >> 16])
+		{
+			addr = addr & 0xffff;
+		}
+
+		if (addr + size <= lock_addr || addr >= lock_addr + lock_size) [[likely]]
 		{
 			// Optimistic locking
 			range_lock->release(begin | (u64{size} << 32));
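
This is the reader half of the fix named in the commit message: begin is normalized through g_shareable exactly as the writer now normalizes before publishing g_addr_lock, so both sides compare in the same address space. The rest of vm::range_lock keeps the existing optimistic scheme: publish the range, re-read g_addr_lock, and back off to the slow path if a writer appeared in between. A standalone sketch of that scheme, assuming a single lock word (try_begin_read is hypothetical; the real entry point is vm::range_lock above):

#include <atomic>
#include <cstdint>

std::atomic<uint64_t> g_addr_lock{0};

// One slot per reader thread in the real code; a single word suffices here.
std::atomic<uint64_t> my_range_lock{0};

bool try_begin_read(uint32_t begin, uint32_t size)
{
	const uint64_t lock_val  = g_addr_lock.load();
	const uint64_t lock_addr = static_cast<uint32_t>(lock_val);
	const uint32_t lock_size = static_cast<uint32_t>(lock_val >> 32);

	if (uint64_t{begin} + size <= lock_addr || begin >= lock_addr + lock_size)
	{
		// Optimistic: publish our range, then re-check the writer lock
		my_range_lock.store(begin | (uint64_t{size} << 32));

		const uint64_t new_val = g_addr_lock.load();

		if (!new_val || new_val == lock_val)
		{
			return true; // reader may proceed; store 0 to my_range_lock when done
		}

		my_range_lock.store(0); // a writer raced us: retreat
	}

	return false; // caller takes the slow path (range_lock_internal)
}

The re-check after publishing is what makes the writer's spin loop sufficient: either the reader observes the writer's lock and retreats, or the writer observes the reader's published range and waits for it to clear.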