Merge branch 'master' into myfix
This commit is contained in: commit 1631e134d9

19 changed files with 218 additions and 109 deletions
@@ -2412,6 +2412,13 @@ fs::file fs::make_gather(std::vector<fs::file> files)
     return result;
 }
 
+std::string fs::generate_neighboring_path(std::string_view source, [[maybe_unused]] u64 seed)
+{
+    // Seed is currently not used
+
+    return fmt::format(u8"%s/$%s.%s.tmp", get_parent_dir(source), source.substr(source.find_last_of(fs::delim) + 1), fmt::base57(utils::get_unique_tsc()));
+}
+
 bool fs::pending_file::open(std::string_view path)
 {
     file.close();
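For orientation, a sketch of what the new helper yields (illustrative values, not from the commit): the temporary name is placed in the same directory as the source, so a later rename never crosses filesystems and can stay atomic.

    // Hypothetical example of fs::generate_neighboring_path (the seed is reserved):
    //   fs::generate_neighboring_path("/dev_hdd0/save.dat", 0)
    //   -> "/dev_hdd0/$save.dat.<base57-encoded unique TSC>.tmp"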
@@ -2430,7 +2437,7 @@ bool fs::pending_file::open(std::string_view path)
 
     do
     {
-        m_path = fmt::format(u8"%s/$%s.%s.tmp", get_parent_dir(path), path.substr(path.find_last_of(fs::delim) + 1), fmt::base57(utils::get_unique_tsc()));
+        m_path = fs::generate_neighboring_path(path, 0);
 
         if (file.open(m_path, fs::create + fs::write + fs::read + fs::excl))
         {
@@ -2563,21 +2570,52 @@ bool fs::pending_file::commit(bool overwrite)
     file.close();
 
 #ifdef _WIN32
-    const auto ws2 = to_wchar(m_dest);
+    const auto wdest = to_wchar(m_dest);
 
     bool ok = false;
 
     if (hardlink_paths.empty())
     {
-        ok = MoveFileExW(ws1.get(), ws2.get(), overwrite ? MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH : MOVEFILE_WRITE_THROUGH);
+        ok = MoveFileExW(ws1.get(), wdest.get(), overwrite ? MOVEFILE_REPLACE_EXISTING | MOVEFILE_WRITE_THROUGH : MOVEFILE_WRITE_THROUGH);
     }
     else
     {
-        ok = ReplaceFileW(ws1.get(), ws2.get(), nullptr, 0, nullptr, nullptr);
+        ok = ReplaceFileW(ws1.get(), wdest.get(), nullptr, 0, nullptr, nullptr);
     }
 
     if (ok)
     {
+        for (const std::wstring& link_name : hardlink_paths)
+        {
+            std::unique_ptr<wchar_t[]> write_temp_path;
+
+            do
+            {
+                write_temp_path = to_wchar(fs::generate_neighboring_path(m_dest, 0));
+
+                // Generate a temporary hard link
+                if (CreateHardLinkW(wdest.get(), write_temp_path.get(), nullptr))
+                {
+                    if (MoveFileExW(write_temp_path.get(), link_name.data(), MOVEFILE_REPLACE_EXISTING))
+                    {
+                        // Success
+                        write_temp_path.reset();
+                        break;
+                    }
+
+                    break;
+                }
+            }
+            while (fs::g_tls_error == fs::error::exist); // Only retry if failed due to existing file
+
+            if (write_temp_path)
+            {
+                // Failure
+                g_tls_error = to_error(GetLastError());
+                return false;
+            }
+        }
+
         // Disable the destructor
         m_path.clear();
         return true;
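The loop above follows the usual Win32 pattern for refreshing hard links after a file has been replaced; a condensed sketch of the intended sequence (simplified, error handling elided; per the CreateHardLinkW documentation the new link name is the first argument, the existing file the second):

    // For each pre-existing link name that pointed at the old file:
    //   1. CreateHardLinkW(temp_name, committed_file, nullptr);             // new link to the new contents
    //   2. MoveFileExW(temp_name, old_link_name, MOVEFILE_REPLACE_EXISTING); // atomically swap it in
    // Retrying with a fresh temp_name only makes sense when step 1 failed
    // because temp_name already existed (fs::error::exist), hence the loop condition.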
@@ -2639,6 +2677,17 @@ void fmt_class_string<fs::seek_mode>::format(std::string& out, u64 arg)
 template<>
 void fmt_class_string<fs::error>::format(std::string& out, u64 arg)
 {
+    if (arg == static_cast<u64>(fs::error::unknown))
+    {
+        // Note: may not be the correct error code because it only prints the last one
+#ifdef _WIN32
+        fmt::append(out, "Unknown error [errno=%d]", GetLastError());
+#else
+        fmt::append(out, "Unknown error [errno=%d]", errno);
+#endif
+        return;
+    }
+
     format_enum(out, arg, [](auto arg)
     {
         switch (arg)
@@ -601,6 +601,8 @@ namespace fs
     // Temporary directory
     const std::string& get_temp_dir();
 
+    std::string generate_neighboring_path(std::string_view source, u64 seed);
+
     // Unique pending file creation destined to be renamed to the destination file
     struct pending_file
     {
@@ -472,7 +472,7 @@ error_code cellSyncQueuePush(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_push_begin(ctrl, depth, &position);
     }))
@@ -509,7 +509,7 @@ error_code cellSyncQueueTryPush(vm::ptr<CellSyncQueue> queue, vm::cptr<void> buf
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_push_begin(ctrl, depth, &position);
     }))
@@ -543,7 +543,7 @@ error_code cellSyncQueuePop(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::p
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_pop_begin(ctrl, depth, &position);
     }))
@@ -580,7 +580,7 @@ error_code cellSyncQueueTryPop(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buffe
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_pop_begin(ctrl, depth, &position);
     }))
@@ -614,7 +614,7 @@ error_code cellSyncQueuePeek(ppu_thread& ppu, vm::ptr<CellSyncQueue> queue, vm::
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_peek_begin(ctrl, depth, &position);
     }))
@@ -651,7 +651,7 @@ error_code cellSyncQueueTryPeek(vm::ptr<CellSyncQueue> queue, vm::ptr<void> buff
 
     u32 position;
 
-    while (!queue->ctrl.atomic_op([&](auto& ctrl)
+    while (!queue->ctrl.atomic_op([&](CellSyncQueue::ctrl_t& ctrl)
     {
         return CellSyncQueue::try_peek_begin(ctrl, depth, &position);
     }))
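A note on why these lambdas gain explicit parameter types (here and in the SPU/lv2 hunks below): the new requires clause on atomic_op/fetch_op (see the atomic.hpp hunks near the end) rejects callables that are also invocable with const T or volatile T. A generic [](auto& x) lambda deduces successfully for those too, since std::is_invocable only checks the call signature and never the body, so it no longer satisfies the constraint. Naming the type keeps the lambda bindable to non-cv L-values only. A minimal sketch, assuming a plain atomic_t<u32>:

    atomic_t<u32> value{0};

    // Rejected after this commit: a generic lambda is also invocable with const u32,
    // so !std::is_invocable_v<F, const T> evaluates to false.
    // value.atomic_op([](auto& x) { x++; });

    // Fine: a non-const u32& parameter cannot bind to const or volatile arguments.
    value.atomic_op([](u32& x) { x++; });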
@@ -543,9 +543,7 @@ class spu_llvm_recompiler : public spu_recompiler_base, public cpu_translator
     template <typename T = u8>
     llvm::Value* _ptr(llvm::Value* base, llvm::Value* offset)
     {
-        const auto off = m_ir->CreateGEP(get_type<u8>(), base, offset);
-        const auto ptr = m_ir->CreateBitCast(off, get_type<T*>());
-        return ptr;
+        return m_ir->CreateGEP(get_type<u8>(), base, offset);
     }
 
     template <typename T, typename... Args>
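The dropped CreateBitCast is dead weight under LLVM's opaque pointer model (a reasonable reading of this hunk, assuming a recent LLVM): every pointer value has the single type ptr, so casting a GEP result to T* is an identity operation.

    // Sketch: with opaque pointers these yield the same llvm::Value.
    llvm::Value* addr = m_ir->CreateGEP(get_type<u8>(), base, offset); // already usable wherever a T* is expected
    // m_ir->CreateBitCast(addr, get_type<T*>());                     // no-op, returns its operand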
@@ -2698,7 +2698,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
 
     bool ok = false;
 
-    std::tie(old, ok) = bits->fetch_op([&](auto& v)
+    std::tie(old, ok) = bits->fetch_op([&](u128& v)
     {
         if (v & wmask)
         {
@@ -2796,7 +2796,7 @@ void spu_thread::do_dma_transfer(spu_thread* _this, const spu_mfc_cmd& args, u8*
     res += 127;
 
     // Release bits and notify
-    bits->atomic_op([&](auto& v)
+    bits->atomic_op([&](u128& v)
     {
         v &= ~wmask;
     });
@@ -1852,7 +1852,7 @@ void lv2_obj::schedule_all(u64 current_time)
 
     target->start_time = 0;
 
-    if ((target->state.fetch_op(FN(x += cpu_flag::signal, x -= cpu_flag::suspend, x-= remove_yield, void())) & (cpu_flag::wait + cpu_flag::signal)) != cpu_flag::wait)
+    if ((target->state.fetch_op(AOFN(x += cpu_flag::signal, x -= cpu_flag::suspend, x-= remove_yield, void())) & (cpu_flag::wait + cpu_flag::signal)) != cpu_flag::wait)
     {
         continue;
     }
@@ -771,7 +771,7 @@ error_code sys_event_port_send(u32 eport_id, u64 data1, u64 data2, u64 data3)
         return CELL_ESRCH;
     }
 
-    if (ppu && port->queue->type == SYS_PPU_QUEUE && notified_thread)
+    if (ppu && notified_thread)
     {
         // Wait to be requeued
         if (ppu->test_stopped())
@@ -142,7 +142,7 @@ error_code _sys_lwmutex_lock(ppu_thread& ppu, u32 lwmutex_id, u64 timeout)
 
     const auto mutex = idm::get<lv2_obj, lv2_lwmutex>(lwmutex_id, [&, notify = lv2_obj::notify_all_t()](lv2_lwmutex& mutex)
     {
-        if (s32 signal = mutex.lv2_control.fetch_op([](auto& data)
+        if (s32 signal = mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t& data)
         {
             if (data.signaled)
             {
@@ -297,7 +297,7 @@ error_code _sys_lwmutex_trylock(ppu_thread& ppu, u32 lwmutex_id)
 
     const auto mutex = idm::check<lv2_obj, lv2_lwmutex>(lwmutex_id, [&](lv2_lwmutex& mutex)
     {
-        auto [_, ok] = mutex.lv2_control.fetch_op([](auto& data)
+        auto [_, ok] = mutex.lv2_control.fetch_op([](lv2_lwmutex::control_data_t& data)
         {
             if (data.signaled & 1)
             {
@@ -156,7 +156,7 @@ error_code sys_ss_random_number_generator(u64 pkg_id, vm::ptr<void> buf, u64 siz
 
 error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3)
 {
-    sys_ss.todo("sys_ss_access_control_engine(pkg_id=0x%llx, a2=0x%llx, a3=0x%llx)", pkg_id, a2, a3);
+    sys_ss.success("sys_ss_access_control_engine(pkg_id=0x%llx, a2=0x%llx, a3=0x%llx)", pkg_id, a2, a3);
 
     const u64 authid = g_ps3_process_info.self_info.valid ?
         g_ps3_process_info.self_info.prog_id_hdr.program_authority_id : 0;
@@ -167,7 +167,7 @@ error_code sys_ss_access_control_engine(u64 pkg_id, u64 a2, u64 a3)
     {
         if (!g_ps3_process_info.debug_or_root())
         {
-            return CELL_ENOSYS;
+            return not_an_error(CELL_ENOSYS);
        }
 
         if (!a2)
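Context for the not_an_error change (based on how RPCS3's error_code wrapper is used elsewhere in the codebase): a raw CELL_ENOSYS return would be reported as a failure by the HLE logging machinery, while not_an_error marks it as an expected result for this syscall; the value returned to the guest is unchanged.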
@@ -945,7 +945,7 @@ namespace vm
         return true;
     }
 
-    static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm)
+    static u32 _page_unmap(u32 addr, u32 max_size, u64 bflags, utils::shm* shm, std::vector<std::pair<u64, u64>>& unmap_events)
     {
         perf_meter<"PAGE_UNm"_u64> perf0;
 
@@ -1009,7 +1009,7 @@ namespace vm
         // the RSX might try to call VirtualProtect on memory that is already unmapped
         if (auto rsxthr = g_fxo->try_get<rsx::thread>())
         {
-            rsxthr->on_notify_pre_memory_unmapped(addr, size);
+            rsxthr->on_notify_pre_memory_unmapped(addr, size, unmap_events);
         }
 
         // Deregister PPU related data
@@ -1309,7 +1309,7 @@ namespace vm
         }
     }
 
-    bool block_t::unmap(std::vector<std::pair<u32, u32>>* unmapped)
+    bool block_t::unmap(std::vector<std::pair<u64, u64>>* unmapped)
     {
         auto& m_map = (m.*block_map)();
 
@@ -1320,12 +1320,9 @@ namespace vm
         {
             const auto next = std::next(it);
             const auto size = it->second.first;
-            auto unmap = std::make_pair(it->first, _page_unmap(it->first, size, this->flags, it->second.second.get()));
-
-            if (unmapped)
-            {
-                unmapped->emplace_back(unmap);
-            }
+            std::vector<std::pair<u64, u64>> event_data;
+            ensure(size == _page_unmap(it->first, size, this->flags, it->second.second.get(), unmapped ? *unmapped : event_data));
 
             it = next;
         }
@@ -1488,14 +1485,16 @@ namespace vm
     {
         struct notify_t
         {
-            u32 addr{};
-            u32 size{};
+            std::vector<std::pair<u64, u64>> event_data;
 
             ~notify_t() noexcept
             {
-                if (auto rsxthr = g_fxo->try_get<rsx::thread>(); rsxthr && size)
+                if (auto rsxthr = g_fxo->try_get<rsx::thread>())
                 {
-                    rsxthr->on_notify_post_memory_unmapped(addr, size);
+                    for (const auto [event_data1, event_data2] : event_data)
+                    {
+                        rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
+                    }
                 }
             }
         } unmap_notification;
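The notify_t rewrite is a scope-exit pattern: event payloads are collected into event_data while the vm lock is held, and the destructor delivers them to the RSX thread only after the critical section unwinds. Reduced to its essentials (names below are illustrative, not from the commit):

    struct deferred_notify
    {
        std::vector<std::pair<u64, u64>> events;

        ~deferred_notify() noexcept
        {
            for (auto [id, payload] : events)
            {
                send(id, payload); // hypothetical sink, stands in for on_notify_post_memory_unmapped
            }
        }
    };

    // Usage: declare first, fill under the lock; events fire at scope exit, after unlocking.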
@@ -1525,7 +1524,7 @@ namespace vm
             }
 
             // Unmap "real" memory pages
-            ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get()));
+            ensure(size == _page_unmap(addr, size, this->flags, found->second.second.get(), unmap_notification.event_data));
 
             // Clear stack guards
             if (flags & stack_guarded)
@@ -1537,8 +1536,6 @@ namespace vm
             // Remove entry
             m_map.erase(found);
 
-            unmap_notification.size = size;
-            unmap_notification.addr = addr;
             return size;
         }
     }
@@ -1837,7 +1834,7 @@ namespace vm
         }
     }
 
-    bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u32, u32>>* unmapped = nullptr)
+    bool _unmap_block(const std::shared_ptr<block_t>& block, std::vector<std::pair<u64, u64>>* unmapped = nullptr)
     {
         return block->unmap(unmapped);
     }
@@ -1988,15 +1985,15 @@ namespace vm
 
         struct notify_t
         {
-            std::vector<std::pair<u32, u32>> addr_size_pairs;
+            std::vector<std::pair<u64, u64>> unmap_data;
 
             ~notify_t() noexcept
             {
-                for (const auto [addr, size] : addr_size_pairs)
+                if (auto rsxthr = g_fxo->try_get<rsx::thread>())
                 {
-                    if (auto rsxthr = g_fxo->try_get<rsx::thread>())
+                    for (const auto [event_data1, event_data2] : unmap_data)
                     {
-                        rsxthr->on_notify_post_memory_unmapped(addr, size);
+                        rsxthr->on_notify_post_memory_unmapped(event_data1, event_data2);
                     }
                 }
             }
@@ -2031,7 +2028,7 @@ namespace vm
 
         result.first = std::move(*it);
         g_locations.erase(it);
-        ensure(_unmap_block(result.first, &unmap_notifications.addr_size_pairs));
+        ensure(_unmap_block(result.first, &unmap_notifications.unmap_data));
         result.second = true;
         return result;
     }
@@ -133,8 +133,8 @@ namespace vm
         bool try_alloc(u32 addr, u64 bflags, u32 size, std::shared_ptr<utils::shm>&&) const;
 
         // Unmap block
-        bool unmap(std::vector<std::pair<u32, u32>>* unmapped = nullptr);
-        friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u32, u32>>* unmapped);
+        bool unmap(std::vector<std::pair<u64, u64>>* unmapped = nullptr);
+        friend bool _unmap_block(const std::shared_ptr<block_t>&, std::vector<std::pair<u64, u64>>* unmapped);
 
     public:
         block_t(u32 addr, u32 size, u64 flags);
@@ -1214,7 +1214,7 @@ void GLGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
         m_rtts.invalidate_surface_address(addr, false);
     }
 
@@ -3508,10 +3508,71 @@ namespace rsx
         }
     }
 
-    void thread::on_notify_pre_memory_unmapped(u32 address, u32 size)
+    void thread::on_notify_pre_memory_unmapped(u32 address, u32 size, std::vector<std::pair<u64, u64>>& event_data)
     {
         if (rsx_thread_running && address < rsx::constants::local_mem_base)
         {
+            // Each bit represents io entry to be unmapped
+            u64 unmap_status[512 / 64]{};
+
+            for (u32 ea = address >> 20, end = ea + (size >> 20); ea < end; ea++)
+            {
+                const u32 io = utils::rol32(iomap_table.io[ea], 32 - 20);
+
+                if (io + 1)
+                {
+                    unmap_status[io / 64] |= 1ull << (io & 63);
+                    iomap_table.io[ea].release(-1);
+                    iomap_table.ea[io].release(-1);
+                }
+            }
+
+            auto& cfg = g_fxo->get<gcm_config>();
+
+            std::unique_lock<shared_mutex> hle_lock;
+
+            for (u32 i = 0; i < std::size(unmap_status); i++)
+            {
+                // TODO: Check order when sending multiple events
+                if (u64 to_unmap = unmap_status[i])
+                {
+                    if (isHLE)
+                    {
+                        if (!hle_lock)
+                        {
+                            hle_lock = std::unique_lock{cfg.gcmio_mutex};
+                        }
+
+                        int bit = 0;
+
+                        while (to_unmap)
+                        {
+                            bit = (std::countr_zero<u64>(utils::rol64(to_unmap, 0 - bit)) + bit);
+                            to_unmap &= ~(1ull << bit);
+
+                            constexpr u16 null_entry = 0xFFFF;
+                            const u32 ea = std::exchange(cfg.offsetTable.eaAddress[(i * 64 + bit)], null_entry);
+
+                            if (ea < (rsx::constants::local_mem_base >> 20))
+                            {
+                                cfg.offsetTable.eaAddress[ea] = null_entry;
+                            }
+                        }
+
+                        continue;
+                    }
+
+                    // Each 64 entries are grouped by a bit
+                    const u64 io_event = SYS_RSX_EVENT_UNMAPPED_BASE << i;
+                    event_data.emplace_back(io_event, to_unmap);
+                }
+            }
+
+            if (hle_lock)
+            {
+                hle_lock.unlock();
+            }
+
             // Pause RSX thread momentarily to handle unmapping
             eng_lock elock(this);
 
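To unpack the bit bookkeeping above: the io table tracks 1 MB pages, each live io entry gets one bit across eight u64 words (512 entries total), and io + 1 is the idiomatic test against the u32(-1) "unmapped" sentinel. A small standalone sketch of the packing, with an invented entry for illustration:

    u64 unmap_status[512 / 64]{};                // one bit per io entry

    u32 io = 137;                                // hypothetical entry being unmapped
    unmap_status[io / 64] |= 1ull << (io & 63);  // word 2, bit 9

    // Later, each non-zero word i becomes one deferred event:
    //   event id = SYS_RSX_EVENT_UNMAPPED_BASE << i
    //   payload  = the 64-entry bitmask itself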
@@ -3541,57 +3602,11 @@ namespace rsx
         }
     }
 
-    void thread::on_notify_post_memory_unmapped(u32 address, u32 size)
+    void thread::on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2)
     {
-        if (rsx_thread_running && address < rsx::constants::local_mem_base)
+        if (!isHLE)
         {
-            if (!isHLE)
-            {
-                // Each bit represents io entry to be unmapped
-                u64 unmap_status[512 / 64]{};
-
-                for (u32 ea = address >> 20, end = ea + (size >> 20); ea < end; ea++)
-                {
-                    const u32 io = utils::rol32(iomap_table.io[ea], 32 - 20);
-
-                    if (io + 1)
-                    {
-                        unmap_status[io / 64] |= 1ull << (io & 63);
-                        iomap_table.ea[io].release(-1);
-                        iomap_table.io[ea].release(-1);
-                    }
-                }
-
-                for (u32 i = 0; i < std::size(unmap_status); i++)
-                {
-                    // TODO: Check order when sending multiple events
-                    if (u64 to_unmap = unmap_status[i])
-                    {
-                        // Each 64 entries are grouped by a bit
-                        const u64 io_event = SYS_RSX_EVENT_UNMAPPED_BASE << i;
-                        send_event(0, io_event, to_unmap);
-                    }
-                }
-            }
-            else
-            {
-                // TODO: Fix this
-                u32 ea = address >> 20, io = iomap_table.io[ea];
-
-                if (io + 1)
-                {
-                    io >>= 20;
-
-                    auto& cfg = g_fxo->get<gcm_config>();
-                    std::lock_guard lock(cfg.gcmio_mutex);
-
-                    for (const u32 end = ea + (size >> 20); ea < end;)
-                    {
-                        cfg.offsetTable.ioAddress[ea++] = 0xFFFF;
-                        cfg.offsetTable.eaAddress[io++] = 0xFFFF;
-                    }
-                }
-            }
+            send_event(0, event_data1, event_data2);
         }
     }
 
@@ -502,14 +502,15 @@ namespace rsx
         /**
          * Notify that a section of memory is to be unmapped
          * Any data held in the defined range is discarded
+         * Sets optional unmap event data
          */
-        void on_notify_pre_memory_unmapped(u32 address_base, u32 size);
+        void on_notify_pre_memory_unmapped(u32 address_base, u32 size, std::vector<std::pair<u64, u64>>& event_data);
 
         /**
          * Notify that a section of memory has been unmapped
          * Any data held in the defined range is discarded
          */
-        void on_notify_post_memory_unmapped(u32 address_base, u32 size);
+        void on_notify_post_memory_unmapped(u64 event_data1, u64 event_data2);
 
         /**
          * Notify to check internal state during semaphore wait
@@ -1257,7 +1257,7 @@ void VKGSRender::notify_tile_unbound(u32 tile)
     if (false)
     {
         u32 addr = rsx::get_address(tiles[tile].offset, tiles[tile].location);
-        on_notify_pre_memory_unmapped(addr, tiles[tile].size);
+        on_notify_pre_memory_unmapped(addr, tiles[tile].size, *std::make_unique<std::vector<std::pair<u64, u64>>>());
         m_rtts.invalidate_surface_address(addr, false);
     }
 
@@ -28,7 +28,7 @@ namespace rpcs3
     // Currently accessible by Windows and Linux build scripts, see implementations when doing MACOSX
     const utils::version& get_version()
     {
-        static constexpr utils::version version{ 0, 0, 33, utils::version_type::alpha, 1, RPCS3_GIT_VERSION };
+        static constexpr utils::version version{ 0, 0, 34, utils::version_type::alpha, 1, RPCS3_GIT_VERSION };
         return version;
     }
 
@@ -160,9 +160,17 @@ bool gui_application::Init()
     if (m_gui_settings->GetValue(gui::ib_show_welcome).toBool())
     {
         welcome_dialog* welcome = new welcome_dialog(m_gui_settings, false);
+
+        bool use_dark_theme = false;
+
+        connect(welcome, &QDialog::accepted, this, [&]()
+        {
+            use_dark_theme = welcome->does_user_want_dark_theme();
+        });
+
         welcome->exec();
 
-        if (welcome->does_user_want_dark_theme())
+        if (use_dark_theme)
         {
             m_gui_settings->SetValue(gui::m_currentStylesheet, "Darker Style by TheMitoSan");
         }
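This is the usual defensive pattern for Qt dialogs that can be destroyed when closed (the likely motivation here, assuming welcome_dialog self-deletes, e.g. via Qt::WA_DeleteOnClose): copy what you need out of the dialog in a slot fired while it still exists, rather than calling its methods after exec() returns.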
@@ -434,7 +434,7 @@ static u32 cond_alloc(uptr iptr, u32 tls_slot = -1)
     });
 
     // Set lowest clear bit
-    const u64 bits = s_cond_bits[level3].fetch_op(FN(x |= x + 1, void()));
+    const u64 bits = s_cond_bits[level3].fetch_op(AOFN(x |= x + 1, void()));
 
     // Find lowest clear bit (before it was set in fetch_op)
     const u32 id = level3 * 64 + std::countr_one(bits);
@@ -503,9 +503,9 @@ static void cond_free(u32 cond_id, u32 tls_slot = -1)
     // Release the semaphore tree in the reverse order
     s_cond_bits[cond_id / 64] &= ~(1ull << (cond_id % 64));
 
-    s_cond_sem3[level2].atomic_op(FN(x -= u128{1} << (level3 * 7)));
-    s_cond_sem2[level1].atomic_op(FN(x -= u128{1} << (level2 * 11)));
-    s_cond_sem1.atomic_op(FN(x -= u128{1} << (level1 * 14)));
+    s_cond_sem3[level2].atomic_op(AOFN(x -= u128{1} << (level3 * 7)));
+    s_cond_sem2[level1].atomic_op(AOFN(x -= u128{1} << (level2 * 11)));
+    s_cond_sem1.atomic_op(AOFN(x -= u128{1} << (level1 * 14)));
 }
 
 static cond_handle* cond_id_lock(u32 cond_id, uptr iptr = 0)
@@ -674,19 +674,28 @@ u64 utils::get_unique_tsc()
 {
     const u64 stamp0 = utils::get_tsc();
 
-    return s_min_tsc.atomic_op([&](u64& tsc)
+    if (!s_min_tsc.fetch_op([=](u64& tsc)
     {
-        if (stamp0 <= s_min_tsc)
+        if (stamp0 <= tsc)
         {
-            // Add 1 if new stamp is too old
-            return ++tsc;
+            return false;
         }
         else
         {
             // Update last tsc with new stamp otherwise
-            return ((tsc = stamp0));
+            tsc = stamp0;
+            return true;
         }
-    });
+    }).second)
+    {
+        // Add 1 if new stamp is too old
+        // Avoid doing it in the atomic operation because, if it gets here, it means there is already much contention
+        // So break the race (at least on x86)
+        return s_min_tsc.add_fetch(1);
+    }
+
+    return stamp0;
 }
 
 atomic_t<u16>* root_info::slot_alloc(uptr ptr) noexcept
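Worth noting for readers of the control flow above: fetch_op returns a pair of the old value and the lambda's result, and a result that evaluates to false cancels the store (this matches the fetch_op comment in the atomic.hpp hunk below). So the fast path publishes the fresh timestamp with one compare-and-swap, and only a stale timestamp falls through to a single unconditional add_fetch instead of incrementing inside the retry loop.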
@@ -1233,6 +1233,7 @@ public:
 
     // Atomic operation; returns old value, or pair of old value and return value (cancel op if evaluates to false)
     template <typename F, typename RT = std::invoke_result_t<F, T&>>
+    requires (!std::is_invocable_v<F, const T> && !std::is_invocable_v<F, volatile T>)
     std::conditional_t<std::is_void_v<RT>, type, std::pair<type, RT>> fetch_op(F func)
     {
         type _new, old = atomic_storage<type>::load(m_data);
@@ -1264,6 +1265,7 @@ public:
 
     // Atomic operation; returns function result value, function is the lambda
     template <typename F, typename RT = std::invoke_result_t<F, T&>>
+    requires (!std::is_invocable_v<F, const T> && !std::is_invocable_v<F, volatile T>)
     RT atomic_op(F func)
     {
         type _new, old = atomic_storage<type>::load(m_data);
@@ -1798,3 +1800,31 @@ struct std::common_type<T, atomic_t<T2, Align2>> : std::common_type<std::common_
 #pragma GCC diagnostic pop
 #pragma GCC diagnostic pop
 #endif
+
+namespace utils
+{
+    template <typename F>
+    struct aofn_helper
+    {
+        F f;
+
+        aofn_helper(F&& f) noexcept
+            : f(std::forward<F>(f))
+        {
+        }
+
+        template <typename Arg> requires (std::is_same_v<std::remove_reference_t<Arg>, std::remove_cvref_t<Arg>> && !std::is_rvalue_reference_v<Arg>)
+        auto operator()(Arg& arg) const noexcept
+        {
+            return f(std::forward<Arg&>(arg));
+        }
+    };
+
+    template <typename F>
+    aofn_helper(F&& f) -> aofn_helper<F>;
+}
+
+// Shorter lambda for non-cv qualified L-values
+// For use with atomic operations
+#define AOFN(...) \
+    ::utils::aofn_helper([&](auto& x) { return (__VA_ARGS__); })
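A usage sketch tying the new pieces together (this assumes the pre-existing FN macro elsewhere in the codebase expands to a plain generic lambda): aofn_helper's constrained operator() only accepts non-cv qualified L-values, so an AOFN wrapper satisfies the new requires clauses on atomic_op/fetch_op where a bare generic lambda no longer does.

    atomic_t<u64> counter{};

    counter.atomic_op(AOFN(x += 2));   // OK: the helper is not invocable with const u64
    // counter.atomic_op(FN(x += 2));  // would now fail the requires clause: a generic
    //                                 // lambda deduces for const u64 as well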