diff --git a/rpcs3/Emu/Cell/SPUThread.cpp b/rpcs3/Emu/Cell/SPUThread.cpp
index 2468e72c09..42e186a2e3 100644
--- a/rpcs3/Emu/Cell/SPUThread.cpp
+++ b/rpcs3/Emu/Cell/SPUThread.cpp
@@ -5659,6 +5659,24 @@ s64 spu_thread::get_ch_value(u32 ch)
 			// Don't busy-wait with TSX - memory is sensitive
 			if (g_use_rtm || !reservation_busy_waiting)
 			{
+				if (u32 max_threads = std::min<u32>(g_cfg.core.max_spurs_threads, group->max_num); group->max_run != max_threads)
+				{
+					constexpr std::string_view spurs_suffix = "CellSpursKernelGroup"sv;
+
+					if (group->name.ends_with(spurs_suffix) && !group->name.substr(0, group->name.size() - spurs_suffix.size()).ends_with("_libsail"))
+					{
+						// Hack: don't run more SPURS threads than specified.
+						if (u32 old = atomic_storage<u32>::exchange(group->max_run, max_threads); old > max_threads)
+						{
+							spu_log.success("HACK: '%s' (0x%x) limited to %u threads.", group->name, group->id, max_threads);
+						}
+						else if (u32 running = group->spurs_running; old < max_threads && running >= old && running < max_threads)
+						{
+							group->spurs_running.notify_all();
+						}
+					}
+				}
+
 				if (u32 work_count = g_spu_work_count)
 				{
 					const u32 true_free = utils::sub_saturate(utils::get_thread_count(), 10);
diff --git a/rpcs3/Emu/Cell/lv2/sys_spu.cpp b/rpcs3/Emu/Cell/lv2/sys_spu.cpp
index 150d755536..87cff4ea34 100644
--- a/rpcs3/Emu/Cell/lv2/sys_spu.cpp
+++ b/rpcs3/Emu/Cell/lv2/sys_spu.cpp
@@ -731,14 +731,16 @@ error_code sys_spu_thread_initialize(ppu_thread& ppu, vm::ptr<u32> thread, u32 g
 	if (++group->init == group->max_num)
 	{
-		if (g_cfg.core.max_spurs_threads < 6 && group->max_num > 0u + g_cfg.core.max_spurs_threads)
+		if (u32 max_threads = std::min<u32>(g_cfg.core.max_spurs_threads, group->max_num); group->max_num > max_threads)
 		{
-			if (group->name.ends_with("CellSpursKernelGroup") && !group->name.ends_with("_libsailCellSpursKernelGroup"))
+			constexpr std::string_view spurs_suffix = "CellSpursKernelGroup"sv;
+
+			if (group->name.ends_with(spurs_suffix) && !group->name.substr(0, group->name.size() - spurs_suffix.size()).ends_with("_libsail"))
 			{
 				// Hack: don't run more SPURS threads than specified.
-				group->max_run = g_cfg.core.max_spurs_threads;
+				group->max_run = max_threads;
 
-				spu_log.success("HACK: '%s' (0x%x) limited to %u threads.", group->name, group_id, +g_cfg.core.max_spurs_threads);
+				spu_log.success("HACK: '%s' (0x%x) limited to %u threads.", group->name, group_id, max_threads);
 			}
 		}
diff --git a/rpcs3/Emu/system_config.h b/rpcs3/Emu/system_config.h
index 236c4269d7..3fbefc8df7 100644
--- a/rpcs3/Emu/system_config.h
+++ b/rpcs3/Emu/system_config.h
@@ -39,7 +39,7 @@ struct cfg_root : cfg::node
 	cfg::_int<0, 6> preferred_spu_threads{ this, "Preferred SPU Threads", 0, true }; // Number of hardware threads dedicated to heavy simultaneous spu tasks
 	cfg::_int<0, 16> spu_delay_penalty{ this, "SPU delay penalty", 3 }; // Number of milliseconds to block a thread if a virtual 'core' isn't free
 	cfg::_bool spu_loop_detection{ this, "SPU loop detection", false }; // Try to detect wait loops and trigger thread yield
-	cfg::_int<0, 6> max_spurs_threads{ this, "Max SPURS Threads", 6 }; // HACK. If less then 6, max number of running SPURS threads in each thread group.
+	cfg::_int<0, 6> max_spurs_threads{ this, "Max SPURS Threads", 6, true }; // HACK. If less than 6, max number of running SPURS threads in each thread group.
 	cfg::_enum<spu_block_size_type> spu_block_size{ this, "SPU Block Size", spu_block_size_type::safe };
 	cfg::_bool spu_accurate_dma{ this, "Accurate SPU DMA", false };
 	cfg::_bool spu_accurate_reservations{ this, "Accurate SPU Reservations", true };