Replace utils::cnttz{32,64} with std::countr_{zero,one}

Make #include <bit> mandatory.
Nekotekina 2020-04-13 15:31:41 +03:00
parent adfc9d93c3
commit d0c199d455
10 changed files with 15 additions and 37 deletions
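A minimal sketch of the replacement mapping (standalone and illustrative, not taken from the repository; the u64 alias stands in for the project's own typedef). std::countr_zero is well-defined for a zero argument, which is why the old nonzero flag disappears, and std::countr_one(x) replaces the cnttz64(~x, false) idiom for finding the lowest clear bit:

#include <bit>
#include <cassert>
#include <cstdint>

using u64 = std::uint64_t;

int main()
{
	const u64 mask = 0b1100'1000;

	// utils::cnttz64(x, false) -> std::countr_zero(x); defined for 0 (returns 64)
	assert(std::countr_zero(mask) == 3);
	assert(std::countr_zero(u64{0}) == 64);

	// utils::cnttz64(~x, false) -> std::countr_one(x): index of the lowest clear bit
	const u64 bits = 0b0000'0111;
	assert(std::countr_one(bits) == 3);
	assert(std::countr_one(bits) == std::countr_zero(~bits));
}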


@@ -2406,7 +2406,7 @@ void thread_ctrl::set_thread_affinity_mask(u64 mask)
SetThreadAffinityMask(_this_thread, mask);
#elif __APPLE__
// Supports only one core
-thread_affinity_policy_data_t policy = { static_cast<integer_t>(utils::cnttz64(mask)) };
+thread_affinity_policy_data_t policy = { static_cast<integer_t>(std::countr_zero(mask)) };
thread_port_t mach_thread = pthread_mach_thread_np(pthread_self());
thread_policy_set(mach_thread, THREAD_AFFINITY_POLICY, reinterpret_cast<thread_policy_t>(&policy), 1);
#elif defined(__linux__) || defined(__DragonFly__) || defined(__FreeBSD__)


@@ -28,30 +28,6 @@ namespace utils
#endif
}
-inline u32 cnttz32(u32 arg, bool nonzero = false)
-{
-#ifdef _MSC_VER
-ulong res;
-return _BitScanForward(&res, arg) || nonzero ? res : 32;
-#elif __BMI__
-return _tzcnt_u32(arg);
-#else
-return arg || nonzero ? __builtin_ctz(arg) : 32;
-#endif
-}
-inline u64 cnttz64(u64 arg, bool nonzero = false)
-{
-#ifdef _MSC_VER
-ulong res;
-return _BitScanForward64(&res, arg) || nonzero ? res : 64;
-#elif __BMI__
-return _tzcnt_u64(arg);
-#else
-return arg || nonzero ? __builtin_ctzll(arg) : 64;
-#endif
-}
inline u8 popcnt32(u32 arg)
{
#ifdef _MSC_VER


@@ -17,9 +17,12 @@
#include <limits>
#include <array>
-#if __has_include(<bit>)
-#include <bit>
+#ifdef _MSC_VER
+#ifndef __cpp_lib_bitops
+#define __cpp_lib_bitops
+#endif
#endif
+#include <bit>
#ifndef __has_builtin
#define __has_builtin(x) 0


@@ -280,7 +280,7 @@ struct cpu_counter
if (ok) [[likely]]
{
// Get actual slot number
-array_slot = i * 64 + utils::cnttz64(~bits, false);
+array_slot = i * 64 + std::countr_one(bits);
break;
}
}
@@ -314,7 +314,7 @@ void for_all_cpu(F&& func) noexcept
{
for (u64 bits = ctr->cpu_array_bits[i]; bits; bits &= bits - 1)
{
-const u64 index = i * 64 + utils::cnttz64(bits, true);
+const u64 index = i * 64 + std::countr_zero(bits);
if (cpu_thread* cpu = ctr->cpu_array[index].load())
{
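The loop above visits every set bit: std::countr_zero yields the index of the lowest set bit and bits &= bits - 1 clears it. A standalone sketch of the same pattern with illustrative values:

#include <bit>
#include <cstdint>
#include <cstdio>

int main()
{
	const std::uint64_t bits = 0b1010'0100; // bits 2, 5 and 7 set

	// Visit set bits from lowest to highest.
	for (std::uint64_t b = bits; b; b &= b - 1)
		std::printf("index %d\n", std::countr_zero(b)); // prints 2, 5, 7
}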


@@ -2144,7 +2144,7 @@ void ppu_acontext::MULLI(ppu_opcode_t op)
}
}
-gpr[op.rd] = spec_gpr::range(min, max, gpr[op.ra].tz() + utils::cnttz64(op.simm16));
+gpr[op.rd] = spec_gpr::range(min, max, gpr[op.ra].tz() + std::countr_zero<u64>(op.simm16));
}
void ppu_acontext::SUBFIC(ppu_opcode_t op)
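The explicit template argument in std::countr_zero<u64>(op.simm16) forces the signed immediate to be taken as u64, since the <bit> functions are constrained to unsigned integer types. A minimal illustration (function and parameter names are hypothetical):

#include <bit>
#include <cstdint>

int imm_trailing_zeros(std::int16_t simm)
{
	// std::countr_zero(simm);                    // ill-formed: signed argument
	return std::countr_zero<std::uint64_t>(simm); // fine: converted to u64 first
}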


@@ -989,7 +989,7 @@ struct ppu_acontext
// Return number of trailing zero bits
u64 tz() const
{
-return utils::cnttz64(mask());
+return std::countr_zero(mask());
}
// Range NOT


@@ -32,7 +32,7 @@ namespace rsx
return 0;
}
-const u64 r = u64{1} << utils::cnttz64(~_old, false);
+const u64 r = u64{1} << std::countr_one(_old);
::overlays.trace("Bit allocated (%u)", r);
return r;
}


@@ -819,7 +819,7 @@ void VKGSRender::check_heap_status(u32 flags)
else if (flags)
{
heap_critical = false;
-u32 test = 1 << utils::cnttz32(flags, true);
+u32 test = 1u << std::countr_zero(flags);
do
{
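Here 1u << std::countr_zero(flags) isolates the lowest set bit of flags, equivalent to flags & (0 - flags) for a nonzero value. A small standalone sketch with illustrative values:

#include <bit>
#include <cassert>
#include <cstdint>

int main()
{
	const std::uint32_t flags = 0b0101'1000;
	const std::uint32_t lowest = 1u << std::countr_zero(flags);

	assert(lowest == 0b0000'1000);
	assert(lowest == (flags & (0u - flags)));
}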


@@ -107,7 +107,7 @@ static u64 slot_alloc()
if (ok)
{
// Find lowest clear bit
-return group * 64 + utils::cnttz64(~bits, false);
+return group * 64 + std::countr_one(bits);
}
}
@@ -269,7 +269,7 @@ static u32 sema_alloc()
if (ok)
{
// Find lowest clear bit
-const u32 id = group * 64 + static_cast<u32>(utils::cnttz64(~bits, false));
+const u32 id = group * 64 + static_cast<u32>(std::countr_one(bits));
#ifdef USE_POSIX
// Initialize semaphore (should be very fast)


@@ -1,6 +1,5 @@
#include "atomic2.hpp"
#include "Utilities/JIT.h"
#include "Utilities/asm.h"
#include "Utilities/sysinfo.h"
//
@@ -265,7 +264,7 @@ static u64 rec_alloc()
if (ok)
{
// Find lowest clear bit
-return group * 64 + utils::cnttz64(~bits, false);
+return group * 64 + std::countr_one(bits);
}
}