Merge branch 'update' of https://github.com/digant73/rpcs3 into update

# Conflicts:
#	rpcs3/Emu/RSX/Core/RSXDrawCommands.cpp
#	rpcs3/Emu/RSX/Core/RSXDrawCommands.h
#	rpcs3/Emu/RSX/Core/RSXDriverState.h
digant 2025-01-03 23:26:57 +01:00
commit 9d8f3b7bc4
3 changed files with 240 additions and 20 deletions


@@ -175,10 +175,69 @@ vec4 _texcoord_xform_shadow(const in vec4 coord4, const in sampler_info params)
vec4 _sext_unorm8x4(const in vec4 x)
{
// TODO: Handle clamped sign-extension
const vec4 bits = floor(fma(x, vec4(255.f), vec4(0.5f)));
const bvec4 sign_check = lessThan(bits, vec4(128.f));
const vec4 ret = _select(bits - 256.f, bits, sign_check);
const vec4 bits = floor(fma(x, vec4(255.f), vec4(0.5f))); // floor of ((a * b) + c) (e.g. 0.0 -> 0, 1.0 -> 255)
const bvec4 sign_check = lessThan(bits, vec4(128.f)); // 1 if a < b (e.g. 127 -> 1, 128 -> 0)
const vec4 ret = _select(bits - 256.f, bits, sign_check); // a if c is false, b if c is true
return ret / 127.f;
// const bvec4 clamped_check = lessThan(ret, vec4(-127.f)); // handle clamped sign-extension
// const vec4 ret2 = _select(ret, vec4(-127.f), clamped_check);
// return ret2 / 127.f;
// return vec4(0.0f);
// return ret;
// return ret / vec4(512.f, 512.f, 512.f, 512.f);
// return vec4(-1.f, -1.f, -1.f, 0.f);
/*
const vec4 bits = x; // max of a * b + c (e.g. 0.0 -> 0.0, 1.0 -> 255)
const bvec4 sign_check = lessThan(bits, vec4(128.f)); // 1 if a < b (e.g. 127.0 -> 1)
const vec4 ret = _select(bits - 256.f, bits, sign_check); // a if c false, b if c true
return ret / 127.f;
*/
/*
// const vec4 bits = floor(fma(x, vec4(255.f), vec4(0.f))); // max of a*b+c (e.g. 0.0 -> 0.0, 1.0 -> 255)
const vec4 bits = fma(x, vec4(255.f), vec4(0.f)); // max of a*b+c (e.g. 0.0 -> 0.0, 1.0 -> 255)
const bvec4 sign_check = lessThan(bits, vec4(128.f)); // 1 if a < b (e.g. 127.0 -> 1)
// const vec4 ret = _select(vec4(127.f), bits, sign_check); // a if c false, b if c true
const vec4 ret = _select(bits - vec4(256.f), bits, sign_check); // a if c false, b if c true
return ret / 127.f;
*/
/*
const bvec4 sign_check2 = lessThan(ret, vec4(-127.f));
const vec4 ret2 = _select(ret, vec4(-127.f), sign_check2);
// return ret2 * 2; // -255 255
// return ret2 / vec4(127.f); // -1.0 - 1.0
// return ret2
return ret / 256.f;
// return ret + 255.f;
// return ret * 256.f;
return ret * 1024.f;
*/
/*
const vec4 bits = x;
const bvec4 sign_check = lessThan(bits, vec4(0.5f)); // 1 if a < b (e.g. 0.0 -> 1, 0.5 -> 0)
const vec4 ret = _select(bits - vec4(1.f), bits, sign_check); // a if c false, b if c true
return ret / vec4(0.5f);
*/
/*
const vec4 bits = floor(fma(x, vec4(1.f), vec4(1.f))); // max of a*b+c (e.g. -1.0 -> 0.0, 1.0 -> 2.0)
// const vec4 bits = fma(x, vec4(1.f), vec4(1.f)); // max of a*b+c (e.g. -1.0 -> 0.0, 1.0 -> 2.0)
const bvec4 sign_check = lessThan(bits, vec4(1.f)); // 1 if a < b (e.g. 0.0 -> 1, 1.0 -> 0)
// const vec4 ret = _select(bits - vec4(1.f), bits + vec4(1.f), sign_check); // a if c false, b if c true
const vec4 ret = _select(bits - vec4(1.f), vec4(2.f) - bits, sign_check); // a if c false, b if c true
return ret / vec4(1.f);
*/
/*
const bvec4 sign_check = lessThan(x, vec4(0.5f)); // 1 if a < b (e.g. 0.4 -> 1, 1.0 -> 0)
const vec4 ret = _select(x - vec4(1.f), x, sign_check); // a if c false, b if c true
return ret * vec4(2.f);
*/
/* const bvec4 sign_check = lessThan(x, vec4(128.f)); // 1 if a < b (e.g. 127.0 -> 1)
const vec4 ret = _select(x - 256.f, x, sign_check); // a if c false, b if c true
return ret;
// return ret / 127.f;
*/
}
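For reference, here is a minimal scalar C++ sketch (not part of the diff) of the reinterpretation _sext_unorm8x4 performs per channel; the function name is hypothetical and the rounding follows the fma/floor in the shader above.

#include <cmath>

// Sketch: reinterpret a UNORM8 sample in [0, 1] as a signed byte and renormalize.
// Mirrors _sext_unorm8x4 per channel: recover the byte, sign-extend, divide by 127.
float sext_unorm8(float x)
{
    const float bits = std::floor(x * 255.0f + 0.5f);          // recover the raw byte value (0..255)
    const float ret  = (bits < 128.0f) ? bits : bits - 256.0f;  // two's-complement view (-128..127)
    return ret / 127.0f;                                        // renormalize; -128 lands slightly below -1 (the clamping TODO above)
}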
vec4 _process_texel(in vec4 rgba, const in uint control_bits)
@@ -208,7 +267,21 @@ vec4 _process_texel(in vec4 rgba, const in uint control_bits)
uvec4 mask;
vec4 convert;
uint op_mask = control_bits & uint(SIGN_EXPAND_MASK);
uint op_mask;
/*
op_mask = control_bits & uint(SEXT_MASK);
if (op_mask != 0u)
{
// Sign-extend the input signal
mask = uvec4(op_mask) & uvec4(SEXT_R_MASK, SEXT_G_MASK, SEXT_B_MASK, SEXT_A_MASK);
convert = _sext_unorm8x4(rgba);
// rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
rgba = convert * vec4(0.f, 0.f, 0.f, 1.f);
}
*/
op_mask = control_bits & uint(SIGN_EXPAND_MASK);
if (op_mask != 0u)
{
// Expand to signed normalized by decompressing the signal
@@ -217,22 +290,37 @@ vec4 _process_texel(in vec4 rgba, const in uint control_bits)
rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
}
op_mask = control_bits & uint(SEXT_MASK);
if (op_mask != 0u)
{
// Sign-extend the input signal
mask = uvec4(op_mask) & uvec4(SEXT_R_MASK, SEXT_G_MASK, SEXT_B_MASK, SEXT_A_MASK);
convert = _sext_unorm8x4(rgba);
rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
}
// convert = _sext_unorm8x4(rgba);
// rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
// convert = (rgba + 1.f) / 2.f;
// convert = rgba * 2.f;
// convert = rgba / 16.f;
convert = rgba;
convert = _sext_unorm8x4(convert);
// convert = convert * vec4(1.f, 1.f, 1.f, 0.f);
// convert = (rgba + 1.f) / 2.f;
rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
// rgba = convert * vec4(0.f, 0.f, 0.f, 1.f);
// rgba = vec4(0.f, 0.f, 0.f, 0.f);
}
op_mask = control_bits & uint(GAMMA_CTRL_MASK);
if (op_mask != 0u)
{
// Gamma correction
mask = uvec4(op_mask) & uvec4(GAMMA_R_MASK, GAMMA_G_MASK, GAMMA_B_MASK, GAMMA_A_MASK);
convert = srgb_to_linear(rgba);
return _select(rgba, convert, notEqual(mask, uvec4(0)));
// return _select(rgba, convert, notEqual(mask, uvec4(0)));
rgba = _select(rgba, convert, notEqual(mask, uvec4(0)));
}
return rgba;
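For orientation, a hedged C++ sketch (not part of the diff) of how the three control-bit passes above compose now that the gamma path no longer returns early. The per-channel bit layout and helper names are assumptions standing in for the SEXT_*/EXPAND_*/GAMMA_* defines.

#include <array>
#include <cstdint>

using vec4f = std::array<float, 4>;

// Keep the converted value only where that stage's channel-mask bit is set.
vec4f select_channels(const vec4f& original, const vec4f& converted, uint32_t channel_mask)
{
    vec4f out = original;
    for (int i = 0; i < 4; ++i)
    {
        if (channel_mask & (1u << i))   // channel bit set -> take the converted value
            out[i] = converted[i];
    }
    return out;
}

// Pass order once gamma accumulates instead of returning:
//   rgba = select_channels(rgba, expand_unorm(rgba),   expand_mask); // 2n - 1 style expansion
//   rgba = select_channels(rgba, sext_unorm8x4(rgba),  sext_mask);   // raw-byte sign extension
//   rgba = select_channels(rgba, srgb_to_linear(rgba), gamma_mask);  // gamma decode, then fall through to return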


@@ -2205,24 +2205,145 @@ namespace rsx
// NOTE: The ARGB8_signed flag means to reinterpret the raw bytes as signed. This is different than unsigned_remap=bias which does range decompression.
// This is a separate method of setting the format to signed mode without doing so per-channel
// Precedence = SNORM > GAMMA > UNSIGNED_REMAP (See Resistance 3 for GAMMA/BX2 relationship, UE3 for BX2 effect)
const u32 argb8_signed = tex.argb_signed(); // _SNORM
const u32 gamma = tex.gamma() & ~argb8_signed; // _SRGB
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL)? 0u : (~(gamma | argb8_signed) & 0xF); // _BX2
const u32 argb8_signed_cur = tex.argb_signed();
const u32 gamma_cur = tex.gamma();
const bool isUrnMap = tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL;
const bool canRemap = ((tex.remap() >> 8) & 0xAA) == 0xAA;
const u32 argb8_signed = tex.argb_signed(); // _SNORM
const u32 gamma = tex.gamma() & ~argb8_signed; // _SRGB
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : (~(gamma | argb8_signed) & 0xF); // _BX2
u32 argb8_convert = gamma;
/*
// --- KO --- Killzone ghosting also on soldiers
const u32 gamma = tex.gamma(); // _SRGB
const u32 argb8_signed = tex.argb_signed() & ~gamma; // _SNORM
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : (~(gamma | argb8_signed) & 0xF); // _BX2
u32 argb8_convert = gamma;
*/
/*
// --- KO --- Killzone ghosting also on soldiers
const u32 argb8_signed = tex.argb_signed();
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : (~(argb8_signed) & 0xF); // _BX2
const u32 gamma = tex.gamma() & ~(argb8_signed | unsigned_remap);
u32 argb8_convert = gamma;
*/
/*
// OK killzone
const u32 gamma = tex.gamma();
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : (~(gamma) & 0xF);
const u32 argb8_signed = tex.argb_signed() & ~(gamma | unsigned_remap);
u32 argb8_convert = gamma;
*/
/*
// OK killzone
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : 0xF;
const u32 argb8_signed = tex.argb_signed() & ~unsigned_remap;
const u32 gamma = tex.gamma() & ~(argb8_signed | unsigned_remap);
u32 argb8_convert = gamma;
*/
/*
// OK killzone
const u32 unsigned_remap = (tex.unsigned_remap() == CELL_GCM_TEXTURE_UNSIGNED_REMAP_NORMAL) ? 0u : 0xF;
const u32 gamma = tex.gamma() & ~unsigned_remap;
const u32 argb8_signed = tex.argb_signed() & ~(gamma | unsigned_remap);
u32 argb8_convert = gamma;
*/
/*
// OK killzone, Partial OK Resistance
const u32 argb8_signed = argb8_signed_cur & 0x1; // _SNORM
const u32 gamma = gamma_cur & ~argb8_signed_cur; // _SRGB
const u32 unsigned_remap = (isUrnMap) ? 0u : (~(gamma | argb8_signed_cur) & 0xF); // _BX2
u32 argb8_convert = gamma;
*/
/*
// --KO-- killzone, Partial OK Resistance
const u32 argb8_signed = !canRemap ? 0u : tex.argb_signed() & 0xF;
const u32 gamma = tex.gamma() & ~(argb8_signed);
const u32 unsigned_remap = isUrnMap ? 0u : (~(gamma | argb8_signed) & 0xF);
u32 argb8_convert = gamma;
*/
/*
// OK killzone, Partial OK Resistance
const u32 gamma = tex.gamma();
const u32 unsigned_remap = isUrnMap ? 0u : (~(gamma) & 0xF);
const u32 argb8_signed = !canRemap ? 0u : tex.argb_signed() & ~(gamma | unsigned_remap) & 0xF;
u32 argb8_convert = gamma;
*/
/*
// OK killzone, Partial OK Resistance
const u32 unsigned_remap = isUrnMap ? 0u : 0xF;
const u32 argb8_signed = !canRemap ? 0u : tex.argb_signed() & ~unsigned_remap & 0xF;
const u32 gamma = tex.gamma() & ~(argb8_signed | unsigned_remap);
u32 argb8_convert = gamma;
*/
/*
// OK killzone, Partial OK Resistance
const u32 unsigned_remap = isUrnMap ? 0u : 0xF;
const u32 gamma = tex.gamma() & ~(unsigned_remap);
const u32 argb8_signed = !canRemap ? 0u : tex.argb_signed() & ~(gamma | unsigned_remap) & 0xF;
u32 argb8_convert = gamma;
*/
/*
const u32 gamma = tex.gamma();
const u32 argb8_signed = tex.argb_signed() & ~(gamma) & 0xF;
const u32 unsigned_remap = isUrnMap ? 0u : (~(gamma | argb8_signed) & 0xF);
//const u32 unsigned_remap = isUrnMap ? 0u : (~(gamma) & 0xF);
//const u32 argb8_signed = tex.argb_signed() & ~(gamma | unsigned_remap) & 0xF;
u32 argb8_convert = gamma;
*/
/*
// OK killzone, POK Resistance
const u32 gamma = gamma_cur; // _SRGB
// const u32 argb8_signed = argb8_signed_cur & ~gamma; // _SNORM
const u32 argb8_signed = argb8_signed_cur & 0x1; // _SNORM
// const u32 gamma = gamma_cur & ~argb8_signed; // _SRGB
const u32 unsigned_remap = (isUrnMap) ? 0u : (~(gamma | argb8_signed) & 0xF); // _BX2
u32 argb8_convert = gamma;
*/
/*
// OK killzone, POK Resistance
const u32 gamma = gamma_cur; // _SRGB
const u32 unsigned_remap = (isUrnMap) ? 0u : (~(gamma) & 0xF); // _BX2
const u32 argb8_signed = argb8_signed_cur & ~(gamma | unsigned_remap); // _SNORM
// const u32 argb8_signed = argb8_signed_cur & 0x1; // _SNORM
// const u32 gamma = gamma_cur & ~argb8_signed; // _SRGB
u32 argb8_convert = gamma;
*/
/*
static u32 argb8_signed_prev = 0xFFFFFFFF;
static u32 gamma_prev = 0xFFFFFFFF;
static u32 unsigned_remap_prev = 0xFFFFFFFF;
if (argb8_signed_cur || gamma_cur || unsigned_remap)
//if (argb8_signed_cur != argb8_signed_prev || gamma_cur != gamma_prev || unsigned_remap != unsigned_remap_prev)
//if (argb8_signed_cur && unsigned_remap)
//if (argb8_signed_cur)
{
rsx_log.error("signed = %d / %d gamma = %d / %d unsigned = %d / %d", argb8_signed, argb8_signed_cur, gamma, gamma_cur, unsigned_remap, isUrnMap);
//argb8_signed_prev = argb8_signed_cur;
//gamma_prev = gamma_cur;
//unsigned_remap_prev = unsigned_remap;
}
*/
/*
// The options are mutually exclusive
ensure((argb8_signed & gamma) == 0);
ensure((argb8_signed & unsigned_remap) == 0);
ensure((gamma & unsigned_remap) == 0);
*/
// Helper function to apply a per-channel mask based on an input mask
const auto apply_sign_convert_mask = [&](u32 mask, u32 bit_offset)
const auto apply_sign_convert_mask = [&](u32 mask, u32 bit_offset, bool override)
{
// TODO: Use actual remap mask to account for 0 and 1 overrides in default mapping
// TODO: Replace this clusterfuck of texture control with matrix transformation
const auto remap_ctrl = (tex.remap() >> 8) & 0xAA;
if (remap_ctrl == 0xAA)
if (remap_ctrl == 0xAA || override)
{
argb8_convert |= (mask & 0xFu) << bit_offset;
return;
@@ -2232,20 +2353,31 @@ namespace rsx
if ((remap_ctrl & 0x0C) == 0x08) argb8_convert |= (mask & 0x2u) << bit_offset;
if ((remap_ctrl & 0x30) == 0x20) argb8_convert |= (mask & 0x4u) << bit_offset;
if ((remap_ctrl & 0xC0) == 0x80) argb8_convert |= (mask & 0x8u) << bit_offset;
//if ((remap_ctrl & 0x03)) argb8_convert |= (mask & 0x1u) << bit_offset;
//if ((remap_ctrl & 0x0C)) argb8_convert |= (mask & 0x2u) << bit_offset;
//if ((remap_ctrl & 0x30)) argb8_convert |= (mask & 0x4u) << bit_offset;
//if ((remap_ctrl & 0xC0)) argb8_convert |= (mask & 0x8u) << bit_offset;
};
if (argb8_signed)
{
// Apply integer sign extension from uint8 to sint8 and renormalize
apply_sign_convert_mask(argb8_signed, texture_control_bits::SEXT_OFFSET);
apply_sign_convert_mask(argb8_signed, texture_control_bits::SEXT_OFFSET, false);
}
if (unsigned_remap)
{
// Apply sign expansion, compressed normal-map style (2n - 1)
apply_sign_convert_mask(unsigned_remap, texture_control_bits::EXPAND_OFFSET);
apply_sign_convert_mask(unsigned_remap, texture_control_bits::EXPAND_OFFSET, false);
}
if (gamma)
{
// Apply gamma correction (sRGB to linear)
apply_sign_convert_mask(gamma, texture_control_bits::GAMMA_A, false);
}
texture_control |= argb8_convert;
}
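A compact sketch (not part of the diff) of the flag precedence the committed code settles on, per the NOTE above (SNORM > GAMMA > UNSIGNED_REMAP); the struct and function names are hypothetical, the masking mirrors the lines added above.

#include <cstdint>

struct texture_flag_masks { uint32_t snorm, gamma, bx2; };  // 4-bit per-channel masks

texture_flag_masks resolve_flags(uint32_t argb_signed, uint32_t gamma_bits, bool unsigned_remap_normal)
{
    const uint32_t snorm = argb_signed;                       // highest precedence: reinterpret raw bytes as signed
    const uint32_t gamma = gamma_bits & ~snorm;               // sRGB decode only where SNORM did not claim the channel
    const uint32_t bx2   = unsigned_remap_normal ? 0u
                         : (~(gamma | snorm) & 0xFu);         // BX2 (2n - 1) expansion on the remaining channels
    return { snorm, gamma, bx2 };
}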


@@ -410,7 +410,7 @@ namespace utils
return static_cast<T>(value * u64{numerator} / u64{denominator});
}
#if is_u128_emulated
#ifdef _MSC_VER
if constexpr (sizeof(T) <= sizeof(u128) / 2)
{
return static_cast<T>(u128_from_mul(value, numerator) / u64{denominator});
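For context on the utils change above, a sketch of why the widened-multiply path exists; this assumes plain uint64_t operands and uses the GCC/Clang __int128 extension as a stand-in for the project's u128/u128_from_mul helpers.

#include <cstdint>

// value * numerator can wrap in 64 bits before the division happens;
// doing the multiply at 128-bit width keeps the intermediate exact.
uint64_t scale_narrow(uint64_t value, uint64_t numerator, uint64_t denominator)
{
    return value * numerator / denominator;                    // may overflow for large value * numerator
}

uint64_t scale_wide(uint64_t value, uint64_t numerator, uint64_t denominator)
{
    const unsigned __int128 product =
        static_cast<unsigned __int128>(value) * numerator;     // exact 128-bit product
    return static_cast<uint64_t>(product / denominator);       // divide, then narrow back
}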