texture_cache: Better overlap handling

poly 2025-02-23 19:18:02 +02:00 committed by IndecisiveTurtle
parent ea206be2e2
commit 207facd718
6 changed files with 59 additions and 50 deletions

@@ -610,9 +610,10 @@ bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr,
     Image& image = texture_cache.GetImage(image_id);
     // Only perform sync if image is:
     // - GPU modified; otherwise there are no changes to synchronize.
-    // - Not CPU modified; otherwise we could overwrite CPU changes with stale GPU changes.
+    // - Not CPU dirty; otherwise we could overwrite CPU changes with stale GPU changes.
+    // - Not GPU dirty; otherwise we could overwrite GPU changes with stale image data.
     if (False(image.flags & ImageFlagBits::GpuModified) ||
-        True(image.flags & ImageFlagBits::CpuDirty)) {
+        True(image.flags & ImageFlagBits::Dirty)) {
         return false;
     }
     ASSERT_MSG(device_addr == image.info.guest_address,
@@ -628,8 +629,8 @@ bool BufferCache::SynchronizeBufferFromImage(Buffer& buffer, VAddr device_addr,
         const u32 depth =
             image.info.props.is_volume ? std::max(image.info.size.depth >> m, 1u) : 1u;
         const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = image.info.mips_layout[m];
-        offset += mip_ofs * num_layers;
-        if (offset + (mip_size * num_layers) > max_offset) {
+        offset += mip_ofs;
+        if (offset + mip_size > max_offset) {
            break;
        }
        copies.push_back({
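This hunk only works together with the ImageInfo::UpdateSize change further down: each mips_layout entry now already covers every array layer of its mip, so the walk no longer scales offsets and sizes by num_layers. A standalone sketch of the cut-off the loop applies (hypothetical MipLayout type, not the emulator's own):

```cpp
#include <cstdint>
#include <vector>

struct MipLayout {
    uint32_t size;   // bytes covered by this mip, across all of its array layers
    uint32_t offset; // start of this mip relative to the image's guest base address
};

// Count how many leading mips fit entirely inside [0, max_offset) -- the same
// cut-off the loop above uses before recording buffer<->image copies.
uint32_t MipsThatFit(const std::vector<MipLayout>& mips, uint32_t max_offset) {
    uint32_t count = 0;
    for (const auto& mip : mips) {
        if (mip.offset + mip.size > max_offset) {
            break;
        }
        ++count;
    }
    return count;
}
```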

@@ -394,7 +394,7 @@ void Image::CopyImage(const Image& image) {
             vk::AccessFlagBits2::eShaderRead | vk::AccessFlagBits2::eTransferRead, {});
 }
 
-void Image::CopyMip(const Image& image, u32 mip) {
+void Image::CopyMip(const Image& image, u32 mip, u32 slice) {
     scheduler->EndRendering();
 
     Transit(vk::ImageLayout::eTransferDstOptimal, vk::AccessFlagBits2::eTransferWrite, {});
@@ -407,18 +407,19 @@ void Image::CopyMip(const Image& image, u32 mip) {
     ASSERT(mip_w == image.info.size.width);
     ASSERT(mip_h == image.info.size.height);
 
+    const u32 num_layers = std::min(image.info.resources.layers, info.resources.layers);
     const vk::ImageCopy image_copy{
         .srcSubresource{
             .aspectMask = image.aspect_mask,
             .mipLevel = 0,
             .baseArrayLayer = 0,
-            .layerCount = image.info.resources.layers,
+            .layerCount = num_layers,
         },
         .dstSubresource{
             .aspectMask = image.aspect_mask,
             .mipLevel = mip,
-            .baseArrayLayer = 0,
-            .layerCount = info.resources.layers,
+            .baseArrayLayer = slice,
+            .layerCount = num_layers,
         },
         .extent = {mip_w, mip_h, mip_d},
     };
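The std::min above exists because a copy may only reference array layers that exist in both the source and the destination image, and the destination region now starts at an arbitrary slice. A hedged helper showing that constraint, extended to also account for the destination base slice (extra clamping the hunk itself does not perform):

```cpp
#include <algorithm>
#include <cstdint>

// How many array layers a copy into `dst_base_slice` may legally touch when the
// source has `src_layers` layers and the destination has `dst_layers` layers.
uint32_t LayersToCopy(uint32_t src_layers, uint32_t dst_layers, uint32_t dst_base_slice) {
    const uint32_t dst_room = dst_layers > dst_base_slice ? dst_layers - dst_base_slice : 0u;
    return std::min(src_layers, dst_room);
}
```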

@@ -104,7 +104,7 @@ struct Image {
     void Upload(vk::Buffer buffer, u64 offset);
 
     void CopyImage(const Image& image);
-    void CopyMip(const Image& image, u32 mip);
+    void CopyMip(const Image& src_image, u32 mip, u32 slice);
 
     bool IsTracked() {
         return track_addr != 0 && track_addr_end != 0;

@@ -208,15 +208,14 @@ void ImageInfo::UpdateSize() {
             mip_info.pitch = std::max(mip_info.pitch * 4, 32u);
             mip_info.height = std::max(mip_info.height * 4, 32u);
         }
-        mip_info.size *= mip_d;
+        mip_info.size *= mip_d * resources.layers;
         mip_info.offset = guest_size;
         mips_layout.emplace_back(mip_info);
         guest_size += mip_info.size;
     }
-    guest_size *= resources.layers;
 }
 
-int ImageInfo::IsMipOf(const ImageInfo& info) const {
+s32 ImageInfo::MipOf(const ImageInfo& info) const {
     if (!IsCompatible(info)) {
         return -1;
     }
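After this hunk each mips_layout entry directly describes a mip-major layout (all array layers of a mip stored back to back), instead of describing a single layer's chain and leaving callers to scale by the layer count. A standalone sketch with made-up sizes showing why the distinction matters: the two conventions place the same (mip 1, layer 1) slice at different guest offsets.

```cpp
#include <cstdint>
#include <cstdio>

int main() {
    // Hypothetical image: 3 array layers, per-layer mip sizes of 256 and 64 bytes.
    const uint32_t layers = 3;
    const uint32_t mip_bytes[2] = {256, 64};

    // Layer-major: a full mip chain per layer, chains appended one after another.
    // (mip 1, layer 1) starts at 1 * (256 + 64) + 256 = 576.
    const uint32_t layer_major = 1 * (mip_bytes[0] + mip_bytes[1]) + mip_bytes[0];

    // Mip-major: each mip stores all of its layers, mips appended one after another.
    // (mip 1, layer 1) starts at 3 * 256 + 1 * 64 = 832.
    const uint32_t mip_major = layers * mip_bytes[0] + 1 * mip_bytes[1];

    std::printf("layer-major: %u, mip-major: %u\n", layer_major, mip_major);
    return 0;
}
```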
@@ -237,7 +236,12 @@ int ImageInfo::IsMipOf(const ImageInfo& info) const {
     // Find mip
     auto mip = -1;
     for (auto m = 0; m < info.mips_layout.size(); ++m) {
-        if (guest_address == (info.guest_address + info.mips_layout[m].offset)) {
+        const auto& [mip_size, mip_pitch, mip_height, mip_ofs] = info.mips_layout[m];
+        const VAddr mip_base = info.guest_address + mip_ofs;
+        const VAddr mip_end = mip_base + mip_size;
+        const u32 slice_size = mip_size / info.resources.layers;
+        if (guest_address >= mip_base && guest_address < mip_end &&
+            (guest_address - mip_base) % slice_size == 0) {
             mip = m;
             break;
         }
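With layers folded into each mip, an overlapping image may start at any slice boundary inside a mip, not only at the mip's first byte; that is what the widened check above accepts. A hedged standalone version of the same test:

```cpp
#include <cstdint>

// Does `addr` land exactly on a slice boundary of the mip that starts at
// `mip_base` and spans `mip_size` bytes across `layers` array layers?
bool PointsAtSliceOfMip(uint64_t addr, uint64_t mip_base, uint64_t mip_size, uint32_t layers) {
    if (addr < mip_base || addr >= mip_base + mip_size) {
        return false; // not inside this mip at all
    }
    const uint64_t slice_size = mip_size / layers;
    return (addr - mip_base) % slice_size == 0;
}
```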
@@ -269,7 +273,7 @@ int ImageInfo::IsMipOf(const ImageInfo& info) const {
     return mip;
 }
 
-int ImageInfo::IsSliceOf(const ImageInfo& info) const {
+s32 ImageInfo::SliceOf(const ImageInfo& info, s32 mip) const {
     if (!IsCompatible(info)) {
         return -1;
     }
@@ -285,13 +289,13 @@ int ImageInfo::IsSliceOf(const ImageInfo& info) const {
     }
 
     // Check for size alignment.
-    const bool slice_size = info.guest_size / info.resources.layers;
+    const u32 slice_size = info.mips_layout[mip].size / info.resources.layers;
     if (guest_size % slice_size != 0) {
         return -1;
     }
 
     // Ensure that address is aligned too.
-    const auto addr_diff = guest_address - info.guest_address;
+    const auto addr_diff = guest_address - (info.guest_address + info.mips_layout[mip].offset);
     if ((addr_diff % guest_size) != 0) {
         return -1;
     }
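Both checks reduce to: the candidate must cover a whole number of per-layer slices of the matched mip, and must start an exact multiple of its own size past that mip's base. The slice index then falls out of the same arithmetic; a hedged sketch (the function's actual return statement lies outside this hunk):

```cpp
#include <cstdint>
#include <optional>

// Recover which array slice of the matched mip a candidate image starts at.
std::optional<uint32_t> SliceIndex(uint64_t cand_addr, uint64_t cand_size,
                                   uint64_t mip_base, uint64_t mip_size, uint32_t layers) {
    const uint64_t slice_size = mip_size / layers;
    if (cand_size % slice_size != 0) {
        return std::nullopt; // candidate does not cover whole slices
    }
    const uint64_t diff = cand_addr - mip_base;
    if (diff % cand_size != 0) {
        return std::nullopt; // candidate is not aligned to its own stride
    }
    return static_cast<uint32_t>(diff / slice_size);
}
```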

@@ -30,8 +30,8 @@ struct ImageInfo {
     bool IsDepthStencil() const;
     bool HasStencil() const;
 
-    int IsMipOf(const ImageInfo& info) const;
-    int IsSliceOf(const ImageInfo& info) const;
+    s32 MipOf(const ImageInfo& info) const;
+    s32 SliceOf(const ImageInfo& info, s32 mip) const;
 
     /// Verifies if images are compatible for subresource merging.
     bool IsCompatible(const ImageInfo& info) const {

@@ -223,16 +223,13 @@ std::tuple<ImageId, int, int> TextureCache::ResolveOverlap(const ImageInfo& imag
     // Right overlap, the image requested is a possible subresource of the image from cache.
     if (image_info.guest_address > tex_cache_image.info.guest_address) {
-        if (auto mip = image_info.IsMipOf(tex_cache_image.info); mip >= 0) {
-            return {cache_image_id, mip, -1};
+        if (auto mip = image_info.MipOf(tex_cache_image.info); mip >= 0) {
+            if (auto slice = image_info.SliceOf(tex_cache_image.info, mip); slice >= 0) {
+                return {cache_image_id, mip, slice};
+            }
         }
 
-        if (auto slice = image_info.IsSliceOf(tex_cache_image.info); slice >= 0) {
-            return {cache_image_id, -1, slice};
-        }
-
-        // TODO: slice and mip
         // Image isn't a subresource but a chance overlap.
         if (safe_to_delete) {
            FreeImage(cache_image_id);
        }
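The right-overlap case therefore only reuses a cached image when the request resolves to both a mip and a slice of it; a partial match no longer returns a half-filled tuple. A hedged sketch of that combined contract:

```cpp
#include <optional>
#include <utility>

// Resolve the requested image as a (mip, slice) location inside a cached one,
// or nothing if either half of the match fails.
template <typename Info>
std::optional<std::pair<int, int>> ResolveSubresource(const Info& request, const Info& cached) {
    const int mip = request.MipOf(cached);
    if (mip < 0) {
        return std::nullopt;
    }
    const int slice = request.SliceOf(cached, mip);
    if (slice < 0) {
        return std::nullopt;
    }
    return std::make_pair(mip, slice);
}
```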
@@ -240,31 +237,33 @@ std::tuple<ImageId, int, int> TextureCache::ResolveOverlap(const ImageInfo& imag
         return {{}, -1, -1};
     } else {
         // Left overlap, the image from cache is a possible subresource of the image requested
-        if (auto mip = tex_cache_image.info.IsMipOf(image_info); mip >= 0) {
-            if (tex_cache_image.binding.is_target) {
-                // We have a larger image created and a separate one, representing a subres of it,
-                // bound as render target. In this case we need to rebind render target.
-                tex_cache_image.binding.needs_rebind = 1u;
-                if (merged_image_id) {
-                    GetImage(merged_image_id).binding.is_target = 1u;
-                }
+        if (auto mip = tex_cache_image.info.MipOf(image_info); mip >= 0) {
+            if (auto slice = tex_cache_image.info.SliceOf(image_info, mip); slice >= 0) {
+                if (tex_cache_image.binding.is_target) {
+                    // We have a larger image created and a separate one, representing a subres of it,
+                    // bound as render target. In this case we need to rebind render target.
+                    tex_cache_image.binding.needs_rebind = 1u;
+                    if (merged_image_id) {
+                        GetImage(merged_image_id).binding.is_target = 1u;
+                    }
 
-                FreeImage(cache_image_id);
-                return {merged_image_id, -1, -1};
-            }
+                    FreeImage(cache_image_id);
+                    return {merged_image_id, -1, -1};
+                }
 
-            // We need to have a larger, already allocated image to copy this one into
-            if (merged_image_id) {
-                tex_cache_image.Transit(vk::ImageLayout::eTransferSrcOptimal,
-                                        vk::AccessFlagBits2::eTransferRead, {});
+                // We need to have a larger, already allocated image to copy this one into
+                if (merged_image_id) {
+                    tex_cache_image.Transit(vk::ImageLayout::eTransferSrcOptimal,
+                                            vk::AccessFlagBits2::eTransferRead, {});
 
-                const auto num_mips_to_copy = tex_cache_image.info.resources.levels;
-                ASSERT(num_mips_to_copy == 1);
+                    const auto num_mips_to_copy = tex_cache_image.info.resources.levels;
+                    ASSERT(num_mips_to_copy == 1);
 
-                auto& merged_image = slot_images[merged_image_id];
-                merged_image.CopyMip(tex_cache_image, mip);
+                    auto& merged_image = slot_images[merged_image_id];
+                    merged_image.CopyMip(tex_cache_image, mip, slice);
 
-                FreeImage(cache_image_id);
+                    FreeImage(cache_image_id);
+                }
             }
         }
     }
@@ -374,12 +373,16 @@ ImageId TextureCache::FindImage(BaseDesc& desc, FindFlags flags) {
         RegisterImage(image_id);
     }
 
-    Image& image = slot_images[image_id];
-    image.tick_accessed_last = scheduler.CurrentTick();
-
     // If the image requested is a subresource of the image from cache record its location.
     if (view_mip > 0) {
         desc.view_info.range.base.level = view_mip;
     }
 
+    Image& image = slot_images[image_id];
+    image.tick_accessed_last = scheduler.CurrentTick();
+
+    if (view_slice > 0) {
+        desc.view_info.range.base.layer = view_slice;
+    }
+
     return image_id;
 }
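When ResolveOverlap reports a (mip, slice) location, FindImage now hands the caller the larger cached image but re-bases the requested view onto that subresource. A minimal sketch of the re-basing (SubresourceBase here is a stand-in type, not the cache's real one):

```cpp
#include <cstdint>

struct SubresourceBase {
    uint32_t level = 0; // first mip level the view exposes
    uint32_t layer = 0; // first array slice the view exposes
};

SubresourceBase RebaseView(int view_mip, int view_slice) {
    SubresourceBase base{};
    if (view_mip > 0) {
        base.level = static_cast<uint32_t>(view_mip);
    }
    if (view_slice > 0) {
        base.layer = static_cast<uint32_t>(view_slice);
    }
    return base;
}
```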
@@ -526,7 +529,7 @@ void TextureCache::RefreshImage(Image& image, Vulkan::Scheduler* custom_schedule
         }
 
         image_copy.push_back({
-            .bufferOffset = mip.offset * num_layers,
+            .bufferOffset = mip.offset,
             .bufferRowLength = static_cast<u32>(mip.pitch),
             .bufferImageHeight = static_cast<u32>(mip.height),
             .imageSubresource{