Mirror of https://github.com/RPCS3/rpcs3.git, synced 2025-10-01 21:59:35 +00:00

vk: Rewrite command buffer chains

parent 574e934bf3
commit b791d90b35

6 changed files with 131 additions and 138 deletions
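Summary of the rewrite, reconstructed from the hunks below: the manually indexed std::array of vk::command_buffer_chunk entries and the single setup-only m_secondary_command_buffer are replaced by two vk::command_buffer_chain<VK_MAX_ASYNC_CB_COUNT> objects whose get()/next()/poke_all() take over the index arithmetic and status polling; command_buffer_chunk drops its own submit_fence, pending flag, init_fence() and destroy() in favour of the base command_buffer's m_submit_fence and is_pending; command_buffer::create() loses its auto_reset flag and always allocates the submit fence; close_and_submit_command_buffer() is no longer passed the current chunk's fence; and frame_context_cleanup() loses its free_resources parameter and always performs the cleanup.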
@@ -113,7 +113,7 @@ namespace vk
         {
             m_async_command_queue.emplace_back();
             m_current_cb = &m_async_command_queue.back();
-            m_current_cb->create(m_command_pool, true);
+            m_current_cb->create(m_command_pool);
         }
     }

@@ -407,19 +407,12 @@ VKGSRender::VKGSRender() : GSRender()
     //create command buffer...
     m_command_buffer_pool.create((*m_device), m_device->get_graphics_queue_family());
-
-    for (auto &cb : m_primary_cb_list)
-    {
-        cb.create(m_command_buffer_pool);
-        cb.init_fence(*m_device);
-    }
-
-    m_current_command_buffer = &m_primary_cb_list[0];
+    m_primary_cb_list.create(m_command_buffer_pool, vk::command_buffer::access_type_hint::flush_only);
+    m_current_command_buffer = m_primary_cb_list.get();

     //Create secondary command_buffer for parallel operations
     m_secondary_command_buffer_pool.create((*m_device), m_device->get_graphics_queue_family());
-    m_secondary_command_buffer.create(m_secondary_command_buffer_pool, true);
-    m_secondary_command_buffer.access_hint = vk::command_buffer::access_type_hint::all;
+    m_secondary_cb_list.create(m_secondary_command_buffer_pool, vk::command_buffer::access_type_hint::all);

     //Precalculated stuff
     std::tie(pipeline_layout, descriptor_layouts) = get_shared_pipeline_layout(*m_device);

@@ -709,12 +702,10 @@ VKGSRender::~VKGSRender()
     m_cond_render_buffer.reset();

     // Command buffer
-    for (auto &cb : m_primary_cb_list)
-        cb.destroy();
+    m_primary_cb_list.destroy();
+    m_secondary_cb_list.destroy();

     m_command_buffer_pool.destroy();
-
-    m_secondary_command_buffer.destroy();
     m_secondary_command_buffer_pool.destroy();

     // Global resources
@@ -734,10 +725,8 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
 {
     vk::texture_cache::thrashed_set result;
     {
         std::lock_guard lock(m_secondary_cb_guard);

         const rsx::invalidation_cause cause = is_writing ? rsx::invalidation_cause::deferred_write : rsx::invalidation_cause::deferred_read;
-        result = m_texture_cache.invalidate_address(m_secondary_command_buffer, address, cause);
+        result = m_texture_cache.invalidate_address(*m_secondary_cb_list.get(), address, cause);
     }

     if (result.invalidate_samplers)
@@ -802,7 +791,7 @@ bool VKGSRender::on_access_violation(u32 address, bool is_writing)
         m_flush_requests.producer_wait();
     }

-    m_texture_cache.flush_all(m_secondary_command_buffer, result);
+    m_texture_cache.flush_all(*m_secondary_cb_list.next(), result);

     if (has_queue_ref)
     {
@@ -818,7 +807,7 @@ void VKGSRender::on_invalidate_memory_range(const utils::address_range &range, rsx::invalidation_cause cause)
 {
     std::lock_guard lock(m_secondary_cb_guard);

-    auto data = m_texture_cache.invalidate_range(m_secondary_command_buffer, range, cause);
+    auto data = m_texture_cache.invalidate_range(*m_secondary_cb_list.next(), range, cause);
     AUDIT(data.empty());

     if (cause == rsx::invalidation_cause::unmap)
@@ -1050,7 +1039,7 @@ void VKGSRender::check_heap_status(u32 flags)
     else
     {
         // Flush the frame context
-        frame_context_cleanup(target_frame, true);
+        frame_context_cleanup(target_frame);
     }

     m_frame_stats.flip_time += m_profiler.duration();
@@ -1062,15 +1051,12 @@ void VKGSRender::check_present_status()
     while (!m_queued_frames.empty())
     {
         auto ctx = m_queued_frames.front();
-        if (ctx->swap_command_buffer->pending)
+        if (!ctx->swap_command_buffer->poke())
         {
-            if (!ctx->swap_command_buffer->poke())
-            {
-                return;
-            }
+            return;
         }

-        frame_context_cleanup(ctx, true);
+        frame_context_cleanup(ctx);
     }
 }

@@ -1425,20 +1411,15 @@ void VKGSRender::clear_surface(u32 mask)

 void VKGSRender::flush_command_queue(bool hard_sync, bool do_not_switch)
 {
-    close_and_submit_command_buffer(m_current_command_buffer->submit_fence);
+    close_and_submit_command_buffer();

     if (hard_sync)
     {
         // wait for the latest instruction to execute
-        m_current_command_buffer->pending = true;
         m_current_command_buffer->reset();

         // Clear all command buffer statuses
-        for (auto &cb : m_primary_cb_list)
-        {
-            if (cb.pending)
-                cb.poke();
-        }
+        m_primary_cb_list.poke_all();

         // Drain present queue
         while (!m_queued_frames.empty())
@@ -1448,24 +1429,12 @@ void VKGSRender::flush_command_queue(bool hard_sync, bool do_not_switch)

         m_flush_requests.clear_pending_flag();
     }
-    else
-    {
-        // Mark this queue as pending and proceed
-        m_current_command_buffer->pending = true;
-    }

     if (!do_not_switch)
     {
         // Grab next cb in line and make it usable
         // NOTE: Even in the case of a hard sync, this is required to free any waiters on the CB (ZCULL)
-        m_current_cb_index = (m_current_cb_index + 1) % VK_MAX_ASYNC_CB_COUNT;
-        m_current_command_buffer = &m_primary_cb_list[m_current_cb_index];
-
-        if (!m_current_command_buffer->poke())
-        {
-            rsx_log.error("CB chain has run out of free entries!");
-        }
-
+        m_current_command_buffer = m_primary_cb_list.next();
         m_current_command_buffer->reset();
     }
     else
@@ -2075,24 +2044,24 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
         m_texture_upload_buffer_ring_info.is_dirty() ||
         m_raster_env_ring_info.is_dirty())
     {
         std::lock_guard lock(m_secondary_cb_guard);
-        m_secondary_command_buffer.begin();
+        auto secondary_command_buffer = m_secondary_cb_list.next();
+        secondary_command_buffer->begin();

-        m_attrib_ring_info.sync(m_secondary_command_buffer);
-        m_fragment_env_ring_info.sync(m_secondary_command_buffer);
-        m_vertex_env_ring_info.sync(m_secondary_command_buffer);
-        m_fragment_texture_params_ring_info.sync(m_secondary_command_buffer);
-        m_vertex_layout_ring_info.sync(m_secondary_command_buffer);
-        m_fragment_constants_ring_info.sync(m_secondary_command_buffer);
-        m_index_buffer_ring_info.sync(m_secondary_command_buffer);
-        m_transform_constants_ring_info.sync(m_secondary_command_buffer);
-        m_texture_upload_buffer_ring_info.sync(m_secondary_command_buffer);
-        m_raster_env_ring_info.sync(m_secondary_command_buffer);
+        m_attrib_ring_info.sync(*secondary_command_buffer);
+        m_fragment_env_ring_info.sync(*secondary_command_buffer);
+        m_vertex_env_ring_info.sync(*secondary_command_buffer);
+        m_fragment_texture_params_ring_info.sync(*secondary_command_buffer);
+        m_vertex_layout_ring_info.sync(*secondary_command_buffer);
+        m_fragment_constants_ring_info.sync(*secondary_command_buffer);
+        m_index_buffer_ring_info.sync(*secondary_command_buffer);
+        m_transform_constants_ring_info.sync(*secondary_command_buffer);
+        m_texture_upload_buffer_ring_info.sync(*secondary_command_buffer);
+        m_raster_env_ring_info.sync(*secondary_command_buffer);

-        m_secondary_command_buffer.end();
+        secondary_command_buffer->end();

         vk::queue_submit_t submit_info{ m_device->get_graphics_queue(), nullptr };
-        m_secondary_command_buffer.submit(submit_info, force_flush);
+        secondary_command_buffer->submit(submit_info, force_flush);
     }

     vk::clear_status_interrupt(vk::heap_dirty);
@@ -2172,11 +2141,6 @@ void VKGSRender::close_and_submit_command_buffer(vk::fence* pFence, VkSemaphore
         async_scheduler.flush(secondary_submit_info, force_flush);
     }

-    if (force_flush)
-    {
-        ensure(m_current_command_buffer->submit_fence->flushed);
-    }
-
     m_queue_status.clear(flush_queue_state::flushing);
 }

@@ -66,28 +66,12 @@ namespace vk

     struct command_buffer_chunk: public vk::command_buffer
     {
-        vk::fence* submit_fence = nullptr;
-        VkDevice m_device = VK_NULL_HANDLE;
-
-        atomic_t<bool> pending = { false };
         u64 eid_tag = 0;
         u64 reset_id = 0;
         shared_mutex guard_mutex;

         command_buffer_chunk() = default;

-        void init_fence(VkDevice dev)
-        {
-            m_device = dev;
-            submit_fence = new vk::fence(dev);
-        }
-
-        void destroy()
-        {
-            vk::command_buffer::destroy();
-            delete submit_fence;
-        }
-
         void tag()
         {
             eid_tag = vk::get_event_id();
@@ -95,11 +79,10 @@ namespace vk

         void reset()
         {
-            if (pending)
-                poke();
-
-            if (pending)
+            if (is_pending && !poke())
             {
                 wait(FRAME_PRESENT_TIMEOUT);
             }

             ++reset_id;
             CHECK_RESULT(vkResetCommandBuffer(commands, 0));
@@ -109,46 +92,52 @@ namespace vk
         {
             reader_lock lock(guard_mutex);

-            if (!pending)
+            if (!is_pending)
+            {
                 return true;
+            }

-            if (!submit_fence->flushed)
+            if (!m_submit_fence->flushed)
+            {
                 return false;
+            }

-            if (vkGetFenceStatus(m_device, submit_fence->handle) == VK_SUCCESS)
+            if (vkGetFenceStatus(pool->get_owner(), m_submit_fence->handle) == VK_SUCCESS)
             {
                 lock.upgrade();

-                if (pending)
+                if (is_pending)
                 {
-                    submit_fence->reset();
+                    m_submit_fence->reset();
                     vk::on_event_completed(eid_tag);

-                    pending = false;
+                    is_pending = false;
                     eid_tag = 0;
                 }
             }

-            return !pending;
+            return !is_pending;
         }

         VkResult wait(u64 timeout = 0ull)
         {
             reader_lock lock(guard_mutex);

-            if (!pending)
+            if (!is_pending)
+            {
                 return VK_SUCCESS;
+            }

-            const auto ret = vk::wait_for_fence(submit_fence, timeout);
+            const auto ret = vk::wait_for_fence(m_submit_fence, timeout);

             lock.upgrade();

-            if (pending)
+            if (is_pending)
             {
-                submit_fence->reset();
+                m_submit_fence->reset();
                 vk::on_event_completed(eid_tag);

-                pending = false;
+                is_pending = false;
                 eid_tag = 0;
             }

@@ -159,10 +148,12 @@ namespace vk
         {
             reader_lock lock(guard_mutex);

-            if (!pending)
+            if (!is_pending)
+            {
                 return;
+            }

-            submit_fence->wait_flush();
+            m_submit_fence->wait_flush();
         }
     };

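A note on the chunk synchronization rewritten above: poke() returns immediately when nothing is pending, reports failure while the submit fence has not been flushed, and otherwise checks vkGetFenceStatus; on success it upgrades the reader lock, resets the fence, completes the tagged event and clears is_pending. wait() performs the same recycling but blocks on the fence via vk::wait_for_fence up to the given timeout, and the last helper shown simply waits for the pending fence flush. All of them now operate on the base command_buffer's m_submit_fence and is_pending instead of the chunk's former submit_fence/pending pair, and the device handle comes from pool->get_owner() rather than a cached m_device.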
@@ -337,6 +328,59 @@ namespace vk
     {
         u32 subdraw_id;
     };

+    template<int Count>
+    class command_buffer_chain
+    {
+        atomic_t<u32> m_current_index = 0;
+        std::array<vk::command_buffer_chunk, VK_MAX_ASYNC_CB_COUNT> m_cb_list;
+
+    public:
+        command_buffer_chain() = default;
+
+        void create(command_pool& pool, vk::command_buffer::access_type_hint access)
+        {
+            for (auto& cb : m_cb_list)
+            {
+                cb.create(pool);
+                cb.access_hint = access;
+            }
+        }
+
+        void destroy()
+        {
+            for (auto& cb : m_cb_list)
+            {
+                cb.destroy();
+            }
+        }
+
+        void poke_all()
+        {
+            for (auto& cb : m_cb_list)
+            {
+                cb.poke();
+            }
+        }
+
+        inline command_buffer_chunk* next()
+        {
+            const auto result_id = ++m_current_index % VK_MAX_ASYNC_CB_COUNT;
+            auto result = &m_cb_list[result_id];
+
+            if (!result->poke())
+            {
+                rsx_log.error("CB chain has run out of free entries!");
+            }
+
+            return result;
+        }
+
+        inline command_buffer_chunk* get()
+        {
+            return &m_cb_list[m_current_index];
+        }
+    };
 }

 using namespace vk::vmm_allocation_pool_; // clang workaround.
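For reference, here is a minimal standalone sketch of the recycling scheme that command_buffer_chain implements: a fixed ring of slots with an advancing cursor, where next() pokes the target slot to reclaim it once its earlier submission has finished. This is illustrative C++ only, with placeholder names (chain, chunk) and a plain bool standing in for the Vulkan fence; it is not part of the commit.

#include <array>
#include <atomic>
#include <cstdio>

// Stand-in for vk::command_buffer_chunk: a slot that stays "pending"
// until its (simulated) fence reports that the GPU finished with it.
struct chunk
{
    bool is_pending = false;   // set when work is submitted on this slot
    bool work_done = true;     // stands in for a signalled VkFence

    // Recycle the slot if its work completed; true means "free to reuse".
    bool poke()
    {
        if (is_pending && work_done)
        {
            is_pending = false;
        }
        return !is_pending;
    }
};

// Stand-in for vk::command_buffer_chain<Count>: round-robin over the slots.
template <int Count>
class chain
{
    std::atomic<unsigned> m_current_index{ 0 };
    std::array<chunk, Count> m_slots{};

public:
    chunk* get() { return &m_slots[m_current_index % Count]; }

    chunk* next()
    {
        const unsigned id = ++m_current_index % Count;
        chunk* result = &m_slots[id];
        if (!result->poke())
        {
            // Every slot is still in flight; mirrors the error log in the commit.
            std::puts("CB chain has run out of free entries!");
        }
        return result;
    }

    void poke_all()
    {
        for (auto& slot : m_slots)
        {
            slot.poke();
        }
    }
};

int main()
{
    chain<4> cbs;
    chunk* current = cbs.get();  // like m_primary_cb_list.get() in the constructor
    current->is_pending = true;  // pretend work was submitted on it
    current = cbs.next();        // like m_primary_cb_list.next() when switching
    cbs.poke_all();              // like the hard-sync path in flush_command_queue()
    return current->is_pending ? 1 : 0;
}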
@@ -416,7 +460,6 @@ private:
     vk::render_device *m_device;

     //Vulkan internals
-    vk::command_pool m_command_buffer_pool;
     std::unique_ptr<vk::query_pool_manager> m_occlusion_query_manager;
     bool m_occlusion_query_active = false;
     rsx::reports::occlusion_query_info *m_active_query_info = nullptr;
@@ -424,10 +467,10 @@ private:

     shared_mutex m_secondary_cb_guard;
     vk::command_pool m_secondary_command_buffer_pool;
-    vk::command_buffer m_secondary_command_buffer; //command buffer used for setup operations
+    vk::command_buffer_chain<VK_MAX_ASYNC_CB_COUNT> m_secondary_cb_list;

-    u32 m_current_cb_index = 0;
-    std::array<vk::command_buffer_chunk, VK_MAX_ASYNC_CB_COUNT> m_primary_cb_list;
+    vk::command_pool m_command_buffer_pool;
+    vk::command_buffer_chain<VK_MAX_ASYNC_CB_COUNT> m_primary_cb_list;
     vk::command_buffer_chunk* m_current_command_buffer = nullptr;
     VkSemaphore m_dangling_semaphore_signal = VK_NULL_HANDLE;

@@ -521,7 +564,7 @@ private:

     void flush_command_queue(bool hard_sync = false, bool do_not_switch = false);
     void queue_swap_request();
-    void frame_context_cleanup(vk::frame_context_t *ctx, bool free_resources = false);
+    void frame_context_cleanup(vk::frame_context_t *ctx);
     void advance_queued_frames();
     void present(vk::frame_context_t *ctx);
     void reinitialize_swapchain();
@@ -23,8 +23,7 @@ void VKGSRender::reinitialize_swapchain()
     }

     // NOTE: This operation will create a hard sync point
-    close_and_submit_command_buffer(m_current_command_buffer->submit_fence);
-    m_current_command_buffer->pending = true;
+    close_and_submit_command_buffer();
     m_current_command_buffer->reset();

     for (auto &ctx : frame_context_storage)
@@ -33,7 +32,7 @@ void VKGSRender::reinitialize_swapchain()
             continue;

         // Release present image by presenting it
-        frame_context_cleanup(&ctx, true);
+        frame_context_cleanup(&ctx);
     }

     // Discard the current upscaling pipeline if any
@@ -156,25 +155,21 @@ void VKGSRender::queue_swap_request()
     if (m_swapchain->is_headless())
     {
         m_swapchain->end_frame(*m_current_command_buffer, m_current_frame->present_image);
-        close_and_submit_command_buffer(m_current_command_buffer->submit_fence);
+        close_and_submit_command_buffer();
     }
     else
     {
-        close_and_submit_command_buffer(m_current_command_buffer->submit_fence,
+        close_and_submit_command_buffer(nullptr,
             m_current_frame->acquire_signal_semaphore,
             m_current_frame->present_wait_semaphore,
             VK_PIPELINE_STAGE_COLOR_ATTACHMENT_OUTPUT_BIT | VK_PIPELINE_STAGE_TRANSFER_BIT);
     }

-    // Signal pending state as the command queue is now closed
-    m_current_frame->swap_command_buffer->pending = true;
-
     // Set up a present request for this frame as well
     present(m_current_frame);

     // Grab next cb in line and make it usable
-    m_current_cb_index = (m_current_cb_index + 1) % VK_MAX_ASYNC_CB_COUNT;
-    m_current_command_buffer = &m_primary_cb_list[m_current_cb_index];
+    m_current_command_buffer = m_primary_cb_list.next();
     m_current_command_buffer->reset();
     m_current_command_buffer->begin();

@@ -182,23 +177,19 @@ void VKGSRender::queue_swap_request()
     advance_queued_frames();
 }

-void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx, bool free_resources)
+void VKGSRender::frame_context_cleanup(vk::frame_context_t *ctx)
 {
     ensure(ctx->swap_command_buffer);

-    if (ctx->swap_command_buffer->pending)
+    // Perform hard swap here
+    if (ctx->swap_command_buffer->wait(FRAME_PRESENT_TIMEOUT) != VK_SUCCESS)
     {
-        // Perform hard swap here
-        if (ctx->swap_command_buffer->wait(FRAME_PRESENT_TIMEOUT) != VK_SUCCESS)
-        {
-            // Lost surface/device, release swapchain
-            swapchain_unavailable = true;
-        }
-
-        free_resources = true;
+        // Lost surface/device, release swapchain
+        swapchain_unavailable = true;
     }

-    if (free_resources)
+    // Resource cleanup.
+    // TODO: This is some outdated crap.
     {
         if (m_text_writer)
         {
@@ -406,7 +397,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
     if (m_current_frame->swap_command_buffer)
     {
         // Its possible this flip request is triggered by overlays and the flip queue is in undefined state
-        frame_context_cleanup(m_current_frame, true);
+        frame_context_cleanup(m_current_frame);
     }

     // Swap aux storage and current frame; aux storage should always be ready for use at all times
@@ -422,7 +413,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
         }

         // There were no draws and back-to-back flips happened
-        frame_context_cleanup(m_current_frame, true);
+        frame_context_cleanup(m_current_frame);
     }

     if (info.skip_frame || swapchain_unavailable)
@@ -435,7 +426,7 @@ void VKGSRender::flip(const rsx::display_flip_info_t& info)
         m_current_frame->swap_command_buffer = m_current_command_buffer;
         flush_command_queue(true);
         vk::advance_frame_counter();
-        frame_context_cleanup(m_current_frame, true);
+        frame_context_cleanup(m_current_frame);
     }

     m_frame->flip(m_context);
@@ -45,7 +45,7 @@ namespace vk
         return pool;
     }

-    void command_buffer::create(command_pool& cmd_pool, bool auto_reset)
+    void command_buffer::create(command_pool& cmd_pool)
     {
         VkCommandBufferAllocateInfo infos = {};
         infos.sType = VK_STRUCTURE_TYPE_COMMAND_BUFFER_ALLOCATE_INFO;
@@ -54,11 +54,7 @@ namespace vk
         infos.level = VK_COMMAND_BUFFER_LEVEL_PRIMARY;
         CHECK_RESULT(vkAllocateCommandBuffers(cmd_pool.get_owner(), &infos, &commands));

-        if (auto_reset)
-        {
-            m_submit_fence = new fence(cmd_pool.get_owner());
-        }
-
+        m_submit_fence = new fence(cmd_pool.get_owner());
         pool = &cmd_pool;
     }

@@ -63,12 +63,11 @@ namespace vk

     class command_buffer
     {
-    private:
+    protected:
         bool is_open = false;
         bool is_pending = false;
         fence* m_submit_fence = nullptr;

-    protected:
         command_pool* pool = nullptr;
         VkCommandBuffer commands = nullptr;

@@ -95,7 +94,7 @@ namespace vk
         command_buffer() = default;
         ~command_buffer() = default;

-        void create(command_pool& cmd_pool, bool auto_reset = false);
+        void create(command_pool& cmd_pool);
         void destroy();

         void begin();