result: see what it looks like to convert kernel to use result conds instead of guards

Michael Scire 2022-01-24 10:33:46 -08:00
commit 4659a5e793
19 changed files with 471 additions and 567 deletions
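
For orientation before the hunks: judging from how they are used below, R_SUCCEED() replaces return ResultSuccess(), R_THROW(r) replaces a failing return r, R_RETURN(expr) replaces return expr for calls that themselves yield a Result, and ON_RESULT_FAILURE { ... }; replaces a cancellable SCOPE_GUARD with a handler that runs only when the function exits on a failure path, so success paths no longer need explicit Cancel() calls. The following is a minimal sketch of semantics consistent with that usage; it is an illustration under stated assumptions, not the actual Atmosphère implementation (DECLARE_RESULT_SCOPE, cur_result_, OnFailureGuard, and the stub helpers are inventions of this sketch).

    /* result_conds_sketch.cpp: a minimal model of the result-condition idiom.
     * Everything here is a simplified assumption for illustration; the real
     * macros hide this plumbing differently. */
    #include <cstdio>
    #include <utility>

    struct Result {
        int value = 0;
        bool IsSuccess() const { return value == 0; }
    };
    constexpr Result ResultSuccess()       { return Result{0}; }
    constexpr Result ResultOutOfResource() { return Result{1}; }

    /* Guard that runs its body only if the scope unwinds on a failure path. */
    template<typename F>
    struct OnFailureGuard {
        const Result &r;
        F f;
        ~OnFailureGuard() { if (!r.IsSuccess()) { f(); } }
    };
    struct OnFailureTag { const Result &r; };
    template<typename F>
    OnFailureGuard<F> operator+(OnFailureTag t, F f) { return { t.r, std::move(f) }; }

    #define RC_CONCAT_(a, b) a##b
    #define RC_CONCAT(a, b)  RC_CONCAT_(a, b)

    /* In this model, each Result-returning function tracks its pending result;
     * the return macros assign it before returning, so armed handlers can
     * observe the outcome while the scope unwinds. */
    #define DECLARE_RESULT_SCOPE() Result cur_result_ = ResultSuccess()
    #define R_SUCCEED()  do { cur_result_ = ResultSuccess(); return cur_result_; } while (0)
    #define R_THROW(r)   do { cur_result_ = (r);             return cur_result_; } while (0)
    #define R_RETURN(e)  do { cur_result_ = (e);             return cur_result_; } while (0)
    #define R_TRY(e)     do { const Result r_ = (e); if (!r_.IsSuccess()) { R_THROW(r_); } } while (0)
    #define ON_RESULT_FAILURE \
        auto RC_CONCAT(on_fail_guard_, __LINE__) = OnFailureTag{cur_result_} + [&]()
    #define ON_RESULT_FAILURE_2 ON_RESULT_FAILURE /* __LINE__ already uniquifies in this model */

    /* Stub resources so the example runs. */
    bool g_a_held = false;
    Result AcquireA() { g_a_held = true; return ResultSuccess(); }
    Result AcquireB() { return ResultOutOfResource(); } /* simulated failure */
    void   ReleaseA() { g_a_held = false; }

    Result AcquireBoth() {
        DECLARE_RESULT_SCOPE();
        R_TRY(AcquireA());
        ON_RESULT_FAILURE { ReleaseA(); }; /* replaces SCOPE_GUARD + Cancel() */
        R_TRY(AcquireB());                 /* fails: the handler releases A */
        R_SUCCEED();                       /* success path: handler does nothing */
    }

    int main() {
        const Result r = AcquireBoth();
        std::printf("success=%d a_held=%d\n", r.IsSuccess(), g_a_held);
        return 0;
    }

On a failing exit, AcquireBoth releases A during unwinding; on a success exit the handler destructs seeing a success result and does nothing, which is the behavior the old guards needed an explicit Cancel() to achieve.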

View file

@ -178,7 +178,7 @@ namespace ams::kern::arch::arm64 {
/* Initialize the base page table. */
MESOSPHERE_R_ABORT_UNLESS(KPageTableBase::InitializeForKernel(true, table, start, end));
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::InitializeForProcess(u32 id, ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KPageTableManager *pt_manager, KResourceLimit *resource_limit) {
@ -187,7 +187,7 @@ namespace ams::kern::arch::arm64 {
/* Get an ASID */
m_asid = g_asid_manager.Reserve();
auto asid_guard = SCOPE_GUARD { g_asid_manager.Release(m_asid); };
ON_RESULT_FAILURE { g_asid_manager.Release(m_asid); };
/* Set our manager. */
m_manager = pt_manager;
@ -196,7 +196,7 @@ namespace ams::kern::arch::arm64 {
const KVirtualAddress new_table = m_manager->Allocate();
R_UNLESS(new_table != Null<KVirtualAddress>, svc::ResultOutOfResource());
m_ttbr = EncodeTtbr(GetPageTablePhysicalAddress(new_table), m_asid);
auto table_guard = SCOPE_GUARD { m_manager->Free(new_table); };
ON_RESULT_FAILURE_2 { m_manager->Free(new_table); };
/* Initialize our base table. */
const size_t as_width = GetAddressSpaceWidth(as_type);
@ -204,13 +204,9 @@ namespace ams::kern::arch::arm64 {
const KProcessAddress as_end = (1ul << as_width);
R_TRY(KPageTableBase::InitializeForProcess(as_type, enable_aslr, enable_das_merge, from_back, pool, GetVoidPointer(new_table), as_start, as_end, code_address, code_size, mem_block_slab_manager, block_info_manager, resource_limit));
/* We succeeded! */
table_guard.Cancel();
asid_guard.Cancel();
/* Note that we've updated the table (since we created it). */
this->NoteUpdated();
return ResultSuccess();
R_SUCCEED();
}
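
Two failure handlers are live at once in InitializeForProcess above: the asid handler and, once the table is allocated, the table handler. Judging from this usage, the _2 variant exists so a second handler can be declared where one is already active in the same scope; on a failing exit they run in reverse declaration order, matching the unwind order of the two guards they replace. In the model sketched earlier (helpers are hypothetical stubs for shape only):

    Result ReserveAsid();
    void   ReleaseAsid();
    Result AllocateTable();
    void   FreeTable();
    Result InitializeBase();

    Result InitializeForProcessShape() {
        DECLARE_RESULT_SCOPE();
        R_TRY(ReserveAsid());
        ON_RESULT_FAILURE { ReleaseAsid(); };
        R_TRY(AllocateTable());
        ON_RESULT_FAILURE_2 { FreeTable(); }; /* fires before the asid handler */
        R_TRY(InitializeBase());
        R_SUCCEED();
    }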
Result KPageTable::Finalize() {
@ -316,7 +312,7 @@ namespace ams::kern::arch::arm64 {
/* Release our asid. */
g_asid_manager.Release(m_asid);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::OperateImpl(PageLinkedList *page_list, KProcessAddress virt_addr, size_t num_pages, KPhysicalAddress phys_addr, bool is_pa_valid, const KPageProperties properties, OperationType operation, bool reuse_ll) {
@ -334,17 +330,17 @@ namespace ams::kern::arch::arm64 {
}
if (operation == OperationType_Unmap) {
return this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll);
R_RETURN(this->Unmap(virt_addr, num_pages, page_list, false, reuse_ll));
} else {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_Map:
return this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
R_RETURN(this->MapContiguous(virt_addr, phys_addr, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
case OperationType_ChangePermissions:
return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll);
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, false, page_list, reuse_ll));
case OperationType_ChangePermissionsAndRefresh:
return this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll);
R_RETURN(this->ChangePermissions(virt_addr, num_pages, entry_template, properties.disable_merge_attributes, true, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
@ -361,7 +357,7 @@ namespace ams::kern::arch::arm64 {
auto entry_template = this->GetEntryTemplate(properties);
switch (operation) {
case OperationType_MapGroup:
return this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll);
R_RETURN(this->MapGroup(virt_addr, page_group, num_pages, entry_template, properties.disable_merge_attributes == DisableMergeAttribute_DisableHead, page_list, reuse_ll));
MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
}
}
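
Note that even plain tail calls like the ones above become R_RETURN(...) rather than bare returns. In a model like the sketch earlier, a bare return callee() would bypass the tracked result entirely, so an armed failure handler (or any central result hook, e.g. in a result-debugging build) could not observe the outcome; R_RETURN routes the callee's Result through the same path as R_TRY and R_THROW. A minimal shape, with Callee as a hypothetical stub:

    Result Callee();

    Result Caller() {
        DECLARE_RESULT_SCOPE();
        ON_RESULT_FAILURE { /* cleanup that must see Callee's failure */ };
        R_RETURN(Callee());
    }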
@ -388,7 +384,7 @@ namespace ams::kern::arch::arm64 {
phys_addr += L1BlockSize;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::MapL2Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@ -447,7 +443,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l2_virt, l2_open_count);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::MapL3Blocks(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@ -503,7 +499,8 @@ namespace ams::kern::arch::arm64 {
} else if (this->GetPageTableManager().IsInPageTableHeap(l2_virt) && l2_open_count > 0) {
this->GetPageTableManager().Open(l2_virt, l2_open_count);
}
return svc::ResultOutOfResource();
R_THROW(svc::ResultOutOfResource());
}
/* Set the entry. */
@ -551,7 +548,7 @@ namespace ams::kern::arch::arm64 {
this->GetPageTableManager().Open(l3_virt, l3_open_count);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::Unmap(KProcessAddress virt_addr, size_t num_pages, PageLinkedList *page_list, bool force, bool reuse_ll) {
@ -563,13 +560,13 @@ namespace ams::kern::arch::arm64 {
if (!force) {
const size_t size = num_pages * PageSize;
R_TRY(this->SeparatePages(virt_addr, std::min(util::GetAlignment(GetInteger(virt_addr)), size), page_list, reuse_ll));
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
if (num_pages > 1) {
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
merge_guard.Cancel();
}
}
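
The shape of this hunk is worth noting: the handler is armed immediately after the first SeparatePages succeeds and then simply stays armed for the rest of the scope, so the second call no longer needs its own guard-and-cancel pair. One handler covering several subsequent fallible calls, in the earlier sketch's terms (stubs hypothetical):

    Result SeparateFront();
    Result SeparateBack();
    void   Merge();

    Result SeparateBothEndsShape(int num_pages) {
        DECLARE_RESULT_SCOPE();
        R_TRY(SeparateFront());
        ON_RESULT_FAILURE { Merge(); }; /* covers everything below */
        if (num_pages > 1) {
            R_TRY(SeparateBack());      /* a failure here re-merges via the handler */
        }
        R_SUCCEED();
    }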
@ -717,7 +714,7 @@ namespace ams::kern::arch::arm64 {
this->NoteUpdated();
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::MapContiguous(KProcessAddress virt_addr, KPhysicalAddress phys_addr, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@ -731,7 +728,7 @@ namespace ams::kern::arch::arm64 {
/* Map the pages, using a guard to ensure we don't leak. */
{
auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
R_TRY(this->Map(virt_addr, phys_addr, num_pages, entry_template, disable_head_merge && virt_addr == orig_virt_addr, L3BlockSize, page_list, reuse_ll));
@ -778,9 +775,6 @@ namespace ams::kern::arch::arm64 {
}
}
}
/* We successfully mapped, so cancel our guard. */
map_guard.Cancel();
}
/* Perform what coalescing we can. */
@ -794,7 +788,7 @@ namespace ams::kern::arch::arm64 {
Kernel::GetMemoryManager().Open(orig_phys_addr, num_pages);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::MapGroup(KProcessAddress virt_addr, const KPageGroup &pg, size_t num_pages, PageTableEntry entry_template, bool disable_head_merge, PageLinkedList *page_list, bool reuse_ll) {
@ -810,7 +804,7 @@ namespace ams::kern::arch::arm64 {
/* Map the pages, using a guard to ensure we don't leak. */
{
auto map_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(this->Unmap(orig_virt_addr, num_pages, page_list, true, true)); };
if (num_pages < ContiguousPageSize / PageSize) {
for (const auto &block : pg) {
@ -875,9 +869,6 @@ namespace ams::kern::arch::arm64 {
}
}
}
/* We successfully mapped, so cancel our guard. */
map_guard.Cancel();
}
MESOSPHERE_ASSERT(mapped_pages == num_pages);
@ -889,7 +880,7 @@ namespace ams::kern::arch::arm64 {
/* We succeeded! We want to persist the reference to the pages. */
spg.CancelClose();
return ResultSuccess();
R_SUCCEED();
}
bool KPageTable::MergePages(KProcessAddress virt_addr, PageLinkedList *page_list) {
@ -1184,18 +1175,17 @@ namespace ams::kern::arch::arm64 {
}
/* We're done! */
return ResultSuccess();
R_SUCCEED();
}
Result KPageTable::SeparatePages(KProcessAddress virt_addr, size_t block_size, PageLinkedList *page_list, bool reuse_ll) {
MESOSPHERE_ASSERT(this->IsLockedByCurrentThread());
/* Try to separate pages, re-merging if we fail. */
auto guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
guard.Cancel();
/* If we fail while separating, re-merge. */
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
return ResultSuccess();
/* Try to separate pages. */
R_RETURN(this->SeparatePagesImpl(virt_addr, block_size, page_list, reuse_ll));
}
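
This rewrite shows the payoff most clearly: with a cancellable guard, the tail call had to be unrolled into an R_TRY, a Cancel(), and a return ResultSuccess(); with a failure handler, R_RETURN works directly, because the handler observes the returned Result during unwinding. Four lines collapse to two. In the sketch's terms (stubs hypothetical):

    Result SeparateImpl();
    void   Merge();

    Result SeparateShape() {
        DECLARE_RESULT_SCOPE();
        ON_RESULT_FAILURE { Merge(); };
        R_RETURN(SeparateImpl()); /* the handler fires iff SeparateImpl() failed */
    }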
Result KPageTable::ChangePermissions(KProcessAddress virt_addr, size_t num_pages, PageTableEntry entry_template, DisableMergeAttribute disable_merge_attr, bool refresh_mapping, PageLinkedList *page_list, bool reuse_ll) {
@ -1208,9 +1198,9 @@ namespace ams::kern::arch::arm64 {
const auto end_page = virt_addr + size;
const auto last_page = end_page - PageSize;
auto merge_guard = SCOPE_GUARD { this->MergePages(virt_addr, page_list); };
ON_RESULT_FAILURE { this->MergePages(virt_addr, page_list); };
R_TRY(this->SeparatePages(last_page, std::min(util::GetAlignment(GetInteger(end_page)), size), page_list, reuse_ll));
merge_guard.Cancel();
}
/* ===================================================== */
@ -1426,7 +1416,7 @@ namespace ams::kern::arch::arm64 {
this->MergePages(virt_addr + (num_pages - 1) * PageSize, page_list);
}
return ResultSuccess();
R_SUCCEED();
}
void KPageTable::FinalizeUpdateImpl(PageLinkedList *page_list) {

View file

@ -788,7 +788,7 @@ namespace ams::kern::board::nintendo::nx {
}
/* Ensure that we clean up the tables on failure. */
auto table_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
for (size_t i = start_index; i <= end_index; ++i) {
if (m_tables[i] != Null<KVirtualAddress> && ptm.Close(m_tables[i], 1)) {
ptm.Free(m_tables[i]);
@ -834,8 +834,7 @@ namespace ams::kern::board::nintendo::nx {
}
/* We succeeded. */
table_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
void KDevicePageTable::Finalize() {
@ -915,14 +914,15 @@ namespace ams::kern::board::nintendo::nx {
if (ReadMcRegister(reg_offset) != new_val) {
WriteMcRegister(reg_offset, old_val);
SmmuSynchronizationBarrier();
return svc::ResultNotFound();
R_THROW(svc::ResultNotFound());
}
}
/* Mark the device as attached. */
m_attached_device |= (1ul << device_name);
return ResultSuccess();
R_SUCCEED();
}
Result KDevicePageTable::Detach(ams::svc::DeviceName device_name) {
@ -962,7 +962,7 @@ namespace ams::kern::board::nintendo::nx {
/* Mark the device as detached. */
m_attached_device &= ~(1ul << device_name);
return ResultSuccess();
R_SUCCEED();
}
bool KDevicePageTable::IsFree(KDeviceVirtualAddress address, u64 size) const {
@ -1112,7 +1112,7 @@ namespace ams::kern::board::nintendo::nx {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KDevicePageTable::MapImpl(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
@ -1120,7 +1120,7 @@ namespace ams::kern::board::nintendo::nx {
R_UNLESS(this->IsFree(device_address, size), svc::ResultInvalidCurrentMemory());
/* Ensure that if we fail, we unmap anything we mapped. */
auto unmap_guard = SCOPE_GUARD { this->UnmapImpl(device_address, size, false); };
ON_RESULT_FAILURE { this->UnmapImpl(device_address, size, false); };
/* Iterate, mapping device pages. */
KDeviceVirtualAddress cur_addr = device_address;
@ -1148,10 +1148,7 @@ namespace ams::kern::board::nintendo::nx {
mapped_size += cur_size;
}
/* We're done, so cancel our guard. */
unmap_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
void KDevicePageTable::UnmapImpl(KDeviceVirtualAddress address, u64 size, bool force) {
@ -1423,7 +1420,7 @@ namespace ams::kern::board::nintendo::nx {
MESOSPHERE_ASSERT(((device_address + size - 1) & ~DeviceVirtualAddressMask) == 0);
/* Map the pages. */
return this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned);
R_RETURN(this->MapImpl(page_table, process_address, size, device_address, device_perm, is_aligned));
}
Result KDevicePageTable::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, KDeviceVirtualAddress device_address) {
@ -1437,7 +1434,7 @@ namespace ams::kern::board::nintendo::nx {
/* Unmap the pages. */
this->UnmapImpl(device_address, size, false);
return ResultSuccess();
R_SUCCEED();
}
}

View file

@ -256,7 +256,7 @@ namespace ams::kern::board::nintendo::nx {
g_secure_applet_memory_used = true;
*out = g_secure_applet_memory_address;
return ResultSuccess();
R_SUCCEED();
}
void FreeSecureMemoryForApplet(KVirtualAddress address, size_t size) {
@ -475,7 +475,7 @@ namespace ams::kern::board::nintendo::nx {
R_UNLESS(AMS_LIKELY(util::IsAligned(address, sizeof(u32))), svc::ResultInvalidAddress());
R_UNLESS(AMS_LIKELY(IsRegisterAccessibleToUser(address)), svc::ResultInvalidAddress());
R_UNLESS(AMS_LIKELY(smc::ReadWriteRegister(out, address, mask, value)), svc::ResultInvalidAddress());
return ResultSuccess();
R_SUCCEED();
}
/* Randomness. */
@ -622,7 +622,7 @@ namespace ams::kern::board::nintendo::nx {
Result KSystemControl::AllocateSecureMemory(KVirtualAddress *out, size_t size, u32 pool) {
/* Applet secure memory is handled separately. */
if (pool == KMemoryManager::Pool_Applet) {
return AllocateSecureMemoryForApplet(out, size);
R_RETURN(AllocateSecureMemoryForApplet(out, size));
}
/* Ensure the size is aligned. */
@ -635,7 +635,7 @@ namespace ams::kern::board::nintendo::nx {
R_UNLESS(paddr != Null<KPhysicalAddress>, svc::ResultOutOfMemory());
/* Ensure we don't leak references to the memory on error. */
auto mem_guard = SCOPE_GUARD { Kernel::GetMemoryManager().Close(paddr, num_pages); };
ON_RESULT_FAILURE { Kernel::GetMemoryManager().Close(paddr, num_pages); };
/* If the memory isn't already secure, set it as secure. */
if (pool != KMemoryManager::Pool_System) {
@ -644,9 +644,8 @@ namespace ams::kern::board::nintendo::nx {
}
/* We succeeded. */
mem_guard.Cancel();
*out = KPageTable::GetHeapVirtualAddress(paddr);
return ResultSuccess();
R_SUCCEED();
}
void KSystemControl::FreeSecureMemory(KVirtualAddress address, size_t size, u32 pool) {

View file

@ -77,10 +77,8 @@ namespace ams::kern {
/* Try to allocate a session from unused slab memory. */
session = KSession::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
ON_RESULT_FAILURE { session->Close(); };
/* Ensure that if we fail to allocate our session requests, we close the session we created. */
auto session_guard = SCOPE_GUARD { session->Close(); };
{
/* We want to add two KSessionRequests to the heap, to prevent request exhaustion. */
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
@ -88,8 +86,6 @@ namespace ams::kern {
request->Close();
}
}
session_guard.Cancel();
/* We successfully allocated a session, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
@ -99,8 +95,9 @@ namespace ams::kern {
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
/* Update the session counts. */
auto count_guard = SCOPE_GUARD { session->Close(); };
{
ON_RESULT_FAILURE { session->Close(); };
/* Atomically increment the number of sessions. */
s32 new_sessions;
{
@ -123,7 +120,6 @@ namespace ams::kern {
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
}
}
count_guard.Cancel();
/* Initialize the session. */
session->Initialize(this, m_parent->GetName());
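
In the session-count hunk above, the handler replaces a guard whose Cancel() marked the end of the window it covered; the new code expresses that window as a block instead. When the block exits normally the handler destructs seeing a success result and does nothing, so nothing after the block can trigger it. In the sketch's terms (stubs hypothetical):

    void   AcquireThing();
    void   ReleaseThing();
    Result FallibleUpdate();
    Result LaterFallibleStep();

    Result BlockScopedHandlerShape() {
        DECLARE_RESULT_SCOPE();
        AcquireThing();
        {
            ON_RESULT_FAILURE { ReleaseThing(); }; /* covers this block only */
            R_TRY(FallibleUpdate());
        } /* block done: handler disarms without running */
        R_TRY(LaterFallibleStep()); /* not covered by the block's handler */
        R_SUCCEED();
    }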
@ -133,7 +129,7 @@ namespace ams::kern {
/* Register the session. */
KSession::Register(session);
auto session_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
};
@ -142,9 +138,8 @@ namespace ams::kern {
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
/* We succeeded, so set the output. */
session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
return ResultSuccess();
R_SUCCEED();
}
Result KClientPort::CreateLightSession(KLightClientSession **out) {
@ -175,8 +170,9 @@ namespace ams::kern {
R_UNLESS(session != nullptr, svc::ResultOutOfResource());
/* Update the session counts. */
auto count_guard = SCOPE_GUARD { session->Close(); };
{
ON_RESULT_FAILURE { session->Close(); };
/* Atomically increment the number of sessions. */
s32 new_sessions;
{
@ -199,7 +195,6 @@ namespace ams::kern {
} while (!m_peak_sessions.CompareExchangeWeak<std::memory_order_relaxed>(peak, new_sessions));
}
}
count_guard.Cancel();
/* Initialize the session. */
session->Initialize(this, m_parent->GetName());
@ -209,7 +204,7 @@ namespace ams::kern {
/* Register the session. */
KLightSession::Register(session);
auto session_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
session->GetClientSession().Close();
session->GetServerSession().Close();
};
@ -218,9 +213,8 @@ namespace ams::kern {
R_TRY(m_parent->EnqueueSession(std::addressof(session->GetServerSession())));
/* We succeeded, so set the output. */
session_guard.Cancel();
*out = std::addressof(session->GetClientSession());
return ResultSuccess();
R_SUCCEED();
}
}

View file

@ -35,7 +35,7 @@ namespace ams::kern {
m_space_size = size;
m_is_initialized = true;
return ResultSuccess();
R_SUCCEED();
}
void KDeviceAddressSpace::Finalize() {
@ -50,7 +50,7 @@ namespace ams::kern {
KScopedLightLock lk(m_lock);
/* Attach. */
return m_table.Attach(device_name, m_space_address, m_space_size);
R_RETURN(m_table.Attach(device_name, m_space_address, m_space_size));
}
Result KDeviceAddressSpace::Detach(ams::svc::DeviceName device_name) {
@ -58,7 +58,7 @@ namespace ams::kern {
KScopedLightLock lk(m_lock);
/* Detach. */
return m_table.Detach(device_name);
R_RETURN(m_table.Detach(device_name));
}
Result KDeviceAddressSpace::Map(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address, ams::svc::MemoryPermission device_perm, bool is_aligned) {
@ -75,7 +75,7 @@ namespace ams::kern {
R_TRY(page_table->LockForMapDeviceAddressSpace(process_address, size, ConvertToKMemoryPermission(device_perm), is_aligned));
/* Ensure that if we fail, we don't keep unmapped pages locked. */
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size)); };
/* Map the pages. */
{
@ -84,18 +84,14 @@ namespace ams::kern {
/* Ensure that we unmap the pages if we fail to update the protections. */
/* NOTE: Nintendo does not check the result of this unmap call. */
auto map_guard = SCOPE_GUARD { m_table.Unmap(device_address, size); };
ON_RESULT_FAILURE { m_table.Unmap(device_address, size); };
/* Update the protections in accordance with how much we mapped. */
R_TRY(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size));
/* We succeeded, so cancel our guard. */
map_guard.Cancel();
}
/* We succeeded, so we don't need to unlock our pages. */
unlock_guard.Cancel();
return ResultSuccess();
/* We succeeded. */
R_SUCCEED();
}
Result KDeviceAddressSpace::Unmap(KProcessPageTable *page_table, KProcessAddress process_address, size_t size, u64 device_address) {
@ -111,20 +107,19 @@ namespace ams::kern {
/* Lock the pages. */
R_TRY(page_table->LockForUnmapDeviceAddressSpace(process_address, size));
/* If we fail to unmap, we want to do a partial unlock. */
/* Unmap the pages. */
{
auto unlock_guard = SCOPE_GUARD { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
/* If we fail to unmap, we want to do a partial unlock. */
ON_RESULT_FAILURE { MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpacePartialMap(process_address, size)); };
/* Unmap. */
/* Perform the unmap. */
R_TRY(m_table.Unmap(page_table, process_address, size, device_address));
unlock_guard.Cancel();
}
/* Unlock the pages. */
MESOSPHERE_R_ABORT_UNLESS(page_table->UnlockForDeviceAddressSpace(process_address, size));
return ResultSuccess();
R_SUCCEED();
}
}

View file

@ -45,7 +45,7 @@ namespace ams::kern {
/* Mark initialized. */
m_is_initialized = true;
return ResultSuccess();
R_SUCCEED();
}
void KInterruptEvent::Finalize() {
@ -69,7 +69,7 @@ namespace ams::kern {
/* Clear the interrupt. */
Kernel::GetInterruptManager().ClearInterrupt(m_interrupt_id, m_core_id);
return ResultSuccess();
R_SUCCEED();
}
Result KInterruptEventTask::Register(s32 interrupt_id, s32 core_id, bool level, KInterruptEvent *event) {
@ -91,7 +91,7 @@ namespace ams::kern {
}
/* Ensure that the task is cleaned up if anything goes wrong. */
auto task_guard = SCOPE_GUARD { if (allocated) { KInterruptEventTask::Free(task); } };
ON_RESULT_FAILURE { if (allocated) { KInterruptEventTask::Free(task); } };
/* Register/bind the interrupt task. */
{
@ -110,9 +110,7 @@ namespace ams::kern {
g_interrupt_event_task_table[interrupt_id] = task;
}
/* We successfully registered, so we don't need to free the task. */
task_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
void KInterruptEventTask::Unregister(s32 interrupt_id, s32 core_id) {

View file

@ -171,7 +171,7 @@ namespace ams::kern {
manager->InitializeOptimizedMemory();
}
return ResultSuccess();
R_SUCCEED();
}
void KMemoryManager::FinalizeOptimizedMemory(u64 process_id, Pool pool) {
@ -236,7 +236,7 @@ namespace ams::kern {
R_UNLESS(0 <= heap_index, svc::ResultOutOfMemory());
/* Ensure that we don't leave anything un-freed. */
auto group_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
for (const auto &it : *out) {
auto &manager = this->GetManager(it.GetAddress());
const size_t num_pages = std::min(it.GetNumPages(), (manager.GetEndAddress() - it.GetAddress()) / PageSize);
@ -256,12 +256,11 @@ namespace ams::kern {
break;
}
/* Safely add it to our group. */
{
auto block_guard = SCOPE_GUARD { cur_manager->Free(allocated_block, pages_per_alloc); };
/* Ensure we don't leak the block if we fail. */
ON_RESULT_FAILURE { cur_manager->Free(allocated_block, pages_per_alloc); };
/* Add the block to our group. */
R_TRY(out->AddBlock(allocated_block, pages_per_alloc));
block_guard.Cancel();
}
/* Maintain the optimized memory bitmap, if we should. */
if (unoptimized) {
@ -277,8 +276,7 @@ namespace ams::kern {
R_UNLESS(num_pages == 0, svc::ResultOutOfMemory());
/* We succeeded! */
group_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
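
The per-block handler in AllocatePageGroupImpl above is kept inside its own scope for a reason: once AddBlock succeeds, the group owns the block, and the outer failure handler already frees everything in the group on a failing exit. If the inner handler survived past that ownership transfer, a later failure would free the block twice. In the sketch's terms (types and stubs hypothetical):

    struct Group { Result AddBlock(void *block); };
    void *AllocateBlock();
    void  FreeBlock(void *block);

    Result AddAllocatedBlockShape(Group *out) {
        DECLARE_RESULT_SCOPE();
        void *block = AllocateBlock();
        {
            ON_RESULT_FAILURE { FreeBlock(block); };
            R_TRY(out->AddBlock(block)); /* on success, ownership moves to the group */
        } /* disarm: later failures must not free this block again */
        R_SUCCEED();
    }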
Result KMemoryManager::AllocateAndOpen(KPageGroup *out, size_t num_pages, u32 option) {
@ -313,7 +311,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KMemoryManager::AllocateAndOpenForProcess(KPageGroup *out, size_t num_pages, u32 option, u64 process_id, u8 fill_pattern) {
@ -419,7 +417,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
size_t KMemoryManager::Impl::Initialize(KPhysicalAddress address, size_t size, KVirtualAddress management, KVirtualAddress management_end, Pool p) {

View file

@ -121,9 +121,7 @@ namespace ams::kern {
m_impl.InitializeForKernel(table, start, end);
/* Initialize our memory block manager. */
return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager);
return ResultSuccess();
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}
Result KPageTableBase::InitializeForProcess(ams::svc::CreateProcessFlag as_type, bool enable_aslr, bool enable_das_merge, bool from_back, KMemoryManager::Pool pool, void *table, KProcessAddress start, KProcessAddress end, KProcessAddress code_address, size_t code_size, KMemoryBlockSlabManager *mem_block_slab_manager, KBlockInfoManager *block_info_manager, KResourceLimit *resource_limit) {
@ -132,8 +130,6 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(code_address < code_address + code_size);
MESOSPHERE_ABORT_UNLESS(code_address + code_size - 1 <= end - 1);
/* Declare variables to hold our region sizes. */
/* Define helpers. */
auto GetSpaceStart = [&](KAddressSpaceInfo::Type type) ALWAYS_INLINE_LAMBDA {
return KAddressSpaceInfo::GetAddressSpaceStart(m_address_space_width, type);
@ -331,9 +327,7 @@ namespace ams::kern {
m_impl.InitializeForProcess(table, GetInteger(start), GetInteger(end));
/* Initialize our memory block manager. */
return m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager);
return ResultSuccess();
R_RETURN(m_memory_block_manager.Initialize(m_address_space_start, m_address_space_end, m_memory_block_slab_manager));
}
@ -472,7 +466,7 @@ namespace ams::kern {
R_UNLESS((info.m_permission & perm_mask) == perm, svc::ResultInvalidCurrentMemory());
R_UNLESS((info.m_attribute & attr_mask) == attr, svc::ResultInvalidCurrentMemory());
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryStateContiguous(size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) const {
@ -508,7 +502,7 @@ namespace ams::kern {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CheckMemoryState(KMemoryState *out_state, KMemoryPermission *out_perm, KMemoryAttribute *out_attr, size_t *out_blocks_needed, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, u32 ignore_attr) const {
@ -562,7 +556,7 @@ namespace ams::kern {
if (out_blocks_needed != nullptr) {
*out_blocks_needed = blocks_for_start_align + blocks_for_end_align;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::LockMemoryAndOpen(KPageGroup *out_pg, KPhysicalAddress *out_paddr, KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr) {
@ -625,7 +619,7 @@ namespace ams::kern {
out_pg->Open();
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnlockMemory(KProcessAddress addr, size_t size, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr, KMemoryPermission new_perm, u32 lock_attr, const KPageGroup *pg) {
@ -673,7 +667,7 @@ namespace ams::kern {
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::QueryInfoImpl(KMemoryInfo *out_info, ams::svc::PageInfo *out_page, KProcessAddress address) const {
@ -686,7 +680,7 @@ namespace ams::kern {
*out_info = block->GetMemoryInfo();
out_page->flags = 0;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::QueryMappingImpl(KProcessAddress *out, KPhysicalAddress address, size_t size, KMemoryState state) const {
@ -726,7 +720,7 @@ namespace ams::kern {
if (R_SUCCEEDED(this->CheckMemoryState(mapped_address, size, KMemoryState_All, state, KMemoryPermission_UserRead, KMemoryPermission_UserRead, KMemoryAttribute_None, KMemoryAttribute_None))) {
/* It is! */
*out = mapped_address;
return ResultSuccess();
R_SUCCEED();
}
}
@ -756,7 +750,7 @@ namespace ams::kern {
/* We found the region. */
*out = mapped_address;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
@ -803,7 +797,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Ensure that we unprotect the source pages on failure. */
auto unprot_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
const KPageProperties unprotect_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableHeadBodyTail };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
};
@ -812,15 +806,12 @@ namespace ams::kern {
const KPageProperties dst_map_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_map_properties, false));
/* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */
unprot_guard.Cancel();
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_src_perm, new_src_attr, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_Stack, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
@ -869,7 +860,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
auto remap_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
};
@ -877,15 +868,12 @@ namespace ams::kern {
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
remap_guard.Cancel();
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
@ -935,7 +923,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* Ensure that we unprotect the source pages on failure. */
auto unprot_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
const KPageProperties unprotect_properties = { src_perm, false, false, DisableMergeAttribute_EnableHeadBodyTail };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, unprotect_properties, OperationType_ChangePermissions, true));
};
@ -944,15 +932,12 @@ namespace ams::kern {
const KPageProperties dst_properties = { new_perm, false, false, DisableMergeAttribute_DisableHead };
R_TRY(this->MapPageGroupImpl(updater.GetPageList(), dst_address, pg, dst_properties, false));
/* We successfully mapped the alias pages, so we don't need to unprotect the src pages on failure. */
unprot_guard.Cancel();
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, src_state, new_perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Locked, KMemoryBlockDisableMergeAttribute_None);
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_AliasCode, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapCodeMemory(KProcessAddress dst_address, KProcessAddress src_address, size_t size) {
@ -1034,7 +1019,7 @@ namespace ams::kern {
R_TRY(this->Operate(updater.GetPageList(), dst_address, num_pages, Null<KPhysicalAddress>, false, dst_unmap_properties, OperationType_Unmap, false));
/* Ensure that we re-map the aliased pages on failure. */
auto remap_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
this->RemapPageGroup(updater.GetPageList(), dst_address, size, pg);
};
@ -1042,9 +1027,6 @@ namespace ams::kern {
const KPageProperties src_properties = { KMemoryPermission_UserReadWrite, false, false, DisableMergeAttribute_EnableAndMergeHeadBodyTail };
R_TRY(this->Operate(updater.GetPageList(), src_address, num_pages, Null<KPhysicalAddress>, false, src_properties, OperationType_ChangePermissions, false));
/* We successfully changed the permissions for the source pages, so we don't need to re-map the dst pages on failure. */
remap_guard.Cancel();
/* Apply the memory block updates. */
m_memory_block_manager.Update(std::addressof(dst_allocator), dst_address, num_pages, KMemoryState_None, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
m_memory_block_manager.Update(std::addressof(src_allocator), src_address, num_pages, KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Locked);
@ -1053,7 +1035,7 @@ namespace ams::kern {
reprotected_pages = true;
}
return ResultSuccess();
R_SUCCEED();
}
KProcessAddress KPageTableBase::FindFreeArea(KProcessAddress region_start, size_t region_num_pages, size_t num_pages, size_t alignment, size_t offset, size_t guard_pages) const {
@ -1149,7 +1131,7 @@ namespace ams::kern {
/* Map the pages. */
const KPageProperties properties = { perm, false, false, DisableMergeAttribute_None };
return this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false);
R_RETURN(this->Operate(page_list, address, num_pages, pg, properties, OperationType_MapGroup, false));
}
Result KPageTableBase::MapPageGroupImpl(PageLinkedList *page_list, KProcessAddress address, const KPageGroup &pg, const KPageProperties properties, bool reuse_ll) {
@ -1160,7 +1142,7 @@ namespace ams::kern {
KProcessAddress cur_address = address;
/* Ensure that we clean up on failure. */
auto mapping_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
MESOSPHERE_ABORT_UNLESS(!reuse_ll);
if (cur_address != start_address) {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
@ -1177,8 +1159,7 @@ namespace ams::kern {
}
/* We succeeded! */
mapping_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
void KPageTableBase::RemapPageGroup(PageLinkedList *page_list, KProcessAddress address, size_t size, const KPageGroup &pg) {
@ -1301,7 +1282,7 @@ namespace ams::kern {
R_UNLESS(IsHeapPhysicalAddress(cur_addr), svc::ResultInvalidCurrentMemory());
R_TRY(pg.AddBlock(cur_addr, cur_pages));
return ResultSuccess();
R_SUCCEED();
}
bool KPageTableBase::IsValidPageGroup(const KPageGroup &pg, KProcessAddress addr, size_t num_pages) {
@ -1433,7 +1414,7 @@ namespace ams::kern {
.size = size,
};
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
@ -1467,7 +1448,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, new_perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetProcessMemoryPermission(KProcessAddress addr, size_t size, ams::svc::MemoryPermission svc_perm) {
@ -1533,7 +1514,7 @@ namespace ams::kern {
cpu::InvalidateEntireInstructionCache();
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetMemoryAttribute(KProcessAddress addr, size_t size, u32 mask, u32 attr) {
@ -1573,7 +1554,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, old_state, old_perm, new_attr, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetHeapSize(KProcessAddress *out, size_t size) {
@ -1627,11 +1608,11 @@ namespace ams::kern {
/* Set the output. */
*out = m_heap_region_start;
return ResultSuccess();
R_SUCCEED();
} else if (size == static_cast<size_t>(m_current_heap_end - m_heap_region_start)) {
/* The size requested is exactly the current size. */
*out = m_heap_region_start;
return ResultSuccess();
R_SUCCEED();
} else {
/* We have to allocate memory. Determine how much to allocate and where while the table is locked. */
cur_address = m_current_heap_end;
@ -1692,7 +1673,7 @@ namespace ams::kern {
/* Set the output. */
*out = m_heap_region_start;
return ResultSuccess();
R_SUCCEED();
}
}
@ -1705,7 +1686,7 @@ namespace ams::kern {
m_max_heap_size = size;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::QueryInfo(KMemoryInfo *out_info, ams::svc::PageInfo *out_page_info, KProcessAddress addr) const {
@ -1727,12 +1708,12 @@ namespace ams::kern {
};
out_page_info->flags = 0;
return ResultSuccess();
R_SUCCEED();
}
/* Otherwise, lock the table and query. */
KScopedLightLock lk(m_general_lock);
return this->QueryInfoImpl(out_info, out_page_info, addr);
R_RETURN(this->QueryInfoImpl(out_info, out_page_info, addr));
}
Result KPageTableBase::QueryPhysicalAddress(ams::svc::PhysicalMemoryInfo *out, KProcessAddress address) const {
@ -1808,7 +1789,7 @@ namespace ams::kern {
out->physical_address = GetInteger(phys_addr);
out->virtual_address = GetInteger(virt_addr);
out->size = phys_size;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapIoImpl(KProcessAddress *out, PageLinkedList *page_list, KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
@ -1882,7 +1863,7 @@ namespace ams::kern {
/* Set the output address. */
*out = addr;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapIo(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
@ -1905,7 +1886,7 @@ namespace ams::kern {
m_memory_block_manager.Update(std::addressof(allocator), addr, size / PageSize, KMemoryState_Io, perm, KMemoryAttribute_Locked, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size, ams::svc::MemoryMapping mapping, ams::svc::MemoryPermission svc_perm) {
@ -1935,7 +1916,7 @@ namespace ams::kern {
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Io, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapIoRegion(KProcessAddress dst_address, KPhysicalAddress phys_addr, size_t size) {
@ -1986,7 +1967,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapStatic(KPhysicalAddress phys_addr, size_t size, KMemoryPermission perm) {
@ -2054,7 +2035,7 @@ namespace ams::kern {
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, KMemoryState_Static, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapRegion(KMemoryRegionType region_type, KMemoryPermission perm) {
@ -2070,7 +2051,7 @@ namespace ams::kern {
R_CONVERT(svc::ResultInvalidAddress, svc::ResultOutOfRange())
} R_END_TRY_CATCH;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapPages(KProcessAddress *out_addr, size_t num_pages, size_t alignment, KPhysicalAddress phys_addr, bool is_pa_valid, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@ -2111,7 +2092,7 @@ namespace ams::kern {
/* We successfully mapped the pages. */
*out_addr = addr;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapPages(KProcessAddress address, size_t num_pages, KMemoryState state, KMemoryPermission perm) {
@ -2140,7 +2121,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapPages(KProcessAddress address, size_t num_pages, KMemoryState state) {
@ -2170,7 +2151,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapPageGroup(KProcessAddress *out_addr, const KPageGroup &pg, KProcessAddress region_start, size_t region_num_pages, KMemoryState state, KMemoryPermission perm) {
@ -2207,7 +2188,7 @@ namespace ams::kern {
/* We successfully mapped the pages. */
*out_addr = addr;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapPageGroup(KProcessAddress addr, const KPageGroup &pg, KMemoryState state, KMemoryPermission perm) {
@ -2241,7 +2222,7 @@ namespace ams::kern {
m_memory_block_manager.Update(std::addressof(allocator), addr, num_pages, state, perm, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_Normal, KMemoryBlockDisableMergeAttribute_None);
/* We successfully mapped the pages. */
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapPageGroup(KProcessAddress address, const KPageGroup &pg, KMemoryState state) {
@ -2277,7 +2258,7 @@ namespace ams::kern {
/* Update the blocks. */
m_memory_block_manager.Update(std::addressof(allocator), address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MakeAndOpenPageGroup(KPageGroup *out, KProcessAddress address, size_t num_pages, u32 state_mask, u32 state, u32 perm_mask, u32 perm, u32 attr_mask, u32 attr) {
@ -2300,7 +2281,7 @@ namespace ams::kern {
/* Open a new reference to the pages in the group. */
out->Open();
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::InvalidateProcessDataCache(KProcessAddress address, size_t size) {
@ -2367,7 +2348,7 @@ namespace ams::kern {
cpu::InvalidateDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::ReadDebugMemory(void *buffer, KProcessAddress address, size_t size) {
@ -2420,7 +2401,7 @@ namespace ams::kern {
R_UNLESS(UserspaceAccess::CopyMemoryToUser(buffer, copy_src, cur_size), svc::ResultInvalidPointer());
}
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -2453,7 +2434,7 @@ namespace ams::kern {
/* Perform copy for the last block. */
R_TRY(PerformCopy());
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::WriteDebugMemory(KProcessAddress address, const void *buffer, size_t size) {
@ -2505,7 +2486,7 @@ namespace ams::kern {
cpu::StoreDataCache(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
}
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -2541,7 +2522,7 @@ namespace ams::kern {
/* Invalidate the entire instruction cache, as this svc allows modifying executable pages. */
cpu::InvalidateEntireInstructionCache();
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::ReadIoMemoryImpl(void *buffer, KPhysicalAddress phys_addr, size_t size) {
@ -2586,7 +2567,7 @@ namespace ams::kern {
break;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::WriteIoMemoryImpl(KPhysicalAddress phys_addr, const void *buffer, size_t size) {
@ -2631,7 +2612,7 @@ namespace ams::kern {
break;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::ReadDebugIoMemory(void *buffer, KProcessAddress address, size_t size) {
@ -2667,7 +2648,7 @@ namespace ams::kern {
dst += cur_size;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::WriteDebugIoMemory(KProcessAddress address, const void *buffer, size_t size) {
@ -2703,7 +2684,7 @@ namespace ams::kern {
src += cur_size;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::LockForMapDeviceAddressSpace(KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
@ -2727,7 +2708,7 @@ namespace ams::kern {
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::ShareToDevice, KMemoryPermission_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::LockForUnmapDeviceAddressSpace(KProcessAddress address, size_t size) {
@ -2755,7 +2736,7 @@ namespace ams::kern {
const KMemoryBlockManager::MemoryBlockLockFunction lock_func = m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForShare : &KMemoryBlock::UpdateDeviceDisableMergeStateForShareRight;
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, lock_func, KMemoryPermission_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnlockForDeviceAddressSpace(KProcessAddress address, size_t size) {
@ -2782,7 +2763,7 @@ namespace ams::kern {
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, &KMemoryBlock::UnshareToDevice, KMemoryPermission_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnlockForDeviceAddressSpacePartialMap(KProcessAddress address, size_t size) {
@ -2809,7 +2790,7 @@ namespace ams::kern {
/* Update the memory blocks. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), address, num_pages, m_enable_device_address_space_merge ? &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshare : &KMemoryBlock::UpdateDeviceDisableMergeStateForUnshareRight, KMemoryPermission_None);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::OpenMemoryRangeForMapDeviceAddressSpace(KPageTableBase::MemoryRange *out, KProcessAddress address, size_t size, KMemoryPermission perm, bool is_aligned) {
@ -2827,7 +2808,7 @@ namespace ams::kern {
/* We got the range, so open it. */
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::OpenMemoryRangeForUnmapDeviceAddressSpace(MemoryRange *out, KProcessAddress address, size_t size) {
@ -2844,7 +2825,7 @@ namespace ams::kern {
/* We got the range, so open it. */
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::LockForIpcUserBuffer(KPhysicalAddress *out, KProcessAddress address, size_t size) {
@ -2915,7 +2896,7 @@ namespace ams::kern {
/* We got the range, so open it. */
Kernel::GetMemoryManager().Open(out->address, out->size / PageSize);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromLinearToUser(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
@ -2961,7 +2942,7 @@ namespace ams::kern {
R_UNLESS(UserspaceAccess::CopyMemoryToUser(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size), svc::ResultInvalidCurrentMemory());
}
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -2995,7 +2976,7 @@ namespace ams::kern {
R_TRY(PerformCopy());
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromLinearToKernel(KProcessAddress dst_addr, size_t size, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
@ -3030,7 +3011,7 @@ namespace ams::kern {
/* Copy the data. */
std::memcpy(GetVoidPointer(dst_addr), GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), cur_size);
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -3064,7 +3045,7 @@ namespace ams::kern {
R_TRY(PerformCopy());
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromUserToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
@ -3110,7 +3091,7 @@ namespace ams::kern {
R_UNLESS(UserspaceAccess::CopyMemoryFromUser(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size), svc::ResultInvalidCurrentMemory());
}
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -3144,7 +3125,7 @@ namespace ams::kern {
R_TRY(PerformCopy());
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromKernelToLinear(KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr) {
@ -3179,7 +3160,7 @@ namespace ams::kern {
/* Copy the data. */
std::memcpy(GetVoidPointer(GetLinearMappedVirtualAddress(cur_addr)), GetVoidPointer(src_addr), cur_size);
return ResultSuccess();
R_SUCCEED();
};
/* Iterate. */
@ -3213,7 +3194,7 @@ namespace ams::kern {
R_TRY(PerformCopy());
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromHeapToHeap(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
@ -3330,7 +3311,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CopyMemoryFromHeapToHeapWithoutCheckDestination(KPageTableBase &dst_page_table, KProcessAddress dst_addr, size_t size, u32 dst_state_mask, u32 dst_state, KMemoryPermission dst_test_perm, u32 dst_attr_mask, u32 dst_attr, KProcessAddress src_addr, u32 src_state_mask, u32 src_state, KMemoryPermission src_test_perm, u32 src_attr_mask, u32 src_attr) {
@ -3449,7 +3430,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
#pragma GCC push_options
@ -3492,12 +3473,12 @@ namespace ams::kern {
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
return svc::ResultInvalidCombination();
R_THROW(svc::ResultInvalidCombination());
}
/* Ensure that on failure, we roll back appropriately. */
size_t mapped_size = 0;
auto cleanup_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (mapped_size > 0) {
this->CleanupForIpcClientOnServerSetupFailure(page_list, mapping_src_start, mapped_size, src_perm);
}
@ -3547,15 +3528,12 @@ namespace ams::kern {
MESOSPHERE_ABORT_UNLESS(it != m_memory_block_manager.end());
}
/* We succeeded, so no need to cleanup. */
cleanup_guard.Cancel();
if (out_blocks_needed != nullptr) {
MESOSPHERE_ASSERT(blocks_needed <= KMemoryBlockManagerUpdateAllocator::MaxBlocks);
*out_blocks_needed = blocks_needed;
}
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetupForIpcServer(KProcessAddress *out_addr, size_t size, KProcessAddress src_addr, KMemoryPermission test_perm, KMemoryState dst_state, KPageTableBase &src_page_table, bool send) {
@ -3621,7 +3599,7 @@ namespace ams::kern {
}
};
auto cleanup_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (cur_mapped_addr != dst_addr) {
const KPageProperties unmap_properties = { KMemoryPermission_None, false, false, DisableMergeAttribute_None };
MESOSPHERE_R_ABORT_UNLESS(this->Operate(updater.GetPageList(), dst_addr, (cur_mapped_addr - dst_addr) / PageSize, Null<KPhysicalAddress>, false, unmap_properties, OperationType_Unmap, true));
@ -3767,9 +3745,8 @@ namespace ams::kern {
*out_addr = dst_addr + (src_start - aligned_src_start);
/* We succeeded. */
cleanup_guard.Cancel();
memory_reservation.Commit();
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::SetupForIpc(KProcessAddress *out_dst_addr, size_t size, KProcessAddress src_addr, KPageTableBase &src_page_table, KMemoryPermission test_perm, KMemoryState dst_state, bool send) {
@ -3798,7 +3775,7 @@ namespace ams::kern {
/* Ensure that we clean up appropriately if we fail after this. */
const auto src_perm = static_cast<KMemoryPermission>((test_perm == KMemoryPermission_UserReadWrite) ? KMemoryPermission_KernelReadWrite | KMemoryPermission_NotMapped : KMemoryPermission_UserRead);
auto cleanup_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (src_map_end > src_map_start) {
src_page_table.CleanupForIpcClientOnServerSetupFailure(updater.GetPageList(), src_map_start, src_map_size, src_perm);
}
@ -3813,10 +3790,7 @@ namespace ams::kern {
src_page_table.m_memory_block_manager.UpdateLock(std::addressof(allocator), src_map_start, (src_map_end - src_map_start) / PageSize, &KMemoryBlock::LockForIpc, src_perm);
}
/* We succeeded, so cancel our cleanup guard. */
cleanup_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CleanupForIpcServer(KProcessAddress address, size_t size, KMemoryState dst_state) {
@ -3857,7 +3831,7 @@ namespace ams::kern {
const size_t mapping_size = (mapping_start < mapping_end) ? mapping_end - mapping_start : 0;
m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, aligned_size - mapping_size);
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::CleanupForIpcClient(KProcessAddress address, size_t size, KMemoryState dst_state) {
@ -3890,7 +3864,7 @@ namespace ams::kern {
test_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
return svc::ResultInvalidCombination();
R_THROW(svc::ResultInvalidCombination());
}
/* Lock the table. */
@ -3902,7 +3876,7 @@ namespace ams::kern {
/* Ensure that on failure, we roll back appropriately. */
size_t mapped_size = 0;
auto unmap_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (mapped_size > 0) {
/* Determine where the mapping ends. */
const auto mapped_end = GetInteger(mapping_start) + mapped_size;
@ -4040,10 +4014,7 @@ namespace ams::kern {
/* Unlock the pages. */
m_memory_block_manager.UpdateLock(std::addressof(allocator), mapping_start, mapping_size / PageSize, &KMemoryBlock::UnlockForIpc, KMemoryPermission_None);
/* We succeeded, so no need to unmap. */
unmap_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
void KPageTableBase::CleanupForIpcClientOnServerSetupFailure(PageLinkedList *page_list, KProcessAddress address, size_t size, KMemoryPermission prot_perm) {
@ -4237,7 +4208,7 @@ namespace ams::kern {
/* Reset the current tracking address, and make sure we clean up on failure. */
cur_address = address;
auto unmap_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (cur_address > address) {
const KProcessAddress last_unmap_address = cur_address - 1;
@ -4340,10 +4311,7 @@ namespace ams::kern {
KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None,
KMemoryState_Normal, KMemoryPermission_UserReadWrite, KMemoryAttribute_None);
/* Cancel our guard. */
unmap_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
}
}
@ -4479,7 +4447,7 @@ namespace ams::kern {
/* Reset the current tracking address, and make sure we clean up on failure. */
cur_address = address;
auto remap_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (cur_address > address) {
const KProcessAddress last_map_address = cur_address - 1;
cur_address = address;
@ -4574,8 +4542,7 @@ namespace ams::kern {
m_memory_block_manager.Update(std::addressof(allocator), address, size / PageSize, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_None);
/* We succeeded. */
remap_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::MapPhysicalMemoryUnsafe(KProcessAddress address, size_t size) {
@ -4583,7 +4550,7 @@ namespace ams::kern {
R_UNLESS(Kernel::GetUnsafeMemory().TryReserve(size), svc::ResultLimitReached());
/* Ensure we release our reservation on failure. */
auto reserve_guard = SCOPE_GUARD { Kernel::GetUnsafeMemory().Release(size); };
ON_RESULT_FAILURE { Kernel::GetUnsafeMemory().Release(size); };
/* Create a page group for the new memory. */
KPageGroup pg(m_block_info_manager);
@ -4628,8 +4595,7 @@ namespace ams::kern {
m_mapped_unsafe_physical_memory += size;
/* We succeeded. */
reserve_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
}
@ -4666,7 +4632,7 @@ namespace ams::kern {
/* Update our mapped unsafe size. */
m_mapped_unsafe_physical_memory -= size;
return ResultSuccess();
R_SUCCEED();
}
Result KPageTableBase::UnmapProcessMemory(KProcessAddress dst_address, size_t size, KPageTableBase &src_page_table, KProcessAddress src_address) {
@ -4770,7 +4736,7 @@ namespace ams::kern {
/* Apply the memory block update. */
m_memory_block_manager.Update(std::addressof(allocator), dst_address, num_pages, KMemoryState_Free, KMemoryPermission_None, KMemoryAttribute_None, KMemoryBlockDisableMergeAttribute_None, KMemoryBlockDisableMergeAttribute_Normal);
return ResultSuccess();
R_SUCCEED();
}
}
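
The conversions in this file show the commit's core pattern: SCOPE_GUARD arms cleanup unconditionally and every success path must remember to Cancel(), while ON_RESULT_FAILURE keys the cleanup off the Result the enclosing function is returning, so the success path needs no bookkeeping at all. Below is a minimal, self-contained sketch of how such a result-sensitive guard can be wired up; the names (DECLARE_RESULT_SLOT, OnResultFailure, AllocateThing, and so on) are illustrative stand-ins, not the real Atmosphère definitions, which appear in the result machinery at the end of this diff.

#include <cstdio>

/* Toy result type standing in for ams::Result. */
struct Result {
    int value;
    constexpr bool IsFailure() const { return value != 0; }
};
constexpr Result ResultSuccess() { return Result{0}; }

/* Each function declares a slot that the exit macros fill in before returning,
   so scope guards can see how the function is exiting. */
#define DECLARE_RESULT_SLOT() Result _current_result = ResultSuccess()
#define R_SUCCEED()  return ResultSuccess()
#define R_THROW(res) return (_current_result = (res))
#define R_TRY(expr)                                   \
    do {                                              \
        if (Result _r = (expr); _r.IsFailure()) {     \
            R_THROW(_r);                              \
        }                                             \
    } while (0)

/* Runs its cleanup at scope exit only if the pending result is a failure. */
template<typename F>
struct OnResultFailure {
    const Result &result;
    F cleanup;
    ~OnResultFailure() { if (result.IsFailure()) { cleanup(); } }
};

/* Hypothetical fallible operations for the sketch. */
Result AllocateThing() { return ResultSuccess(); }
Result MapThing()      { return Result{1}; /* simulate failure */ }
void   FreeThing()     { std::puts("cleanup: freeing allocation"); }

Result Example() {
    DECLARE_RESULT_SLOT();
    R_TRY(AllocateThing());

    /* Equivalent of ON_RESULT_FAILURE { FreeThing(); }; -- no Cancel() needed on success. */
    OnResultFailure guard{_current_result, [] { FreeThing(); }};

    R_TRY(MapThing());  /* Fails: R_THROW records the result, so the guard fires on unwind. */
    R_SUCCEED();
}

int main() { std::printf("Example() -> %d\n", Example().value); }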

View file

@ -79,12 +79,12 @@ namespace ams::kern {
/* Terminate and close the thread. */
ON_SCOPE_EXIT { cur_child->Close(); };
if (Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
return terminate_result;
if (const Result terminate_result = cur_child->Terminate(); svc::ResultTerminationRequested::Includes(terminate_result)) {
R_THROW(terminate_result);
}
}
return ResultSuccess();
R_SUCCEED();
}
class ThreadQueueImplForKProcessEnterUserException final : public KThreadQueue {
@ -259,7 +259,7 @@ namespace ams::kern {
/* We're initialized! */
m_is_initialized = true;
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, const KPageGroup &pg, const u32 *caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool, bool immortal) {
@ -287,7 +287,7 @@ namespace ams::kern {
auto *pt_manager = std::addressof(is_app ? Kernel::GetApplicationPageTableManager() : Kernel::GetSystemPageTableManager());
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager, res_limit));
}
auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
ON_RESULT_FAILURE { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
@ -310,8 +310,7 @@ namespace ams::kern {
m_resource_limit->Open();
/* We succeeded! */
pt_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params, svc::KUserPointer<const u32 *> user_caps, s32 num_caps, KResourceLimit *res_limit, KMemoryManager::Pool pool) {
@ -372,7 +371,7 @@ namespace ams::kern {
}
/* Ensure we don't leak any secure memory we allocated. */
auto sys_resource_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (m_system_resource_address != Null<KVirtualAddress>) {
/* Check that we have no outstanding allocations. */
MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
@ -397,7 +396,7 @@ namespace ams::kern {
const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager, res_limit));
}
auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };
ON_RESULT_FAILURE_2 { m_page_table.Finalize(); };
/* Ensure we can insert the code region. */
R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
@ -424,12 +423,9 @@ namespace ams::kern {
/* Open a reference to the resource limit. */
m_resource_limit->Open();
/* We succeeded, so commit our memory reservation and cancel our guards. */
sys_resource_guard.Cancel();
pt_guard.Cancel();
/* We succeeded, so commit our memory reservation. */
memory_reservation.Commit();
return ResultSuccess();
R_SUCCEED();
}
void KProcess::DoWorkerTaskImpl() {
@ -457,7 +453,7 @@ namespace ams::kern {
};
/* Terminate child threads other than the current one. */
return TerminateChildren(this, GetCurrentThreadPointer());
R_RETURN(TerminateChildren(this, GetCurrentThreadPointer()));
}
void KProcess::FinishTermination() {
@ -556,7 +552,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
@ -590,7 +586,7 @@ namespace ams::kern {
shmem->Open();
info->Open();
return ResultSuccess();
R_SUCCEED();
}
void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
@ -665,14 +661,14 @@ namespace ams::kern {
}
*out = tlr;
return ResultSuccess();
R_SUCCEED();
}
}
/* Allocate a new page. */
tlp = KThreadLocalPage::Allocate();
R_UNLESS(tlp != nullptr, svc::ResultOutOfMemory());
auto tlp_guard = SCOPE_GUARD { KThreadLocalPage::Free(tlp); };
ON_RESULT_FAILURE { KThreadLocalPage::Free(tlp); };
/* Initialize the new page. */
R_TRY(tlp->Initialize(this));
@ -692,9 +688,8 @@ namespace ams::kern {
}
/* We succeeded! */
tlp_guard.Cancel();
*out = tlr;
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::DeleteThreadLocalRegion(KProcessAddress addr) {
@ -742,7 +737,7 @@ namespace ams::kern {
KThreadLocalPage::Free(page_to_free);
}
return ResultSuccess();
R_SUCCEED();
}
void *KProcess::GetThreadLocalRegionPointer(KProcessAddress addr) {
@ -961,7 +956,7 @@ namespace ams::kern {
}
/* Ensure our stack is safe to clean up on exit. */
auto stack_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
if (m_main_thread_stack_size) {
MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
m_main_thread_stack_size = 0;
@ -973,7 +968,7 @@ namespace ams::kern {
/* Initialize our handle table. */
R_TRY(this->InitializeHandleTable(m_capabilities.GetHandleTableSize()));
auto ht_guard = SCOPE_GUARD { this->FinalizeHandleTable(); };
ON_RESULT_FAILURE_2 { this->FinalizeHandleTable(); };
/* Create a new thread for the process. */
KThread *main_thread = KThread::Create();
@ -996,7 +991,7 @@ namespace ams::kern {
/* Update our state. */
this->ChangeState((state == State_Created) ? State_Running : State_RunningAttached);
auto state_guard = SCOPE_GUARD { this->ChangeState(state); };
ON_RESULT_FAILURE_2 { this->ChangeState(state); };
/* Run our thread. */
R_TRY(main_thread->Run());
@ -1004,16 +999,13 @@ namespace ams::kern {
/* Open a reference to represent that we're running. */
this->Open();
/* We succeeded! Cancel our guards. */
state_guard.Cancel();
ht_guard.Cancel();
stack_guard.Cancel();
/* We succeeded! Commit our memory reservation. */
mem_reservation.Commit();
/* Note for debug that we're running a new process. */
MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::Reset() {
@ -1029,7 +1021,7 @@ namespace ams::kern {
/* Clear signaled. */
m_is_signaled = false;
return ResultSuccess();
R_SUCCEED();
}
Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
@ -1071,7 +1063,7 @@ namespace ams::kern {
this->SetSuspended(false);
}
return ResultSuccess();
R_SUCCEED();
}
void KProcess::PinCurrentThread() {
@ -1145,7 +1137,7 @@ namespace ams::kern {
/* We successfully iterated the list. */
*out_num_threads = count;
return ResultSuccess();
R_SUCCEED();
}
KProcess::State KProcess::SetDebugObject(void *debug_object) {
@ -1343,7 +1335,7 @@ namespace ams::kern {
/* We successfully iterated the list. */
*out_num_processes = count;
return ResultSuccess();
R_SUCCEED();
}
}
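
KProcess::Initialize and KProcess::Run above also show the `_2` spelling: ON_RESULT_FAILURE declares the scope's current-result reference and installs a handler, so a second plain use in the same scope would redeclare that reference; ON_RESULT_FAILURE_2 skips the declaration and reuses the existing one (compare the macro definitions at the end of this diff). In the toy terms of the earlier sketch, where the slot is declared once explicitly, the shape looks like this (all operation names hypothetical):

/* Hypothetical operations; only the last one fails. */
Result AcquireFirst()   { return ResultSuccess(); }
void   ReleaseFirst()   { }
Result AcquireSecond()  { return ResultSuccess(); }
void   ReleaseSecond()  { }
Result InitializeLast() { return Result{2}; /* simulate failure */ }

Result TwoGuardsSketch() {
    DECLARE_RESULT_SLOT();

    R_TRY(AcquireFirst());
    OnResultFailure g1{_current_result, [] { ReleaseFirst(); }};   /* like ON_RESULT_FAILURE   */

    R_TRY(AcquireSecond());
    OnResultFailure g2{_current_result, [] { ReleaseSecond(); }};  /* like ON_RESULT_FAILURE_2 */

    R_TRY(InitializeLast());  /* Fails: g2 fires, then g1, in reverse declaration order. */
    R_SUCCEED();
}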

View file

@ -230,7 +230,7 @@ namespace ams::kern {
}
}
return result;
R_RETURN(result);
}
ALWAYS_INLINE Result ProcessReceiveMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
@ -278,7 +278,7 @@ namespace ams::kern {
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
return ResultSuccess();
R_SUCCEED();
}
constexpr ALWAYS_INLINE Result GetMapAliasMemoryState(KMemoryState &out, ipc::MessageBuffer::MapAliasDescriptor::Attribute attr) {
@ -286,10 +286,10 @@ namespace ams::kern {
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_Ipc: out = KMemoryState_Ipc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonSecureIpc: out = KMemoryState_NonSecureIpc; break;
case ipc::MessageBuffer::MapAliasDescriptor::Attribute_NonDeviceIpc: out = KMemoryState_NonDeviceIpc; break;
default: return svc::ResultInvalidCombination();
default: R_THROW(svc::ResultInvalidCombination());
}
return ResultSuccess();
R_SUCCEED();
}
constexpr ALWAYS_INLINE Result GetMapAliasTestStateAndAttributeMask(u32 &out_state, u32 &out_attr_mask, KMemoryState state) {
@ -307,10 +307,10 @@ namespace ams::kern {
out_attr_mask = KMemoryAttribute_Uncached | KMemoryAttribute_Locked;
break;
default:
return svc::ResultInvalidCombination();
R_THROW(svc::ResultInvalidCombination());
}
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE void CleanupSpecialData(KProcess &dst_process, u32 *dst_msg_ptr, size_t dst_buffer_size) {
@ -388,7 +388,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupServerMap(KSessionRequest *request, KProcess *server_process) {
@ -413,7 +413,7 @@ namespace ams::kern {
R_TRY(server_page_table.CleanupForIpcServer(request->GetExchangeServerAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
}
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupClientMap(KSessionRequest *request, KProcessPageTable *client_page_table) {
@ -435,7 +435,7 @@ namespace ams::kern {
R_TRY(client_page_table->CleanupForIpcClient(request->GetExchangeClientAddress(i), request->GetExchangeSize(i), request->GetExchangeMemoryState(i)));
}
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result CleanupMap(KSessionRequest *request, KProcess *server_process, KProcessPageTable *client_page_table) {
@ -445,7 +445,7 @@ namespace ams::kern {
/* Cleanup the client map. */
R_TRY(CleanupClientMap(request, client_page_table));
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result ProcessReceiveMessageMapAliasDescriptors(int &offset, KProcessPageTable &dst_page_table, KProcessPageTable &src_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, KSessionRequest *request, KMemoryPermission perm, bool send) {
@ -471,7 +471,7 @@ namespace ams::kern {
R_TRY(dst_page_table.SetupForIpc(std::addressof(dst_address), size, src_address, src_page_table, perm, dst_state, send));
/* Ensure that we clean up on failure. */
auto setup_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
dst_page_table.CleanupForIpcServer(dst_address, size, dst_state);
src_page_table.CleanupForIpcClient(src_address, size, dst_state);
};
@ -484,15 +484,12 @@ namespace ams::kern {
} else {
R_TRY(request->PushReceive(src_address, dst_address, size, dst_state));
}
/* We successfully pushed the mapping. */
setup_guard.Cancel();
}
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::MapAliasDescriptor(GetVoidPointer(dst_address), size, src_desc.GetAttribute()));
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result ReceiveMessage(bool &recv_list_broken, uintptr_t dst_message_buffer, size_t dst_buffer_size, KPhysicalAddress dst_message_paddr, KThread &src_thread, uintptr_t src_message_buffer, size_t src_buffer_size, KServerSession *session, KSessionRequest *request) {
@ -579,7 +576,7 @@ namespace ams::kern {
int offset = dst_msg.Set(src_header);
/* Set up a guard to make sure that we end up in a clean state on error. */
auto cleanup_guard = SCOPE_GUARD {
ON_RESULT_FAILURE {
/* Cleanup mappings. */
CleanupMap(request, std::addressof(dst_process), std::addressof(src_page_table));
@ -678,8 +675,7 @@ namespace ams::kern {
}
/* We succeeded! */
cleanup_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result ProcessSendMessageReceiveMapping(KProcessPageTable &dst_page_table, KProcessAddress client_address, KProcessAddress server_address, size_t size, KMemoryState src_state) {
@ -720,7 +716,7 @@ namespace ams::kern {
mapping_src_end));
}
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result ProcessSendMessagePointerDescriptors(int &offset, int &pointer_key, KProcessPageTable &dst_page_table, const ipc::MessageBuffer &dst_msg, const ipc::MessageBuffer &src_msg, const ReceiveList &dst_recv_list, bool dst_user) {
@ -759,7 +755,7 @@ namespace ams::kern {
/* Set the output descriptor. */
dst_msg.Set(cur_offset, ipc::MessageBuffer::PointerDescriptor(reinterpret_cast<void *>(recv_pointer), recv_size, src_desc.GetIndex()));
return ResultSuccess();
R_SUCCEED();
}
ALWAYS_INLINE Result SendMessage(uintptr_t src_message_buffer, size_t src_buffer_size, KPhysicalAddress src_message_paddr, KThread &dst_thread, uintptr_t dst_message_buffer, size_t dst_buffer_size, KServerSession *session, KSessionRequest *request) {
@ -820,8 +816,10 @@ namespace ams::kern {
int pointer_key = 0;
bool processed_special_data = false;
/* Set up a guard to make sure that we end up in a clean state on error. */
auto cleanup_guard = SCOPE_GUARD {
/* Send the message. */
{
/* Make sure that we end up in a clean state on error. */
ON_RESULT_FAILURE {
/* Cleanup special data. */
if (processed_special_data) {
if (src_header.GetHasSpecialHeader()) {
@ -933,11 +931,10 @@ namespace ams::kern {
src_message_buffer + offset_words));
}
}
}
/* We succeeded. Perform cleanup with validation. */
cleanup_guard.Cancel();
return CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table));
/* Perform (and validate) any remaining cleanup. */
R_RETURN(CleanupMap(request, std::addressof(src_process), std::addressof(dst_page_table)));
}
ALWAYS_INLINE void ReplyAsyncError(KProcess *to_process, uintptr_t to_msg_buf, size_t to_msg_buf_size, Result result) {
@ -1065,7 +1062,7 @@ namespace ams::kern {
}
}
return result;
R_RETURN(result);
}
Result KServerSession::SendReply(uintptr_t server_message, uintptr_t server_buffer_size, KPhysicalAddress server_message_paddr) {
@ -1161,7 +1158,7 @@ namespace ams::kern {
}
}
return result;
R_RETURN(result);
}
Result KServerSession::OnRequest(KSessionRequest *request) {
@ -1200,7 +1197,7 @@ namespace ams::kern {
GetCurrentThread().BeginWait(std::addressof(wait_queue));
}
return GetCurrentThread().GetWaitResult();
R_RETURN(GetCurrentThread().GetWaitResult());
}
bool KServerSession::IsSignaledImpl() const {
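
The return-statement rewrites in this file (SendReply, OnRequest, and the helpers above) use R_RETURN rather than a bare return so the propagated result flows through the same per-scope slot that R_TRY and R_THROW use, keeping any in-scope failure handlers informed of how the function exits. In the toy terms of the first sketch it is a one-liner; note that the toy makes R_RETURN and R_THROW identical, and the two spellings in this commit read as intent markers (R_THROW for a known error value, R_RETURN for passing a result through):

/* Record the value in the scope's result slot, then return it, so in-scope
   guards observe exactly what the caller will receive. */
#define R_RETURN(expr) return (_current_result = (expr))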

View file

@ -235,7 +235,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KThread::InitializeThread(KThread *thread, KThreadFunction func, uintptr_t arg, KProcessAddress user_stack_top, s32 prio, s32 core, KProcess *owner, ThreadType type) {
@ -250,23 +250,23 @@ namespace ams::kern {
/* Map the stack page. */
KProcessAddress stack_top = Null<KProcessAddress>;
{
/* If we fail to map, avoid leaking the page. */
ON_RESULT_FAILURE { KPageBuffer::Free(page); };
/* Perform the mapping. */
KProcessAddress stack_bottom = Null<KProcessAddress>;
auto page_guard = SCOPE_GUARD { KPageBuffer::Free(page); };
R_TRY(Kernel::GetKernelPageTable().MapPages(std::addressof(stack_bottom), 1, PageSize, page->GetPhysicalAddress(), stack_region.GetAddress(),
stack_region.GetSize() / PageSize, KMemoryState_Kernel, KMemoryPermission_KernelReadWrite));
page_guard.Cancel();
/* Calculate top of the stack. */
stack_top = stack_bottom + PageSize;
}
/* Initialize the thread. */
auto map_guard = SCOPE_GUARD { CleanupKernelStack(GetInteger(stack_top)); };
R_TRY(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
map_guard.Cancel();
/* If we fail, cleanup the stack we mapped. */
ON_RESULT_FAILURE { CleanupKernelStack(GetInteger(stack_top)); };
return ResultSuccess();
/* Initialize the thread. */
R_RETURN(thread->Initialize(func, arg, GetVoidPointer(stack_top), user_stack_top, prio, core, owner, type));
}
void KThread::PostDestroy(uintptr_t arg) {
@ -576,7 +576,7 @@ namespace ams::kern {
*out_affinity_mask = m_virtual_affinity_mask;
}
return ResultSuccess();
R_SUCCEED();
}
Result KThread::GetPhysicalCoreMask(int32_t *out_ideal_core, u64 *out_affinity_mask) {
@ -595,7 +595,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
Result KThread::SetCoreMask(int32_t core_id, u64 v_affinity_mask) {
@ -700,7 +700,7 @@ namespace ams::kern {
} while (retry_update);
}
return ResultSuccess();
R_SUCCEED();
}
void KThread::SetBasePriority(s32 priority) {
@ -752,7 +752,7 @@ namespace ams::kern {
m_base_priority = IdleThreadPriority;
KScheduler::OnThreadPriorityChanged(this, old_priority);
return ResultSuccess();
R_SUCCEED();
}
void KThread::RequestSuspend(SuspendType type) {
@ -923,7 +923,7 @@ namespace ams::kern {
} while (thread_is_current);
}
return ResultSuccess();
R_SUCCEED();
}
Result KThread::GetThreadContext3(ams::svc::ThreadContext *out) {
@ -944,7 +944,7 @@ namespace ams::kern {
}
}
return ResultSuccess();
R_SUCCEED();
}
void KThread::AddWaiterImpl(KThread *thread) {
@ -1121,7 +1121,7 @@ namespace ams::kern {
/* Set our state and finish. */
this->SetState(KThread::ThreadState_Runnable);
return ResultSuccess();
R_SUCCEED();
}
}
@ -1165,15 +1165,15 @@ namespace ams::kern {
MESOSPHERE_ASSERT_THIS();
MESOSPHERE_ASSERT(this != GetCurrentThreadPointer());
/* Request the thread terminate. */
/* Request the thread terminate if it hasn't already. */
if (const auto new_state = this->RequestTerminate(); new_state != ThreadState_Terminated) {
/* If the thread isn't terminated, wait for it to terminate. */
s32 index;
KSynchronizationObject *objects[] = { this };
return KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite);
} else {
return ResultSuccess();
R_TRY(KSynchronizationObject::Wait(std::addressof(index), objects, 1, ams::svc::WaitInfinite));
}
R_SUCCEED();
}
KThread::ThreadState KThread::RequestTerminate() {
@ -1248,7 +1248,7 @@ namespace ams::kern {
/* Check if the thread should terminate. */
if (this->IsTerminationRequested()) {
slp.CancelSleep();
return svc::ResultTerminationRequested();
R_THROW(svc::ResultTerminationRequested());
}
/* Wait for the sleep to end. */
@ -1256,7 +1256,7 @@ namespace ams::kern {
this->BeginWait(std::addressof(wait_queue));
}
return ResultSuccess();
R_SUCCEED();
}
void KThread::BeginWait(KThreadQueue *queue) {
@ -1357,7 +1357,7 @@ namespace ams::kern {
/* We successfully iterated the list. */
*out_num_threads = count;
return ResultSuccess();
R_SUCCEED();
}
}
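
KThread::InitializeThread above also demonstrates guard scoping: the handler is installed before the fallible call it protects, and its enclosing braces bound its lifetime, so once the block exits successfully the guard is retired without the explicit page_guard.Cancel() the old code needed. A sketch of that shape, reusing the toy definitions and R_RETURN from the earlier examples (CleanupStack and RunInit are hypothetical):

void   CleanupStack() { }                         /* hypothetical */
Result RunInit()      { return ResultSuccess(); } /* hypothetical */

Result InitializeThreadSketch() {
    DECLARE_RESULT_SLOT();
    {
        /* Covers only failures that unwind while this block is live. */
        OnResultFailure g{_current_result, [] { FreeThing(); }};
        R_TRY(AllocateThing());
    }   /* Normal exit: the slot still holds success, so no cleanup fires. */

    /* A failure from here on needs different cleanup. */
    OnResultFailure g2{_current_result, [] { CleanupStack(); }};
    R_RETURN(RunInit());
}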

View file

@ -26,14 +26,10 @@ namespace ams::kern {
/* Allocate a new page. */
KPageBuffer *page_buf = KPageBuffer::Allocate();
R_UNLESS(page_buf != nullptr, svc::ResultOutOfMemory());
auto page_buf_guard = SCOPE_GUARD { KPageBuffer::Free(page_buf); };
ON_RESULT_FAILURE { KPageBuffer::Free(page_buf); };
/* Map the address in. */
R_TRY(m_owner->GetPageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));
/* We succeeded. */
page_buf_guard.Cancel();
return ResultSuccess();
R_RETURN(m_owner->GetPageTable().MapPages(std::addressof(m_virt_addr), 1, PageSize, page_buf->GetPhysicalAddress(), KMemoryState_ThreadLocal, KMemoryPermission_UserReadWrite));
}
Result KThreadLocalPage::Finalize() {
@ -48,7 +44,7 @@ namespace ams::kern {
/* Free the page. */
KPageBuffer::Free(KPageBuffer::FromPhysicalAddress(phys_addr));
return ResultSuccess();
R_SUCCEED();
}
KProcessAddress KThreadLocalPage::Reserve() {

View file

@ -29,7 +29,7 @@ namespace ams::kern::svc {
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
R_UNLESS(event.IsNotNull(), svc::ResultInvalidHandle());
return event->Signal();
R_RETURN(event->Signal());
}
Result ClearEvent(ams::svc::Handle event_handle) {
@ -40,7 +40,7 @@ namespace ams::kern::svc {
{
KScopedAutoObject event = handle_table.GetObject<KEvent>(event_handle);
if (event.IsNotNull()) {
return event->Clear();
R_RETURN(event->Clear());
}
}
@ -49,14 +49,14 @@ namespace ams::kern::svc {
KScopedAutoObject readable_event = handle_table.GetObject<KReadableEvent>(event_handle);
if (readable_event.IsNotNull()) {
if (auto * const interrupt_event = readable_event->DynamicCast<KInterruptEvent *>(); interrupt_event != nullptr) {
return interrupt_event->Clear();
R_RETURN(interrupt_event->Clear());
} else {
return readable_event->Clear();
R_RETURN(readable_event->Clear());
}
}
}
return svc::ResultInvalidHandle();
R_THROW(svc::ResultInvalidHandle());
}
Result CreateEvent(ams::svc::Handle *out_write, ams::svc::Handle *out_read) {
@ -107,14 +107,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_write, event));
/* Ensure that we maintain a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_write); };
ON_RESULT_FAILURE { handle_table.Remove(*out_write); };
/* Add the readable event to the handle table. */
R_TRY(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_read, std::addressof(event->GetReadableEvent())));
}
}
@ -122,29 +118,29 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result SignalEvent64(ams::svc::Handle event_handle) {
return SignalEvent(event_handle);
R_RETURN(SignalEvent(event_handle));
}
Result ClearEvent64(ams::svc::Handle event_handle) {
return ClearEvent(event_handle);
R_RETURN(ClearEvent(event_handle));
}
Result CreateEvent64(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
return CreateEvent(out_write_handle, out_read_handle);
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
}
/* ============================= 64From32 ABI ============================= */
Result SignalEvent64From32(ams::svc::Handle event_handle) {
return SignalEvent(event_handle);
R_RETURN(SignalEvent(event_handle));
}
Result ClearEvent64From32(ams::svc::Handle event_handle) {
return ClearEvent(event_handle);
R_RETURN(ClearEvent(event_handle));
}
Result CreateEvent64From32(ams::svc::Handle *out_write_handle, ams::svc::Handle *out_read_handle) {
return CreateEvent(out_write_handle, out_read_handle);
R_RETURN(CreateEvent(out_write_handle, out_read_handle));
}
}

View file

@ -34,7 +34,7 @@ namespace ams::kern::svc {
MESOSPHERE_ASSERT(parent.IsNotNull());
/* Send the request. */
return session->SendSyncRequest(message, buffer_size);
R_RETURN(session->SendSyncRequest(message, buffer_size));
}
ALWAYS_INLINE Result ReplyAndReceiveImpl(int32_t *out_index, uintptr_t message, size_t buffer_size, KPhysicalAddress message_paddr, KSynchronizationObject **objs, int32_t num_objects, ams::svc::Handle reply_target, int64_t timeout_ns) {
@ -44,13 +44,10 @@ namespace ams::kern::svc {
R_UNLESS(session.IsNotNull(), svc::ResultInvalidHandle());
/* If we fail to reply, we want to set the output index to -1. */
auto reply_idx_guard = SCOPE_GUARD { *out_index = -1; };
ON_RESULT_FAILURE { *out_index = -1; };
/* Send the reply. */
R_TRY(session->SendReply(message, buffer_size, message_paddr));
/* Cancel our guard. */
reply_idx_guard.Cancel();
}
/* Receive a message. */
@ -81,7 +78,7 @@ namespace ams::kern::svc {
s32 index;
Result result = KSynchronizationObject::Wait(std::addressof(index), objs, num_objects, timeout);
if (svc::ResultTimedOut::Includes(result)) {
return result;
R_THROW(result);
}
/* Receive the request. */
@ -96,7 +93,7 @@ namespace ams::kern::svc {
}
*out_index = index;
return result;
R_RETURN(result);
}
}
}
@ -129,11 +126,11 @@ namespace ams::kern::svc {
}
};
return ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, objs, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, objs, num_handles, reply_target, timeout_ns));
}
ALWAYS_INLINE Result SendSyncRequest(ams::svc::Handle session_handle) {
return SendSyncRequestImpl(0, 0, session_handle);
R_RETURN(SendSyncRequestImpl(0, 0, session_handle));
}
ALWAYS_INLINE Result SendSyncRequestWithUserBuffer(uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
@ -149,16 +146,17 @@ namespace ams::kern::svc {
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
/* Ensure that even if we fail, we unlock the message buffer when done. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
{
/* If we fail to send the message, unlock the message buffer. */
ON_RESULT_FAILURE { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(SendSyncRequestImpl(message, buffer_size, session_handle));
}
/* We sent the request successfully, so cancel our guard and check the unlock result. */
unlock_guard.Cancel();
return page_table.UnlockForIpcUserBuffer(message, buffer_size);
/* We successfully processed the message, so try to unlock the message buffer. */
R_RETURN(page_table.UnlockForIpcUserBuffer(message, buffer_size));
}
ALWAYS_INLINE Result SendAsyncRequestWithUserBufferImpl(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
@ -201,14 +199,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_event_handle, std::addressof(event->GetReadableEvent())));
/* Ensure that if we fail to send the request, we close the readable handle. */
auto read_guard = SCOPE_GUARD { handle_table.Remove(*out_event_handle); };
ON_RESULT_FAILURE { handle_table.Remove(*out_event_handle); };
/* Send the async request. */
R_TRY(session->SendAsyncRequest(event, message, buffer_size));
/* We succeeded. */
read_guard.Cancel();
return ResultSuccess();
R_RETURN(session->SendAsyncRequest(event, message, buffer_size));
}
ALWAYS_INLINE Result SendAsyncRequestWithUserBuffer(ams::svc::Handle *out_event_handle, uintptr_t message, size_t buffer_size, ams::svc::Handle session_handle) {
@ -224,23 +218,18 @@ namespace ams::kern::svc {
/* Lock the message buffer. */
R_TRY(page_table.LockForIpcUserBuffer(nullptr, message, buffer_size));
/* Ensure that if we fail, we unlock the message buffer. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
/* Ensure that if we fail and aren't terminating that we unlock the user buffer. */
ON_RESULT_FAILURE_BESIDES(svc::ResultTerminationRequested) {
page_table.UnlockForIpcUserBuffer(message, buffer_size);
};
/* Send the request. */
MESOSPHERE_ASSERT(message != 0);
const Result result = SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle);
/* If the request succeeds (or the thread is terminating), don't unlock the user buffer. */
if (R_SUCCEEDED(result) || svc::ResultTerminationRequested::Includes(result)) {
unlock_guard.Cancel();
}
return result;
R_RETURN(SendAsyncRequestWithUserBufferImpl(out_event_handle, message, buffer_size, session_handle));
}
ALWAYS_INLINE Result ReplyAndReceive(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
return ReplyAndReceiveImpl(out_index, 0, 0, Null<KPhysicalAddress>, handles, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceiveImpl(out_index, 0, 0, Null<KPhysicalAddress>, handles, num_handles, reply_target, timeout_ns));
}
ALWAYS_INLINE Result ReplyAndReceiveWithUserBuffer(int32_t *out_index, uintptr_t message, size_t buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
@ -257,16 +246,17 @@ namespace ams::kern::svc {
KPhysicalAddress message_paddr;
R_TRY(page_table.LockForIpcUserBuffer(std::addressof(message_paddr), message, buffer_size));
/* Ensure that even if we fail, we unlock the message buffer when done. */
auto unlock_guard = SCOPE_GUARD { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
{
/* If we fail to send the message, unlock the message buffer. */
ON_RESULT_FAILURE { page_table.UnlockForIpcUserBuffer(message, buffer_size); };
/* Send the request. */
/* Reply/Receive the request. */
MESOSPHERE_ASSERT(message != 0);
R_TRY(ReplyAndReceiveImpl(out_index, message, buffer_size, message_paddr, handles, num_handles, reply_target, timeout_ns));
}
/* We sent the request successfully, so cancel our guard and check the unlock result. */
unlock_guard.Cancel();
return page_table.UnlockForIpcUserBuffer(message, buffer_size);
/* We successfully processed the message, so try to unlock the message buffer. */
R_RETURN(page_table.UnlockForIpcUserBuffer(message, buffer_size));
}
}
@ -274,45 +264,45 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result SendSyncRequest64(ams::svc::Handle session_handle) {
return SendSyncRequest(session_handle);
R_RETURN(SendSyncRequest(session_handle));
}
Result SendSyncRequestWithUserBuffer64(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle);
R_RETURN(SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle));
}
Result SendAsyncRequestWithUserBuffer64(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle);
R_RETURN(SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle));
}
Result ReplyAndReceive64(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns));
}
Result ReplyAndReceiveWithUserBuffer64(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns));
}
/* ============================= 64From32 ABI ============================= */
Result SendSyncRequest64From32(ams::svc::Handle session_handle) {
return SendSyncRequest(session_handle);
R_RETURN(SendSyncRequest(session_handle));
}
Result SendSyncRequestWithUserBuffer64From32(ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
return SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle);
R_RETURN(SendSyncRequestWithUserBuffer(message_buffer, message_buffer_size, session_handle));
}
Result SendAsyncRequestWithUserBuffer64From32(ams::svc::Handle *out_event_handle, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, ams::svc::Handle session_handle) {
return SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle);
R_RETURN(SendAsyncRequestWithUserBuffer(out_event_handle, message_buffer, message_buffer_size, session_handle));
}
Result ReplyAndReceive64From32(int32_t *out_index, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
return ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceive(out_index, handles, num_handles, reply_target, timeout_ns));
}
Result ReplyAndReceiveWithUserBuffer64From32(int32_t *out_index, ams::svc::Address message_buffer, ams::svc::Size message_buffer_size, KUserPointer<const ams::svc::Handle *> handles, int32_t num_handles, ams::svc::Handle reply_target, int64_t timeout_ns) {
return ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns);
R_RETURN(ReplyAndReceiveWithUserBuffer(out_index, message_buffer, message_buffer_size, handles, num_handles, reply_target, timeout_ns));
}
}
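
One conversion in this file needs more than a plain failure guard: when SendAsyncRequestWithUserBuffer fails with svc::ResultTerminationRequested, the user buffer must stay locked. The old code expressed that by conditionally cancelling the guard after the fact; the new ON_RESULT_FAILURE_BESIDES(Rs...) runs its body only for failures that match none of the listed results. A toy analogue, continuing the earlier sketches (the real macro takes result classes and calls their Includes(); this simplification compares a single made-up value):

constexpr int ToyResultTerminationRequested = 59;  /* made-up value for the sketch */

/* Fires on failure unless the failing result matches the exemption. */
template<typename F>
struct OnResultFailureBesides {
    const Result &result;
    int           exempt;
    F             cleanup;
    ~OnResultFailureBesides() {
        if (result.IsFailure() && result.value != exempt) { cleanup(); }
    }
};

Result LockBuffer()   { return ResultSuccess(); }                        /* hypothetical */
void   UnlockBuffer() { std::puts("unlocked"); }                         /* hypothetical */
Result SendAsync()    { return Result{ToyResultTerminationRequested}; }  /* hypothetical */

Result SendAsyncSketch() {
    DECLARE_RESULT_SLOT();
    R_TRY(LockBuffer());
    OnResultFailureBesides g{_current_result, ToyResultTerminationRequested,
                             [] { UnlockBuffer(); }};
    /* Termination-requested failures leave the buffer locked on purpose. */
    R_RETURN(SendAsync());
}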

View file

@ -52,13 +52,10 @@ namespace ams::kern::svc {
/* Register the handle in the table. */
R_TRY(handle_table.Add(out_server_handle, std::addressof(port->GetServerPort())));
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_server_handle); };
ON_RESULT_FAILURE { handle_table.Remove(*out_server_handle); };
/* Create a new object name. */
R_TRY(KObjectName::NewFromName(std::addressof(port->GetClientPort()), name));
/* We succeeded, so don't leak the handle. */
handle_guard.Cancel();
} else /* if (max_sessions == 0) */ {
/* Ensure that this else case is correct. */
MESOSPHERE_AUDIT(max_sessions == 0);
@ -70,7 +67,7 @@ namespace ams::kern::svc {
R_TRY(KObjectName::Delete<KClientPort>(name));
}
return ResultSuccess();
R_SUCCEED();
}
Result CreatePort(ams::svc::Handle *out_server, ams::svc::Handle *out_client, int32_t max_sessions, bool is_light, uintptr_t name) {
@ -100,14 +97,10 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_client, std::addressof(port->GetClientPort())));
/* Ensure that we maintain a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_client); };
ON_RESULT_FAILURE { handle_table.Remove(*out_client); };
/* Add the server to the handle table. */
R_TRY(handle_table.Add(out_server, std::addressof(port->GetServerPort())));
/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_server, std::addressof(port->GetServerPort())));
}
Result ConnectToNamedPort(ams::svc::Handle *out, KUserPointer<const char *> user_name) {
@ -128,7 +121,7 @@ namespace ams::kern::svc {
/* Reserve a handle for the port. */
/* NOTE: Nintendo really does write directly to the output handle here. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };
/* Create a session. */
KClientSession *session;
@ -139,8 +132,7 @@ namespace ams::kern::svc {
session->Close();
/* We succeeded. */
handle_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
Result ConnectToPort(ams::svc::Handle *out, ams::svc::Handle port) {
@ -154,7 +146,7 @@ namespace ams::kern::svc {
/* Reserve a handle for the port. */
/* NOTE: Nintendo really does write directly to the output handle here. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };
/* Create the session. */
KAutoObject *session;
@ -169,8 +161,7 @@ namespace ams::kern::svc {
session->Close();
/* We succeeded. */
handle_guard.Cancel();
return ResultSuccess();
R_SUCCEED();
}
}
@ -178,37 +169,37 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result ConnectToNamedPort64(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
return ConnectToNamedPort(out_handle, name);
R_RETURN(ConnectToNamedPort(out_handle, name));
}
Result CreatePort64(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name);
R_RETURN(CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name));
}
Result ManageNamedPort64(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
return ManageNamedPort(out_server_handle, name, max_sessions);
R_RETURN(ManageNamedPort(out_server_handle, name, max_sessions));
}
Result ConnectToPort64(ams::svc::Handle *out_handle, ams::svc::Handle port) {
return ConnectToPort(out_handle, port);
R_RETURN(ConnectToPort(out_handle, port));
}
/* ============================= 64From32 ABI ============================= */
Result ConnectToNamedPort64From32(ams::svc::Handle *out_handle, KUserPointer<const char *> name) {
return ConnectToNamedPort(out_handle, name);
R_RETURN(ConnectToNamedPort(out_handle, name));
}
Result CreatePort64From32(ams::svc::Handle *out_server_handle, ams::svc::Handle *out_client_handle, int32_t max_sessions, bool is_light, ams::svc::Address name) {
return CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name);
R_RETURN(CreatePort(out_server_handle, out_client_handle, max_sessions, is_light, name));
}
Result ManageNamedPort64From32(ams::svc::Handle *out_server_handle, KUserPointer<const char *> name, int32_t max_sessions) {
return ManageNamedPort(out_server_handle, name, max_sessions);
R_RETURN(ManageNamedPort(out_server_handle, name, max_sessions));
}
Result ConnectToPort64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) {
return ConnectToPort(out_handle, port);
R_RETURN(ConnectToPort(out_handle, port));
}
}
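
Several conversions here (CreatePort above and, in the same shape, CreateSession and MapSharedMemory below) collapse the old three-step tail -- final fallible call under a guard, guard.Cancel(), return ResultSuccess() -- into a single R_RETURN of that call: once the guard keys on the function's pending result, the last call's own result is precisely the condition the guard should fire on. The shape, in the toy terms used so far (handle operations are hypothetical):

Result AddFirst()    { return ResultSuccess(); }  /* hypothetical */
Result AddSecond()   { return ResultSuccess(); }  /* hypothetical */
void   RemoveFirst() { }                          /* hypothetical */

Result AddTwoHandlesSketch() {
    DECLARE_RESULT_SLOT();
    R_TRY(AddFirst());
    OnResultFailure g{_current_result, [] { RemoveFirst(); }};
    /* No Cancel(), no trailing R_SUCCEED(): a failure here fires g,
       and a success propagates directly. */
    R_RETURN(AddSecond());
}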

View file

@ -43,13 +43,11 @@ namespace ams::kern::svc {
/* Try to allocate a session from unused slab memory. */
session = T::CreateFromUnusedSlabMemory();
R_UNLESS(session != nullptr, svc::ResultLimitReached());
ON_RESULT_FAILURE { session->Close(); };
/* If we're creating a KSession, we want to add two KSessionRequests to the heap, to prevent request exhaustion. */
/* NOTE: Nintendo checks if session->DynamicCast<KSession *>() != nullptr, but there's no reason to not do this statically. */
if constexpr (std::same_as<T, KSession>) {
/* Ensure that if we fail to allocate our session requests, we close the session we created. */
auto session_guard = SCOPE_GUARD { session->Close(); };
{
for (size_t i = 0; i < 2; ++i) {
KSessionRequest *request = KSessionRequest::CreateFromUnusedSlabMemory();
R_UNLESS(request != nullptr, svc::ResultLimitReached());
@ -57,8 +55,6 @@ namespace ams::kern::svc {
request->Close();
}
}
session_guard.Cancel();
}
/* We successfully allocated a session, so add the object we allocated to the resource limit. */
Kernel::GetSystemResourceLimit().Add(ams::svc::LimitableResource_SessionCountMax, 1);
@ -86,21 +82,17 @@ namespace ams::kern::svc {
R_TRY(handle_table.Add(out_server, std::addressof(session->GetServerSession())));
/* Ensure that we maintain a clean handle state on exit. */
auto handle_guard = SCOPE_GUARD { handle_table.Remove(*out_server); };
ON_RESULT_FAILURE { handle_table.Remove(*out_server); };
/* Add the client session to the handle table. */
R_TRY(handle_table.Add(out_client, std::addressof(session->GetClientSession())));
/* We succeeded! */
handle_guard.Cancel();
return ResultSuccess();
R_RETURN(handle_table.Add(out_client, std::addressof(session->GetClientSession())));
}
Result CreateSession(ams::svc::Handle *out_server, ams::svc::Handle *out_client, bool is_light, uintptr_t name) {
if (is_light) {
return CreateSession<KLightSession>(out_server, out_client, name);
R_RETURN(CreateSession<KLightSession>(out_server, out_client, name));
} else {
return CreateSession<KSession>(out_server, out_client, name);
R_RETURN(CreateSession<KSession>(out_server, out_client, name));
}
}
@ -114,7 +106,7 @@ namespace ams::kern::svc {
/* Reserve an entry for the new session. */
R_TRY(handle_table.Reserve(out));
auto handle_guard = SCOPE_GUARD { handle_table.Unreserve(*out); };
ON_RESULT_FAILURE { handle_table.Unreserve(*out); };
/* Accept the session. */
KAutoObject *session;
@ -129,10 +121,9 @@ namespace ams::kern::svc {
/* Register the session. */
handle_table.Register(*out, session);
handle_guard.Cancel();
session->Close();
return ResultSuccess();
R_SUCCEED();
}
}
@ -140,21 +131,21 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result CreateSession64(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) {
return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name);
R_RETURN(CreateSession(out_server_session_handle, out_client_session_handle, is_light, name));
}
Result AcceptSession64(ams::svc::Handle *out_handle, ams::svc::Handle port) {
return AcceptSession(out_handle, port);
R_RETURN(AcceptSession(out_handle, port));
}
/* ============================= 64From32 ABI ============================= */
Result CreateSession64From32(ams::svc::Handle *out_server_session_handle, ams::svc::Handle *out_client_session_handle, bool is_light, ams::svc::Address name) {
return CreateSession(out_server_session_handle, out_client_session_handle, is_light, name);
R_RETURN(CreateSession(out_server_session_handle, out_client_session_handle, is_light, name));
}
Result AcceptSession64From32(ams::svc::Handle *out_handle, ams::svc::Handle port) {
return AcceptSession(out_handle, port);
R_RETURN(AcceptSession(out_handle, port));
}
}

View file

@ -60,14 +60,10 @@ namespace ams::kern::svc {
R_TRY(process.AddSharedMemory(shmem.GetPointerUnsafe(), address, size));
/* Ensure that we clean up the shared memory if we fail to map it. */
auto guard = SCOPE_GUARD { process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); };
ON_RESULT_FAILURE { process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size); };
/* Map the shared memory. */
R_TRY(shmem->Map(std::addressof(page_table), address, size, std::addressof(process), map_perm));
/* We succeeded. */
guard.Cancel();
return ResultSuccess();
R_RETURN(shmem->Map(std::addressof(page_table), address, size, std::addressof(process), map_perm));
}
Result UnmapSharedMemory(ams::svc::Handle shmem_handle, uintptr_t address, size_t size) {
@ -94,7 +90,7 @@ namespace ams::kern::svc {
/* Remove the shared memory from the process. */
process.RemoveSharedMemory(shmem.GetPointerUnsafe(), address, size);
return ResultSuccess();
R_SUCCEED();
}
Result CreateSharedMemory(ams::svc::Handle *out, size_t size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
@ -122,7 +118,7 @@ namespace ams::kern::svc {
/* Add the shared memory to the handle table. */
R_TRY(GetCurrentProcess().GetHandleTable().Add(out, shmem));
return ResultSuccess();
R_SUCCEED();
}
}
@ -130,29 +126,29 @@ namespace ams::kern::svc {
/* ============================= 64 ABI ============================= */
Result MapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
return MapSharedMemory(shmem_handle, address, size, map_perm);
R_RETURN(MapSharedMemory(shmem_handle, address, size, map_perm));
}
Result UnmapSharedMemory64(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) {
return UnmapSharedMemory(shmem_handle, address, size);
R_RETURN(UnmapSharedMemory(shmem_handle, address, size));
}
Result CreateSharedMemory64(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
return CreateSharedMemory(out_handle, size, owner_perm, remote_perm);
R_RETURN(CreateSharedMemory(out_handle, size, owner_perm, remote_perm));
}
/* ============================= 64From32 ABI ============================= */
Result MapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size, ams::svc::MemoryPermission map_perm) {
return MapSharedMemory(shmem_handle, address, size, map_perm);
R_RETURN(MapSharedMemory(shmem_handle, address, size, map_perm));
}
Result UnmapSharedMemory64From32(ams::svc::Handle shmem_handle, ams::svc::Address address, ams::svc::Size size) {
return UnmapSharedMemory(shmem_handle, address, size);
R_RETURN(UnmapSharedMemory(shmem_handle, address, size));
}
Result CreateSharedMemory64From32(ams::svc::Handle *out_handle, ams::svc::Size size, ams::svc::MemoryPermission owner_perm, ams::svc::MemoryPermission remote_perm) {
return CreateSharedMemory(out_handle, size, owner_perm, remote_perm);
R_RETURN(CreateSharedMemory(out_handle, size, owner_perm, remote_perm));
}
}

View file

@ -484,9 +484,6 @@ namespace ams::ncm {
/* Mark storage id to be committed later. */
commit_list.Push(reader.GetStorageId());
/* We successfully committed this meta, so we want to check for errors when updating. */
update_guard.Cancel();
}
/* Try to update our data. */
@ -724,7 +721,6 @@ namespace ams::ncm {
R_TRY(this->WritePlaceHolder(meta_info.key, out_install_content_info));
/* Don't delete the placeholder. Set state to installed. */
placeholder_guard.Cancel();
out_install_content_info->install_state = InstallState::Installed;
R_SUCCEED();
}

View file

@ -294,8 +294,20 @@ namespace ams::result::impl {
constexpr ALWAYS_INLINE bool EvaluateResultSuccess(const ::ams::Result &r) { return R_SUCCEEDED(r); }
constexpr ALWAYS_INLINE bool EvaluateResultFailure(const ::ams::Result &r) { return R_FAILED(r); }
template<typename R>
constexpr ALWAYS_INLINE bool EvaluateResultIncludedImplForSuccessCompatibility(const ::ams::Result &r) {
if constexpr (std::same_as<R, ::ams::ResultSuccess>) {
return R_SUCCEEDED(r);
} else {
return R::Includes(r);
}
}
template<typename... Rs>
constexpr ALWAYS_INLINE bool EvaluateAnyResultIncludes(const ::ams::Result &r) { return (Rs::Includes(r) || ...); }
constexpr ALWAYS_INLINE bool EvaluateAnyResultIncludes(const ::ams::Result &r) { return (EvaluateResultIncludedImplForSuccessCompatibility<Rs>(r) || ...); }
template<typename... Rs>
constexpr ALWAYS_INLINE bool EvaluateResultNotIncluded(const ::ams::Result &r) { return !EvaluateAnyResultIncludes<Rs...>(r); }
}
@ -305,9 +317,9 @@ namespace ams::result::impl {
[[maybe_unused]] Result __tmp_result_##COUNTER_VALUE = ResultSuccess(); \
::ams::Result &__TmpCurrentResultReference = HasPrevRef_##COUNTER_VALUE ? PrevRef_##COUNTER_VALUE : __tmp_result_##COUNTER_VALUE
#define ON_RESULT_RETURN_IMPL(EVALUATE_RESULT) \
#define ON_RESULT_RETURN_IMPL(...) \
static_assert(std::same_as<decltype(__TmpCurrentResultReference), Result &>); \
auto ANONYMOUS_VARIABLE(RESULT_GUARD_STATE_) = ::ams::result::impl::ResultReferenceForScopedResultGuard<EVALUATE_RESULT>(__TmpCurrentResultReference) + [&]() ALWAYS_INLINE_LAMBDA
auto ANONYMOUS_VARIABLE(RESULT_GUARD_STATE_) = ::ams::result::impl::ResultReferenceForScopedResultGuard<__VA_ARGS__>(__TmpCurrentResultReference) + [&]() ALWAYS_INLINE_LAMBDA
#define ON_RESULT_FAILURE_2 ON_RESULT_RETURN_IMPL(::ams::result::impl::EvaluateResultFailure)
@ -321,11 +333,21 @@ namespace ams::result::impl {
AMS_DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
ON_RESULT_SUCCESS_2
#define ON_RESULT_INCLUDED_2(RS) ON_RESULT_RETURN_IMPL(::ams::result::impl::EvaluateAnyResultIncludes<RS>)
#define ON_RESULT_INCLUDED_2(...) ON_RESULT_RETURN_IMPL(::ams::result::impl::EvaluateAnyResultIncludes<__VA_ARGS__>)
#define ON_RESULT_INCLUDED(RS) \
#define ON_RESULT_INCLUDED(...) \
AMS_DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
ON_RESULT_INCLUDED_2(RS)
ON_RESULT_INCLUDED_2(__VA_ARGS__)
#define ON_RESULT_NOT_INCLUDED_2(...) ON_RESULT_RETURN_IMPL(::ams::result::impl::EvaluateResultNotIncluded<__VA_ARGS__>)
#define ON_RESULT_NOT_INCLUDED(...) \
AMS_DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__); \
ON_RESULT_NOT_INCLUDED_2(__VA_ARGS__)
#define ON_RESULT_FAILURE_BESIDES(...) ON_RESULT_NOT_INCLUDED(::ams::ResultSuccess, ## __VA_ARGS__)
#define ON_RESULT_FAILURE_BESIDES_2(...) ON_RESULT_NOT_INCLUDED_2(::ams::ResultSuccess, ## __VA_ARGS__)
/* =================================================================== */
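
With the variadic plumbing above, ON_RESULT_FAILURE_BESIDES falls out of ON_RESULT_NOT_INCLUDED almost for free. A hand-traced expansion (abbreviated, not compiler output) of the usage seen in SendAsyncRequestWithUserBuffer, under the definitions shown here:

/* ON_RESULT_FAILURE_BESIDES(svc::ResultTerminationRequested) { body }; */

/* 1. Prepend ResultSuccess to the exclusion list:                                            */
/*    ON_RESULT_NOT_INCLUDED(::ams::ResultSuccess, svc::ResultTerminationRequested) { body }; */

/* 2. Declare the scope's result reference, then install the guard:         */
/*    AMS_DECLARE_CURRENT_RESULT_REFERENCE_AND_STORAGE(__COUNTER__);        */
/*    ON_RESULT_RETURN_IMPL(::ams::result::impl::EvaluateResultNotIncluded< */
/*        ::ams::ResultSuccess, svc::ResultTerminationRequested>) { body }; */

/* 3. At scope exit the guard evaluates                                      */
/*        !(R_SUCCEEDED(r) || svc::ResultTerminationRequested::Includes(r)), */
/*    the ResultSuccess arm coming from                                      */
/*    EvaluateResultIncludedImplForSuccessCompatibility, and runs the body   */
/*    only for failures other than the exempted one.                        */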