Mirror of https://github.com/Atmosphere-NX/Atmosphere.git (synced 2025-08-08 09:08:45 +00:00)
kern: refactor to use m_ for member variables
commit 968f50bc07 (parent 0bf2ade76f)
135 changed files with 3727 additions and 3734 deletions
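The change is purely mechanical: every non-static data member is renamed from a bare name to an m_-prefixed name, and accesses drop the explicit this-> qualifier, so member state is visually distinct from locals and parameters. Calls to member functions through this-> (for example this->DeleteThreadLocalRegion(...) or this->ChangeState(...) in the hunks below) are left alone; only data members are renamed. A minimal sketch of the convention follows; the Tracker classes and their fields are hypothetical illustrations, not taken from the Atmosphère sources.

#include <cstddef>

/* Before the refactor: members are bare names, so uses are spelled this->member. */
class TrackerOld {
    private:
        size_t used_size;
        bool is_initialized;
    public:
        void Initialize() {
            this->used_size      = 0;
            this->is_initialized = true;
        }
        size_t GetUsedSize() const { return this->used_size; }
};

/* After the refactor: members carry the m_ prefix and are referenced directly. */
class TrackerNew {
    private:
        size_t m_used_size;
        bool m_is_initialized;
    public:
        void Initialize() {
            m_used_size      = 0;
            m_is_initialized = true;
        }
        size_t GetUsedSize() const { return m_used_size; }
};

Because only declarations and their use sites are renamed, the commit is large (135 files) but behaviorally neutral, which is why the hunks below consist almost entirely of one-line substitutions.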
@@ -79,43 +79,43 @@ namespace ams::kern {

     void KProcess::Finalize() {
         /* Delete the process local region. */
-        this->DeleteThreadLocalRegion(this->plr_address);
+        this->DeleteThreadLocalRegion(m_plr_address);

         /* Get the used memory size. */
         const size_t used_memory_size = this->GetUsedUserPhysicalMemorySize();

         /* Finalize the page table. */
-        this->page_table.Finalize();
+        m_page_table.Finalize();

         /* Free the system resource. */
-        if (this->system_resource_address != Null<KVirtualAddress>) {
+        if (m_system_resource_address != Null<KVirtualAddress>) {
             /* Check that we have no outstanding allocations. */
-            MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0);
-            MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0);
-            MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0);
+            MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
+            MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0);
+            MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0);

             /* Free the memory. */
-            KSystemControl::FreeSecureMemory(this->system_resource_address, this->system_resource_num_pages * PageSize, this->memory_pool);
+            KSystemControl::FreeSecureMemory(m_system_resource_address, m_system_resource_num_pages * PageSize, m_memory_pool);

             /* Clear our tracking variables. */
-            this->system_resource_address = Null<KVirtualAddress>;
-            this->system_resource_num_pages = 0;
+            m_system_resource_address = Null<KVirtualAddress>;
+            m_system_resource_num_pages = 0;

             /* Finalize optimized memory. If memory wasn't optimized, this is a no-op. */
-            Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), this->memory_pool);
+            Kernel::GetMemoryManager().FinalizeOptimizedMemory(this->GetId(), m_memory_pool);
         }

         /* Release memory to the resource limit. */
-        if (this->resource_limit != nullptr) {
-            MESOSPHERE_ABORT_UNLESS(used_memory_size >= this->memory_release_hint);
-            this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - this->memory_release_hint);
-            this->resource_limit->Close();
+        if (m_resource_limit != nullptr) {
+            MESOSPHERE_ABORT_UNLESS(used_memory_size >= m_memory_release_hint);
+            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, used_memory_size, used_memory_size - m_memory_release_hint);
+            m_resource_limit->Close();
         }

         /* Free all shared memory infos. */
         {
-            auto it = this->shared_memory_list.begin();
-            while (it != this->shared_memory_list.end()) {
+            auto it = m_shared_memory_list.begin();
+            while (it != m_shared_memory_list.end()) {
                 KSharedMemoryInfo *info = std::addressof(*it);
                 KSharedMemory *shmem = info->GetSharedMemory();

@@ -124,28 +124,28 @@ namespace ams::kern {
                 }
                 shmem->Close();

-                it = this->shared_memory_list.erase(it);
+                it = m_shared_memory_list.erase(it);
                 KSharedMemoryInfo::Free(info);
             }
         }

         /* Close all references to our betas. */
         {
-            auto it = this->beta_list.begin();
-            while (it != this->beta_list.end()) {
+            auto it = m_beta_list.begin();
+            while (it != m_beta_list.end()) {
                 KBeta *beta = std::addressof(*it);
-                it = this->beta_list.erase(it);
+                it = m_beta_list.erase(it);

                 beta->Close();
             }
         }

         /* Our thread local page list must be empty at this point. */
-        MESOSPHERE_ABORT_UNLESS(this->partially_used_tlp_tree.empty());
-        MESOSPHERE_ABORT_UNLESS(this->fully_used_tlp_tree.empty());
+        MESOSPHERE_ABORT_UNLESS(m_partially_used_tlp_tree.empty());
+        MESOSPHERE_ABORT_UNLESS(m_fully_used_tlp_tree.empty());

         /* Log that we finalized for debug. */
-        MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", this->process_id, this->name);
+        MESOSPHERE_LOG("KProcess::Finalize() pid=%ld name=%-12s\n", m_process_id, m_name);

         /* Perform inherited finalization. */
         KAutoObjectWithSlabHeapAndContainer<KProcess, KSynchronizationObject>::Finalize();
@@ -153,40 +153,40 @@ namespace ams::kern {

     Result KProcess::Initialize(const ams::svc::CreateProcessParameter &params) {
         /* Validate that the intended kernel version is high enough for us to support. */
-        R_UNLESS(this->capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination());
+        R_UNLESS(m_capabilities.GetIntendedKernelVersion() >= ams::svc::RequiredKernelVersion, svc::ResultInvalidCombination());

         /* Validate that the intended kernel version isn't too high for us to support. */
-        R_UNLESS(this->capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination());
+        R_UNLESS(m_capabilities.GetIntendedKernelVersion() <= ams::svc::SupportedKernelVersion, svc::ResultInvalidCombination());

         /* Create and clear the process local region. */
-        R_TRY(this->CreateThreadLocalRegion(std::addressof(this->plr_address)));
-        this->plr_heap_address = this->GetThreadLocalRegionPointer(this->plr_address);
-        std::memset(this->plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);
+        R_TRY(this->CreateThreadLocalRegion(std::addressof(m_plr_address)));
+        m_plr_heap_address = this->GetThreadLocalRegionPointer(m_plr_address);
+        std::memset(m_plr_heap_address, 0, ams::svc::ThreadLocalRegionSize);

         /* Copy in the name from parameters. */
-        static_assert(sizeof(params.name) < sizeof(this->name));
-        std::memcpy(this->name, params.name, sizeof(params.name));
-        this->name[sizeof(params.name)] = 0;
+        static_assert(sizeof(params.name) < sizeof(m_name));
+        std::memcpy(m_name, params.name, sizeof(params.name));
+        m_name[sizeof(params.name)] = 0;

         /* Set misc fields. */
-        this->state = State_Created;
-        this->main_thread_stack_size = 0;
-        this->creation_time = KHardwareTimer::GetTick();
-        this->used_kernel_memory_size = 0;
-        this->ideal_core_id = 0;
-        this->flags = params.flags;
-        this->version = params.version;
-        this->program_id = params.program_id;
-        this->code_address = params.code_address;
-        this->code_size = params.code_num_pages * PageSize;
-        this->is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
-        this->is_jit_debug = false;
+        m_state = State_Created;
+        m_main_thread_stack_size = 0;
+        m_creation_time = KHardwareTimer::GetTick();
+        m_used_kernel_memory_size = 0;
+        m_ideal_core_id = 0;
+        m_flags = params.flags;
+        m_version = params.version;
+        m_program_id = params.program_id;
+        m_code_address = params.code_address;
+        m_code_size = params.code_num_pages * PageSize;
+        m_is_application = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
+        m_is_jit_debug = false;

         /* Set thread fields. */
         for (size_t i = 0; i < cpu::NumCores; i++) {
-            this->running_threads[i] = nullptr;
-            this->running_thread_idle_counts[i] = 0;
-            this->pinned_threads[i] = nullptr;
+            m_running_threads[i] = nullptr;
+            m_running_thread_idle_counts[i] = 0;
+            m_pinned_threads[i] = nullptr;
         }

         /* Set max memory based on address space type. */
@@ -194,36 +194,36 @@ namespace ams::kern {
             case ams::svc::CreateProcessFlag_AddressSpace32Bit:
             case ams::svc::CreateProcessFlag_AddressSpace64BitDeprecated:
             case ams::svc::CreateProcessFlag_AddressSpace64Bit:
-                this->max_process_memory = this->page_table.GetHeapRegionSize();
+                m_max_process_memory = m_page_table.GetHeapRegionSize();
                 break;
             case ams::svc::CreateProcessFlag_AddressSpace32BitWithoutAlias:
-                this->max_process_memory = this->page_table.GetHeapRegionSize() + this->page_table.GetAliasRegionSize();
+                m_max_process_memory = m_page_table.GetHeapRegionSize() + m_page_table.GetAliasRegionSize();
                 break;
             MESOSPHERE_UNREACHABLE_DEFAULT_CASE();
         }

         /* Generate random entropy. */
-        KSystemControl::GenerateRandomBytes(this->entropy, sizeof(this->entropy));
+        KSystemControl::GenerateRandomBytes(m_entropy, sizeof(m_entropy));

         /* Clear remaining fields. */
-        this->num_threads = 0;
-        this->peak_num_threads = 0;
-        this->num_created_threads = 0;
-        this->num_process_switches = 0;
-        this->num_thread_switches = 0;
-        this->num_fpu_switches = 0;
-        this->num_supervisor_calls = 0;
-        this->num_ipc_messages = 0;
+        m_num_threads = 0;
+        m_peak_num_threads = 0;
+        m_num_created_threads = 0;
+        m_num_process_switches = 0;
+        m_num_thread_switches = 0;
+        m_num_fpu_switches = 0;
+        m_num_supervisor_calls = 0;
+        m_num_ipc_messages = 0;

-        this->is_signaled = false;
-        this->attached_object = nullptr;
-        this->exception_thread = nullptr;
-        this->is_suspended = false;
-        this->memory_release_hint = 0;
-        this->schedule_count = 0;
+        m_is_signaled = false;
+        m_attached_object = nullptr;
+        m_exception_thread = nullptr;
+        m_is_suspended = false;
+        m_memory_release_hint = 0;
+        m_schedule_count = 0;

         /* We're initialized! */
-        this->is_initialized = true;
+        m_is_initialized = true;

         return ResultSuccess();
     }
@@ -234,10 +234,10 @@ namespace ams::kern {
         MESOSPHERE_ABORT_UNLESS((params.code_num_pages * PageSize) / PageSize == static_cast<size_t>(params.code_num_pages));

         /* Set members. */
-        this->memory_pool = pool;
-        this->resource_limit = res_limit;
-        this->system_resource_address = Null<KVirtualAddress>;
-        this->system_resource_num_pages = 0;
+        m_memory_pool = pool;
+        m_resource_limit = res_limit;
+        m_system_resource_address = Null<KVirtualAddress>;
+        m_system_resource_num_pages = 0;

         /* Setup page table. */
         /* NOTE: Nintendo passes process ID despite not having set it yet. */
@@ -250,29 +250,29 @@ namespace ams::kern {
            auto *mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
            auto *block_info_manager = std::addressof(Kernel::GetBlockInfoManager());
            auto *pt_manager = std::addressof(Kernel::GetPageTableManager());
-            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
+            R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, params.code_num_pages * PageSize, mem_block_manager, block_info_manager, pt_manager));
         }
-        auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
+        auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };

         /* Ensure we can insert the code region. */
-        R_UNLESS(this->page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
+        R_UNLESS(m_page_table.CanContain(params.code_address, params.code_num_pages * PageSize, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

         /* Map the code region. */
-        R_TRY(this->page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));
+        R_TRY(m_page_table.MapPageGroup(params.code_address, pg, KMemoryState_Code, KMemoryPermission_KernelRead));

         /* Initialize capabilities. */
-        R_TRY(this->capabilities.Initialize(caps, num_caps, std::addressof(this->page_table)));
+        R_TRY(m_capabilities.Initialize(caps, num_caps, std::addressof(m_page_table)));

         /* Initialize the process id. */
-        this->process_id = g_initial_process_id++;
-        MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= this->process_id);
-        MESOSPHERE_ABORT_UNLESS(this->process_id <= InitialProcessIdMax);
+        m_process_id = g_initial_process_id++;
+        MESOSPHERE_ABORT_UNLESS(InitialProcessIdMin <= m_process_id);
+        MESOSPHERE_ABORT_UNLESS(m_process_id <= InitialProcessIdMax);

         /* Initialize the rest of the process. */
         R_TRY(this->Initialize(params));

         /* Open a reference to the resource limit. */
-        this->resource_limit->Open();
+        m_resource_limit->Open();

         /* We succeeded! */
         pt_guard.Cancel();
@@ -284,8 +284,8 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(res_limit != nullptr);

         /* Set pool and resource limit. */
-        this->memory_pool = pool;
-        this->resource_limit = res_limit;
+        m_memory_pool = pool;
+        m_resource_limit = res_limit;

         /* Get the memory sizes. */
         const size_t code_num_pages = params.code_num_pages;
@@ -302,27 +302,27 @@ namespace ams::kern {
         KBlockInfoManager *block_info_manager;
         KPageTableManager *pt_manager;

-        this->system_resource_address = Null<KVirtualAddress>;
-        this->system_resource_num_pages = 0;
+        m_system_resource_address = Null<KVirtualAddress>;
+        m_system_resource_num_pages = 0;

         if (system_resource_num_pages != 0) {
             /* Allocate secure memory. */
-            R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(this->system_resource_address), system_resource_size, pool));
+            R_TRY(KSystemControl::AllocateSecureMemory(std::addressof(m_system_resource_address), system_resource_size, pool));

             /* Set the number of system resource pages. */
-            MESOSPHERE_ASSERT(this->system_resource_address != Null<KVirtualAddress>);
-            this->system_resource_num_pages = system_resource_num_pages;
+            MESOSPHERE_ASSERT(m_system_resource_address != Null<KVirtualAddress>);
+            m_system_resource_num_pages = system_resource_num_pages;

             /* Initialize managers. */
             const size_t rc_size = util::AlignUp(KPageTableManager::CalculateReferenceCountSize(system_resource_size), PageSize);
-            this->dynamic_page_manager.Initialize(this->system_resource_address + rc_size, system_resource_size - rc_size);
-            this->page_table_manager.Initialize(std::addressof(this->dynamic_page_manager), GetPointer<KPageTableManager::RefCount>(this->system_resource_address));
-            this->memory_block_slab_manager.Initialize(std::addressof(this->dynamic_page_manager));
-            this->block_info_manager.Initialize(std::addressof(this->dynamic_page_manager));
+            m_dynamic_page_manager.Initialize(m_system_resource_address + rc_size, system_resource_size - rc_size);
+            m_page_table_manager.Initialize(std::addressof(m_dynamic_page_manager), GetPointer<KPageTableManager::RefCount>(m_system_resource_address));
+            m_memory_block_slab_manager.Initialize(std::addressof(m_dynamic_page_manager));
+            m_block_info_manager.Initialize(std::addressof(m_dynamic_page_manager));

-            mem_block_manager = std::addressof(this->memory_block_slab_manager);
-            block_info_manager = std::addressof(this->block_info_manager);
-            pt_manager = std::addressof(this->page_table_manager);
+            mem_block_manager = std::addressof(m_memory_block_slab_manager);
+            block_info_manager = std::addressof(m_block_info_manager);
+            pt_manager = std::addressof(m_page_table_manager);
         } else {
             const bool is_app = (params.flags & ams::svc::CreateProcessFlag_IsApplication);
             mem_block_manager = std::addressof(is_app ? Kernel::GetApplicationMemoryBlockManager() : Kernel::GetSystemMemoryBlockManager());
@@ -332,18 +332,18 @@ namespace ams::kern {

         /* Ensure we don't leak any secure memory we allocated. */
         auto sys_resource_guard = SCOPE_GUARD {
-            if (this->system_resource_address != Null<KVirtualAddress>) {
+            if (m_system_resource_address != Null<KVirtualAddress>) {
                 /* Check that we have no outstanding allocations. */
-                MESOSPHERE_ABORT_UNLESS(this->memory_block_slab_manager.GetUsed() == 0);
-                MESOSPHERE_ABORT_UNLESS(this->block_info_manager.GetUsed() == 0);
-                MESOSPHERE_ABORT_UNLESS(this->page_table_manager.GetUsed() == 0);
+                MESOSPHERE_ABORT_UNLESS(m_memory_block_slab_manager.GetUsed() == 0);
+                MESOSPHERE_ABORT_UNLESS(m_block_info_manager.GetUsed() == 0);
+                MESOSPHERE_ABORT_UNLESS(m_page_table_manager.GetUsed() == 0);

                 /* Free the memory. */
-                KSystemControl::FreeSecureMemory(this->system_resource_address, system_resource_size, pool);
+                KSystemControl::FreeSecureMemory(m_system_resource_address, system_resource_size, pool);

                 /* Clear our tracking variables. */
-                this->system_resource_address = Null<KVirtualAddress>;
-                this->system_resource_num_pages = 0;
+                m_system_resource_address = Null<KVirtualAddress>;
+                m_system_resource_num_pages = 0;
             }
         };

@@ -354,34 +354,34 @@ namespace ams::kern {
            const auto as_type = static_cast<ams::svc::CreateProcessFlag>(params.flags & ams::svc::CreateProcessFlag_AddressSpaceMask);
            const bool enable_aslr = (params.flags & ams::svc::CreateProcessFlag_EnableAslr) != 0;
            const bool enable_das_merge = (params.flags & ams::svc::CreateProcessFlag_DisableDeviceAddressSpaceMerge) == 0;
-            R_TRY(this->page_table.Initialize(this->process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
+            R_TRY(m_page_table.Initialize(m_process_id, as_type, enable_aslr, enable_das_merge, !enable_aslr, pool, params.code_address, code_size, mem_block_manager, block_info_manager, pt_manager));
         }
-        auto pt_guard = SCOPE_GUARD { this->page_table.Finalize(); };
+        auto pt_guard = SCOPE_GUARD { m_page_table.Finalize(); };

         /* Ensure we can insert the code region. */
-        R_UNLESS(this->page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());
+        R_UNLESS(m_page_table.CanContain(params.code_address, code_size, KMemoryState_Code), svc::ResultInvalidMemoryRegion());

         /* Map the code region. */
-        R_TRY(this->page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped)));
+        R_TRY(m_page_table.MapPages(params.code_address, code_num_pages, KMemoryState_Code, static_cast<KMemoryPermission>(KMemoryPermission_KernelRead | KMemoryPermission_NotMapped)));

         /* Initialize capabilities. */
-        R_TRY(this->capabilities.Initialize(user_caps, num_caps, std::addressof(this->page_table)));
+        R_TRY(m_capabilities.Initialize(user_caps, num_caps, std::addressof(m_page_table)));

         /* Initialize the process id. */
-        this->process_id = g_process_id++;
-        MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= this->process_id);
-        MESOSPHERE_ABORT_UNLESS(this->process_id <= ProcessIdMax);
+        m_process_id = g_process_id++;
+        MESOSPHERE_ABORT_UNLESS(ProcessIdMin <= m_process_id);
+        MESOSPHERE_ABORT_UNLESS(m_process_id <= ProcessIdMax);

         /* If we should optimize memory allocations, do so. */
-        if (this->system_resource_address != Null<KVirtualAddress> && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
-            R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(this->process_id, pool));
+        if (m_system_resource_address != Null<KVirtualAddress> && (params.flags & ams::svc::CreateProcessFlag_OptimizeMemoryAllocation) != 0) {
+            R_TRY(Kernel::GetMemoryManager().InitializeOptimizedMemory(m_process_id, pool));
         }

         /* Initialize the rest of the process. */
         R_TRY(this->Initialize(params));

         /* Open a reference to the resource limit. */
-        this->resource_limit->Open();
+        m_resource_limit->Open();

         /* We succeeded, so commit our memory reservation and cancel our guards. */
         sys_resource_guard.Cancel();
@@ -407,14 +407,14 @@ namespace ams::kern {
         TerminateChildren(this, GetCurrentThreadPointer());

         /* Finalize the handle table. */
-        this->handle_table.Finalize();
+        m_handle_table.Finalize();
     }

     void KProcess::FinishTermination() {
         /* Release resource limit hint. */
-        if (this->resource_limit != nullptr) {
-            this->memory_release_hint = this->GetUsedUserPhysicalMemorySize();
-            this->resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, this->memory_release_hint);
+        if (m_resource_limit != nullptr) {
+            m_memory_release_hint = this->GetUsedUserPhysicalMemorySize();
+            m_resource_limit->Release(ams::svc::LimitableResource_PhysicalMemoryMax, 0, m_memory_release_hint);
         }

         /* Change state. */
@@ -433,14 +433,14 @@ namespace ams::kern {
         /* Determine whether we need to start terminating */
         bool needs_terminate = false;
         {
-            KScopedLightLock lk(this->state_lock);
+            KScopedLightLock lk(m_state_lock);
             KScopedSchedulerLock sl;

-            MESOSPHERE_ASSERT(this->state != State_Created);
-            MESOSPHERE_ASSERT(this->state != State_CreatedAttached);
-            MESOSPHERE_ASSERT(this->state != State_Crashed);
-            MESOSPHERE_ASSERT(this->state != State_Terminated);
-            if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_DebugBreak) {
+            MESOSPHERE_ASSERT(m_state != State_Created);
+            MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
+            MESOSPHERE_ASSERT(m_state != State_Crashed);
+            MESOSPHERE_ASSERT(m_state != State_Terminated);
+            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_DebugBreak) {
                 this->ChangeState(State_Terminating);
                 needs_terminate = true;
             }
@@ -451,7 +451,7 @@ namespace ams::kern {
             this->StartTermination();

             /* Note for debug that we're exiting the process. */
-            MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", this->process_id, this->name);
+            MESOSPHERE_LOG("KProcess::Exit() pid=%ld name=%-12s\n", m_process_id, m_name);

             /* Register the process as a work task. */
             KWorkerTaskManager::AddTask(KWorkerTaskManager::WorkerType_Exit, this);
@@ -468,15 +468,15 @@ namespace ams::kern {
         /* Determine whether we need to start terminating */
         bool needs_terminate = false;
         {
-            KScopedLightLock lk(this->state_lock);
+            KScopedLightLock lk(m_state_lock);

             /* Check whether we're allowed to terminate. */
-            R_UNLESS(this->state != State_Created, svc::ResultInvalidState());
-            R_UNLESS(this->state != State_CreatedAttached, svc::ResultInvalidState());
+            R_UNLESS(m_state != State_Created, svc::ResultInvalidState());
+            R_UNLESS(m_state != State_CreatedAttached, svc::ResultInvalidState());

             KScopedSchedulerLock sl;

-            if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) {
+            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
                 this->ChangeState(State_Terminating);
                 needs_terminate = true;
             }
@@ -488,7 +488,7 @@ namespace ams::kern {
             this->StartTermination();

             /* Note for debug that we're terminating the process. */
-            MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", this->process_id, this->name);
+            MESOSPHERE_LOG("KProcess::Terminate() pid=%ld name=%-12s\n", m_process_id, m_name);

             /* Call the debug callback. */
             KDebug::OnTerminateProcess(this);
@@ -502,14 +502,14 @@ namespace ams::kern {

     Result KProcess::AddSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
         /* Lock ourselves, to prevent concurrent access. */
-        KScopedLightLock lk(this->state_lock);
+        KScopedLightLock lk(m_state_lock);

         /* Address and size parameters aren't used. */
         MESOSPHERE_UNUSED(address, size);

         /* Try to find an existing info for the memory. */
         KSharedMemoryInfo *info = nullptr;
-        for (auto it = this->shared_memory_list.begin(); it != this->shared_memory_list.end(); ++it) {
+        for (auto it = m_shared_memory_list.begin(); it != m_shared_memory_list.end(); ++it) {
             if (it->GetSharedMemory() == shmem) {
                 info = std::addressof(*it);
                 break;
@@ -524,7 +524,7 @@ namespace ams::kern {

             /* Initialize the info and add it to our list. */
             info->Initialize(shmem);
-            this->shared_memory_list.push_back(*info);
+            m_shared_memory_list.push_back(*info);
         }

         /* Open a reference to the shared memory and its info. */
@@ -536,15 +536,15 @@ namespace ams::kern {

     void KProcess::RemoveSharedMemory(KSharedMemory *shmem, KProcessAddress address, size_t size) {
         /* Lock ourselves, to prevent concurrent access. */
-        KScopedLightLock lk(this->state_lock);
+        KScopedLightLock lk(m_state_lock);

         /* Address and size parameters aren't used. */
         MESOSPHERE_UNUSED(address, size);

         /* Find an existing info for the memory. */
         KSharedMemoryInfo *info = nullptr;
-        auto it = this->shared_memory_list.begin();
-        for (/* ... */; it != this->shared_memory_list.end(); ++it) {
+        auto it = m_shared_memory_list.begin();
+        for (/* ... */; it != m_shared_memory_list.end(); ++it) {
             if (it->GetSharedMemory() == shmem) {
                 info = std::addressof(*it);
                 break;
@@ -554,7 +554,7 @@ namespace ams::kern {

         /* Close a reference to the info and its memory. */
         if (info->Close()) {
-            this->shared_memory_list.erase(it);
+            m_shared_memory_list.erase(it);
             KSharedMemoryInfo::Free(info);
         }

@@ -569,14 +569,14 @@ namespace ams::kern {
         {
             KScopedSchedulerLock sl;

-            if (auto it = this->partially_used_tlp_tree.begin(); it != partially_used_tlp_tree.end()) {
+            if (auto it = m_partially_used_tlp_tree.begin(); it != m_partially_used_tlp_tree.end()) {
                 tlr = it->Reserve();
                 MESOSPHERE_ABORT_UNLESS(tlr != Null<KProcessAddress>);

                 if (it->IsAllUsed()) {
                     tlp = std::addressof(*it);
-                    this->partially_used_tlp_tree.erase(it);
-                    this->fully_used_tlp_tree.insert(*tlp);
+                    m_partially_used_tlp_tree.erase(it);
+                    m_fully_used_tlp_tree.insert(*tlp);
                 }

                 *out = tlr;
@@ -600,9 +600,9 @@ namespace ams::kern {
         {
             KScopedSchedulerLock sl;
             if (tlp->IsAllUsed()) {
-                this->fully_used_tlp_tree.insert(*tlp);
+                m_fully_used_tlp_tree.insert(*tlp);
             } else {
-                this->partially_used_tlp_tree.insert(*tlp);
+                m_partially_used_tlp_tree.insert(*tlp);
             }
         }

@@ -620,22 +620,22 @@ namespace ams::kern {
             KScopedSchedulerLock sl;

             /* Try to find the page in the partially used list. */
-            auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize)));
-            if (it == this->partially_used_tlp_tree.end()) {
+            auto it = m_partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize)));
+            if (it == m_partially_used_tlp_tree.end()) {
                 /* If we don't find it, it has to be in the fully used list. */
-                it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize)));
-                R_UNLESS(it != this->fully_used_tlp_tree.end(), svc::ResultInvalidAddress());
+                it = m_fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize)));
+                R_UNLESS(it != m_fully_used_tlp_tree.end(), svc::ResultInvalidAddress());

                 /* Release the region. */
                 it->Release(addr);

                 /* Move the page out of the fully used list. */
                 KThreadLocalPage *tlp = std::addressof(*it);
-                this->fully_used_tlp_tree.erase(it);
+                m_fully_used_tlp_tree.erase(it);
                 if (tlp->IsAllFree()) {
                     page_to_free = tlp;
                 } else {
-                    this->partially_used_tlp_tree.insert(*tlp);
+                    m_partially_used_tlp_tree.insert(*tlp);
                 }
             } else {
                 /* Release the region. */
@@ -644,7 +644,7 @@ namespace ams::kern {
                 /* Handle the all-free case. */
                 KThreadLocalPage *tlp = std::addressof(*it);
                 if (tlp->IsAllFree()) {
-                    this->partially_used_tlp_tree.erase(it);
+                    m_partially_used_tlp_tree.erase(it);
                     page_to_free = tlp;
                 }
             }
@@ -664,9 +664,9 @@ namespace ams::kern {
         KThreadLocalPage *tlp = nullptr;
         {
             KScopedSchedulerLock sl;
-            if (auto it = this->partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->partially_used_tlp_tree.end()) {
+            if (auto it = m_partially_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != m_partially_used_tlp_tree.end()) {
                 tlp = std::addressof(*it);
-            } else if (auto it = this->fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != this->fully_used_tlp_tree.end()) {
+            } else if (auto it = m_fully_used_tlp_tree.find(KThreadLocalPage(util::AlignDown(GetInteger(addr), PageSize))); it != m_fully_used_tlp_tree.end()) {
                 tlp = std::addressof(*it);
             } else {
                 return nullptr;
@@ -704,18 +704,18 @@ namespace ams::kern {
     }

     void KProcess::IncrementThreadCount() {
-        MESOSPHERE_ASSERT(this->num_threads >= 0);
-        ++this->num_created_threads;
+        MESOSPHERE_ASSERT(m_num_threads >= 0);
+        ++m_num_created_threads;

-        if (const auto count = ++this->num_threads; count > this->peak_num_threads) {
-            this->peak_num_threads = count;
+        if (const auto count = ++m_num_threads; count > m_peak_num_threads) {
+            m_peak_num_threads = count;
         }
     }

     void KProcess::DecrementThreadCount() {
-        MESOSPHERE_ASSERT(this->num_threads > 0);
+        MESOSPHERE_ASSERT(m_num_threads > 0);

-        if (const auto count = --this->num_threads; count == 0) {
+        if (const auto count = --m_num_threads; count == 0) {
             this->Terminate();
         }
     }
@@ -726,8 +726,8 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(this == cur_thread->GetOwnerProcess());

         /* Try to claim the exception thread. */
-        if (this->exception_thread != cur_thread) {
-            const uintptr_t address_key = reinterpret_cast<uintptr_t>(std::addressof(this->exception_thread));
+        if (m_exception_thread != cur_thread) {
+            const uintptr_t address_key = reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread));
             while (true) {
                 {
                     KScopedSchedulerLock sl;
@@ -738,14 +738,14 @@ namespace ams::kern {
                     }

                     /* If we have no exception thread, we succeeded. */
-                    if (this->exception_thread == nullptr) {
-                        this->exception_thread = cur_thread;
+                    if (m_exception_thread == nullptr) {
+                        m_exception_thread = cur_thread;
                         return true;
                     }

                     /* Otherwise, wait for us to not have an exception thread. */
                     cur_thread->SetAddressKey(address_key);
-                    this->exception_thread->AddWaiter(cur_thread);
+                    m_exception_thread->AddWaiter(cur_thread);
                     if (cur_thread->GetState() == KThread::ThreadState_Runnable) {
                         cur_thread->SetState(KThread::ThreadState_Waiting);
                     } else {
@@ -774,12 +774,12 @@ namespace ams::kern {
     bool KProcess::ReleaseUserException(KThread *thread) {
         KScopedSchedulerLock sl;

-        if (this->exception_thread == thread) {
-            this->exception_thread = nullptr;
+        if (m_exception_thread == thread) {
+            m_exception_thread = nullptr;

             /* Remove waiter thread. */
             s32 num_waiters;
-            KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(this->exception_thread)));
+            KThread *next = thread->RemoveWaiterByKey(std::addressof(num_waiters), reinterpret_cast<uintptr_t>(std::addressof(m_exception_thread)));
             if (next != nullptr) {
                 if (next->GetState() == KThread::ThreadState_Waiting) {
                     next->SetState(KThread::ThreadState_Runnable);
@@ -795,30 +795,30 @@ namespace ams::kern {
     }

     void KProcess::RegisterThread(KThread *thread) {
-        KScopedLightLock lk(this->list_lock);
+        KScopedLightLock lk(m_list_lock);

-        this->thread_list.push_back(*thread);
+        m_thread_list.push_back(*thread);
     }

     void KProcess::UnregisterThread(KThread *thread) {
-        KScopedLightLock lk(this->list_lock);
+        KScopedLightLock lk(m_list_lock);

-        this->thread_list.erase(this->thread_list.iterator_to(*thread));
+        m_thread_list.erase(m_thread_list.iterator_to(*thread));
     }

     size_t KProcess::GetUsedUserPhysicalMemorySize() const {
-        const size_t norm_size = this->page_table.GetNormalMemorySize();
-        const size_t other_size = this->code_size + this->main_thread_stack_size;
-        const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool);
+        const size_t norm_size = m_page_table.GetNormalMemorySize();
+        const size_t other_size = m_code_size + m_main_thread_stack_size;
+        const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool);

         return norm_size + other_size + sec_size;
     }

     size_t KProcess::GetTotalUserPhysicalMemorySize() const {
         /* Get the amount of free and used size. */
-        const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
+        const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
         const size_t used_size = this->GetUsedNonSystemUserPhysicalMemorySize();
-        const size_t max_size = this->max_process_memory;
+        const size_t max_size = m_max_process_memory;

         if (used_size + free_size > max_size) {
             return max_size;
@@ -828,18 +828,18 @@ namespace ams::kern {
     }

     size_t KProcess::GetUsedNonSystemUserPhysicalMemorySize() const {
-        const size_t norm_size = this->page_table.GetNormalMemorySize();
-        const size_t other_size = this->code_size + this->main_thread_stack_size;
+        const size_t norm_size = m_page_table.GetNormalMemorySize();
+        const size_t other_size = m_code_size + m_main_thread_stack_size;

         return norm_size + other_size;
     }

     size_t KProcess::GetTotalNonSystemUserPhysicalMemorySize() const {
         /* Get the amount of free and used size. */
-        const size_t free_size = this->resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
+        const size_t free_size = m_resource_limit->GetFreeValue(ams::svc::LimitableResource_PhysicalMemoryMax);
         const size_t used_size = this->GetUsedUserPhysicalMemorySize();
-        const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(this->system_resource_num_pages * PageSize, this->memory_pool);
-        const size_t max_size = this->max_process_memory;
+        const size_t sec_size = KSystemControl::CalculateRequiredSecureMemorySize(m_system_resource_num_pages * PageSize, m_memory_pool);
+        const size_t max_size = m_max_process_memory;

         if (used_size + free_size > max_size) {
             return max_size - sec_size;
@@ -852,10 +852,10 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Lock ourselves, to prevent concurrent access. */
-        KScopedLightLock lk(this->state_lock);
+        KScopedLightLock lk(m_state_lock);

         /* Validate that we're in a state where we can initialize. */
-        const auto state = this->state;
+        const auto state = m_state;
         R_UNLESS(state == State_Created || state == State_CreatedAttached, svc::ResultInvalidState());

         /* Place a tentative reservation of a thread for this process. */
@@ -863,12 +863,12 @@ namespace ams::kern {
         R_UNLESS(thread_reservation.Succeeded(), svc::ResultLimitReached());

         /* Ensure that we haven't already allocated stack. */
-        MESOSPHERE_ABORT_UNLESS(this->main_thread_stack_size == 0);
+        MESOSPHERE_ABORT_UNLESS(m_main_thread_stack_size == 0);

         /* Ensure that we're allocating a valid stack. */
         stack_size = util::AlignUp(stack_size, PageSize);
-        R_UNLESS(stack_size + this->code_size <= this->max_process_memory, svc::ResultOutOfMemory());
-        R_UNLESS(stack_size + this->code_size >= this->code_size, svc::ResultOutOfMemory());
+        R_UNLESS(stack_size + m_code_size <= m_max_process_memory, svc::ResultOutOfMemory());
+        R_UNLESS(stack_size + m_code_size >= m_code_size, svc::ResultOutOfMemory());

         /* Place a tentative reservation of memory for our new stack. */
         KScopedResourceReservation mem_reservation(this, ams::svc::LimitableResource_PhysicalMemoryMax, stack_size);
@@ -878,26 +878,26 @@ namespace ams::kern {
         KProcessAddress stack_top = Null<KProcessAddress>;
         if (stack_size) {
             KProcessAddress stack_bottom;
-            R_TRY(this->page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));
+            R_TRY(m_page_table.MapPages(std::addressof(stack_bottom), stack_size / PageSize, KMemoryState_Stack, KMemoryPermission_UserReadWrite));

             stack_top = stack_bottom + stack_size;
-            this->main_thread_stack_size = stack_size;
+            m_main_thread_stack_size = stack_size;
         }

         /* Ensure our stack is safe to clean up on exit. */
         auto stack_guard = SCOPE_GUARD {
-            if (this->main_thread_stack_size) {
-                MESOSPHERE_R_ABORT_UNLESS(this->page_table.UnmapPages(stack_top - this->main_thread_stack_size, this->main_thread_stack_size / PageSize, KMemoryState_Stack));
-                this->main_thread_stack_size = 0;
+            if (m_main_thread_stack_size) {
+                MESOSPHERE_R_ABORT_UNLESS(m_page_table.UnmapPages(stack_top - m_main_thread_stack_size, m_main_thread_stack_size / PageSize, KMemoryState_Stack));
+                m_main_thread_stack_size = 0;
             }
         };

         /* Set our maximum heap size. */
-        R_TRY(this->page_table.SetMaxHeapSize(this->max_process_memory - (this->main_thread_stack_size + this->code_size)));
+        R_TRY(m_page_table.SetMaxHeapSize(m_max_process_memory - (m_main_thread_stack_size + m_code_size)));

         /* Initialize our handle table. */
-        R_TRY(this->handle_table.Initialize(this->capabilities.GetHandleTableSize()));
-        auto ht_guard = SCOPE_GUARD { this->handle_table.Finalize(); };
+        R_TRY(m_handle_table.Initialize(m_capabilities.GetHandleTableSize()));
+        auto ht_guard = SCOPE_GUARD { m_handle_table.Finalize(); };

         /* Create a new thread for the process. */
         KThread *main_thread = KThread::Create();
@@ -905,7 +905,7 @@ namespace ams::kern {
         auto thread_guard = SCOPE_GUARD { main_thread->Close(); };

         /* Initialize the thread. */
-        R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, this->ideal_core_id, this));
+        R_TRY(KThread::InitializeUserThread(main_thread, reinterpret_cast<KThreadFunction>(GetVoidPointer(this->GetEntryPoint())), 0, stack_top, priority, m_ideal_core_id, this));

         /* Register the thread, and commit our reservation. */
         KThread::Register(main_thread);
@@ -913,7 +913,7 @@ namespace ams::kern {

         /* Add the thread to our handle table. */
         ams::svc::Handle thread_handle;
-        R_TRY(this->handle_table.Add(std::addressof(thread_handle), main_thread));
+        R_TRY(m_handle_table.Add(std::addressof(thread_handle), main_thread));

         /* Set the thread arguments. */
         main_thread->GetContext().SetArguments(0, thread_handle);
@@ -933,7 +933,7 @@ namespace ams::kern {
         mem_reservation.Commit();

         /* Note for debug that we're running a new process. */
-        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", this->process_id, this->name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());
+        MESOSPHERE_LOG("KProcess::Run() pid=%ld name=%-12s thread=%ld affinity=0x%lx ideal_core=%d active_core=%d\n", m_process_id, m_name, main_thread->GetId(), main_thread->GetVirtualAffinityMask(), main_thread->GetIdealVirtualCore(), main_thread->GetActiveCore());

         return ResultSuccess();
     }
@@ -942,32 +942,32 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();

         /* Lock the process and the scheduler. */
-        KScopedLightLock lk(this->state_lock);
+        KScopedLightLock lk(m_state_lock);
         KScopedSchedulerLock sl;

         /* Validate that we're in a state that we can reset. */
-        R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState());
-        R_UNLESS(this->is_signaled, svc::ResultInvalidState());
+        R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());
+        R_UNLESS(m_is_signaled, svc::ResultInvalidState());

         /* Clear signaled. */
-        this->is_signaled = false;
+        m_is_signaled = false;
         return ResultSuccess();
     }

     Result KProcess::SetActivity(ams::svc::ProcessActivity activity) {
         /* Lock ourselves and the scheduler. */
-        KScopedLightLock lk(this->state_lock);
-        KScopedLightLock list_lk(this->list_lock);
+        KScopedLightLock lk(m_state_lock);
+        KScopedLightLock list_lk(m_list_lock);
         KScopedSchedulerLock sl;

         /* Validate our state. */
-        R_UNLESS(this->state != State_Terminating, svc::ResultInvalidState());
-        R_UNLESS(this->state != State_Terminated, svc::ResultInvalidState());
+        R_UNLESS(m_state != State_Terminating, svc::ResultInvalidState());
+        R_UNLESS(m_state != State_Terminated, svc::ResultInvalidState());

         /* Either pause or resume. */
         if (activity == ams::svc::ProcessActivity_Paused) {
             /* Verify that we're not suspended. */
-            R_UNLESS(!this->is_suspended, svc::ResultInvalidState());
+            R_UNLESS(!m_is_suspended, svc::ResultInvalidState());

             /* Suspend all threads. */
             auto end = this->GetThreadList().end();
@@ -981,7 +981,7 @@ namespace ams::kern {
             MESOSPHERE_ASSERT(activity == ams::svc::ProcessActivity_Runnable);

             /* Verify that we're suspended. */
-            R_UNLESS(this->is_suspended, svc::ResultInvalidState());
+            R_UNLESS(m_is_suspended, svc::ResultInvalidState());

             /* Resume all threads. */
             auto end = this->GetThreadList().end();
@@ -1028,7 +1028,7 @@ namespace ams::kern {

     Result KProcess::GetThreadList(s32 *out_num_threads, ams::kern::svc::KUserPointer<u64 *> out_thread_ids, s32 max_out_count) {
         /* Lock the list. */
-        KScopedLightLock lk(this->list_lock);
+        KScopedLightLock lk(m_list_lock);

         /* Iterate over the list. */
         s32 count = 0;
@@ -1059,17 +1059,17 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(debug_object != nullptr);

         /* Cache our state to return it to the debug object. */
-        const auto old_state = this->state;
+        const auto old_state = m_state;

         /* Set the object. */
-        this->attached_object = debug_object;
+        m_attached_object = debug_object;

         /* Check that our state is valid for attach. */
-        MESOSPHERE_ASSERT(this->state == State_Created || this->state == State_Running || this->state == State_Crashed);
+        MESOSPHERE_ASSERT(m_state == State_Created || m_state == State_Running || m_state == State_Crashed);

         /* Update our state. */
-        if (this->state != State_DebugBreak) {
-            if (this->state == State_Created) {
+        if (m_state != State_DebugBreak) {
+            if (m_state == State_Created) {
                 this->ChangeState(State_CreatedAttached);
             } else {
                 this->ChangeState(State_DebugBreak);
@@ -1084,15 +1084,15 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

         /* Clear the attached object. */
-        this->attached_object = nullptr;
+        m_attached_object = nullptr;

         /* Validate that the process is in an attached state. */
-        MESOSPHERE_ASSERT(this->state == State_CreatedAttached || this->state == State_RunningAttached || this->state == State_DebugBreak || this->state == State_Terminating || this->state == State_Terminated);
+        MESOSPHERE_ASSERT(m_state == State_CreatedAttached || m_state == State_RunningAttached || m_state == State_DebugBreak || m_state == State_Terminating || m_state == State_Terminated);

         /* Change the state appropriately. */
-        if (this->state == State_CreatedAttached) {
+        if (m_state == State_CreatedAttached) {
             this->ChangeState(State_Created);
-        } else if (this->state == State_RunningAttached || this->state == State_DebugBreak) {
+        } else if (m_state == State_RunningAttached || m_state == State_DebugBreak) {
             /* Disallow transition back to created from running. */
             if (old_state == State_Created) {
                 old_state = State_Running;
@@ -1107,20 +1107,20 @@ namespace ams::kern {
         MESOSPHERE_ASSERT(this == GetCurrentProcessPointer());

         /* If we aren't allowed to enter jit debug, don't. */
-        if ((this->flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) {
+        if ((m_flags & ams::svc::CreateProcessFlag_EnableDebug) == 0) {
             return false;
         }

         /* We're the current process, so we should be some kind of running. */
-        MESOSPHERE_ASSERT(this->state != State_Created);
-        MESOSPHERE_ASSERT(this->state != State_CreatedAttached);
-        MESOSPHERE_ASSERT(this->state != State_Terminated);
+        MESOSPHERE_ASSERT(m_state != State_Created);
+        MESOSPHERE_ASSERT(m_state != State_CreatedAttached);
+        MESOSPHERE_ASSERT(m_state != State_Terminated);

         /* Try to enter JIT debug. */
         while (true) {
             /* Lock ourselves and the scheduler. */
-            KScopedLightLock lk(this->state_lock);
-            KScopedLightLock list_lk(this->list_lock);
+            KScopedLightLock lk(m_state_lock);
+            KScopedLightLock list_lk(m_list_lock);
             KScopedSchedulerLock sl;

             /* If we're attached to a debugger, we're necessarily in debug. */
@@ -1134,12 +1134,12 @@ namespace ams::kern {
             }

             /* We're not attached to debugger, so check that. */
-            MESOSPHERE_ASSERT(this->state != State_RunningAttached);
-            MESOSPHERE_ASSERT(this->state != State_DebugBreak);
+            MESOSPHERE_ASSERT(m_state != State_RunningAttached);
+            MESOSPHERE_ASSERT(m_state != State_DebugBreak);

             /* If we're terminating, we can't enter debug. */
-            if (this->state != State_Running && this->state != State_Crashed) {
-                MESOSPHERE_ASSERT(this->state == State_Terminating);
+            if (m_state != State_Running && m_state != State_Crashed) {
+                MESOSPHERE_ASSERT(m_state == State_Terminating);
                 return false;
             }

@@ -1160,14 +1160,14 @@ namespace ams::kern {
             this->ChangeState(State_Crashed);

             /* Enter jit debug. */
-            this->is_jit_debug = true;
-            this->jit_debug_event_type = event;
-            this->jit_debug_exception_type = exception;
-            this->jit_debug_params[0] = param1;
-            this->jit_debug_params[1] = param2;
-            this->jit_debug_params[2] = param3;
-            this->jit_debug_params[3] = param4;
-            this->jit_debug_thread_id = GetCurrentThread().GetId();
+            m_is_jit_debug = true;
+            m_jit_debug_event_type = event;
+            m_jit_debug_exception_type = exception;
+            m_jit_debug_params[0] = param1;
+            m_jit_debug_params[1] = param2;
+            m_jit_debug_params[2] = param3;
+            m_jit_debug_params[3] = param4;
+            m_jit_debug_thread_id = GetCurrentThread().GetId();

             /* Exit our retry loop. */
             break;
@@ -1177,7 +1177,7 @@ namespace ams::kern {
         {
             KScopedSchedulerLock sl;

-            if (this->state == State_Running || this->state == State_RunningAttached || this->state == State_Crashed || this->state == State_DebugBreak) {
+            if (m_state == State_Running || m_state == State_RunningAttached || m_state == State_Crashed || m_state == State_DebugBreak) {
                 return true;
             }
         }
@@ -1189,8 +1189,8 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

-        if (this->is_jit_debug) {
-            return KDebugBase::CreateDebugEvent(this->jit_debug_event_type, this->jit_debug_exception_type, this->jit_debug_params[0], this->jit_debug_params[1], this->jit_debug_params[2], this->jit_debug_params[3], this->jit_debug_thread_id);
+        if (m_is_jit_debug) {
+            return KDebugBase::CreateDebugEvent(m_jit_debug_event_type, m_jit_debug_exception_type, m_jit_debug_params[0], m_jit_debug_params[1], m_jit_debug_params[2], m_jit_debug_params[3], m_jit_debug_thread_id);
         } else {
             return nullptr;
         }
@@ -1200,7 +1200,7 @@ namespace ams::kern {
         MESOSPHERE_ASSERT_THIS();
         MESOSPHERE_ASSERT(KScheduler::IsSchedulerLockedByCurrentThread());

-        this->is_jit_debug = false;
+        m_is_jit_debug = false;
     }

     KProcess *KProcess::GetProcessFromId(u64 process_id) {