From 85fd13f72462095159c5edd0a034e250a1209ba9 Mon Sep 17 00:00:00 2001
From: Michael Scire
Date: Mon, 7 Apr 2025 16:46:58 -0700
Subject: [PATCH] kern: update KernelLdr for 19.0.0 (new checks, dummy function call).

Also, fix a few very embarrassing mistakes in kernel ldr:
* We have been mapping the page table region RWX for a few years now,
  accidentally.
* My attempt at making initial page tables not use bit 58 was broken in
  multiple ways.
---
 .../arch/arm64/init/kern_k_init_page_table.hpp | 12 ++++++------
 .../arch/arm64/kern_k_page_table_entry.hpp     |  3 ++-
 .../arch/arm64/kern_k_page_table_impl.cpp      | 13 ++++++-------
 .../kernel_ldr/source/kern_init_loader.cpp     | 17 +++++++++++++----
 4 files changed, 27 insertions(+), 18 deletions(-)

diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
index 805b967db..b653da7bd 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/init/kern_k_init_page_table.hpp
@@ -110,7 +110,7 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-                if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
+                if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
                     virt_addr += L1BlockSize;
@@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-                if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+                if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
                     const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -144,7 +144,7 @@ namespace ams::kern::arch::arm64::init {
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

                 /* L3 must be block or empty. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());

                 const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
@@ -164,7 +164,7 @@ namespace ams::kern::arch::arm64::init {
                 L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);

                 /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-                if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
+                if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
                     if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
@@ -182,7 +182,7 @@ namespace ams::kern::arch::arm64::init {

                 /* Table, so check if we're mapped in L2. */
                 L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-                if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+                if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
                     const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
                     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
                     MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64::init {
                 L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);

                 /* L3 must be block or empty. */
-                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
+                MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());

                 const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
                 MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
diff --git a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
index 6fce69f81..ed32262f9 100644
--- a/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
+++ b/libraries/libmesosphere/include/mesosphere/arch/arm64/kern_k_page_table_entry.hpp
@@ -122,7 +122,7 @@ namespace ams::kern::arch::arm64 {

             /* Construct a new attribute. */
             constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
-                : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
+                : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(m))
             {
                 /* ... */
             }
@@ -205,6 +205,7 @@ namespace ams::kern::arch::arm64 {

             constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
             constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
+            constexpr ALWAYS_INLINE bool IsMappedEmpty() const { return this->GetBits(0, 2) == 0; }
             constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }

             constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
diff --git a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
index 32dfb00e5..150aa1d1c 100644
--- a/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
+++ b/libraries/libmesosphere/source/arch/arm64/kern_k_page_table_impl.cpp
@@ -21,12 +21,6 @@ namespace ams::kern::arch::arm64 {
         m_table       = static_cast<L1PageTableEntry *>(tb);
         m_is_kernel   = true;
         m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
-    }
-
-    void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
-        m_table       = static_cast<L1PageTableEntry *>(tb);
-        m_is_kernel   = false;
-        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;

         /* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
         PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
@@ -68,7 +62,6 @@ namespace ams::kern::arch::arm64 {
             /* Advance. */
             while (true) {
                 /* Advance to the next entry at the current level. */
-                ++level_entries[level];
                 if (!util::IsAligned(reinterpret_cast<uintptr_t>(++level_entries[level]), PageSize)) {
                     break;
                 }
@@ -83,6 +76,12 @@ namespace ams::kern::arch::arm64 {
         }
     }

+    void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
+        m_table       = static_cast<L1PageTableEntry *>(tb);
+        m_is_kernel   = false;
+        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
+    }
+
     L1PageTableEntry *KPageTableImpl::Finalize() {
         return m_table;
     }
diff --git a/mesosphere/kernel_ldr/source/kern_init_loader.cpp b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
index 33fe56393..43979905c 100644
--- a/mesosphere/kernel_ldr/source/kern_init_loader.cpp
+++ b/mesosphere/kernel_ldr/source/kern_init_loader.cpp
@@ -49,7 +49,7 @@ namespace ams::kern::init::loader {

         constinit void *g_final_state[2];

-        void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
+        void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout, const uintptr_t &ini_base_address) {
            /* Adjust layout to be correct. */
            {
                const ptrdiff_t layout_offset = reinterpret_cast<uintptr_t>(layout) - base_address;
@@ -74,6 +74,12 @@ namespace ams::kern::init::loader {
             const uintptr_t diff = GetInteger(correct_base) - base_address;
             const size_t size = layout->rw_end_offset;

+            /* Check that the new kernel doesn't overlap with us. */
+            MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(correct_base) >= reinterpret_cast<uintptr_t>(__bin_end__)) || (GetInteger(correct_base) + size <= reinterpret_cast<uintptr_t>(__bin_start__)));
+
+            /* Check that the new kernel doesn't overlap with the initial process binary. */
+            MESOSPHERE_INIT_ABORT_UNLESS((ini_base_address + InitialProcessBinarySizeMax <= GetInteger(correct_base)) || (GetInteger(correct_base) + size <= ini_base_address));
+
             /* Conversion from KPhysicalAddress to void * is safe here, because MMU is not set up yet. */
             std::memmove(reinterpret_cast<void *>(GetInteger(correct_base)), reinterpret_cast<void *>(base_address), size);
             base_address += diff;
@@ -90,11 +96,11 @@ namespace ams::kern::init::loader {
             constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
             const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__bin_start__), PageSize);
             const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__bin_end__), PageSize) - kernel_ldr_base;
-            init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator, 0);
+            init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelLdrRWXIdentityAttribute, allocator, 0);

             /* Map in the page table region as RW- for ourselves. */
             constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-            init_pt.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator, 0);
+            init_pt.Map(page_table_region, page_table_region_size, page_table_region, PageTableRegionRWAttribute, allocator, 0);

             /* Place the L1 table addresses in the relevant system registers. */
             cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress());
@@ -165,7 +171,7 @@ namespace ams::kern::init::loader {
     uintptr_t Main(uintptr_t base_address, KernelLayout *layout, uintptr_t ini_base_address) {
         /* Relocate the kernel to the correct physical base address. */
         /* Base address and layout are passed by reference and modified. */
-        RelocateKernelPhysically(base_address, layout);
+        RelocateKernelPhysically(base_address, layout, ini_base_address);

         /* Validate kernel layout. */
         const uintptr_t rx_offset = layout->rx_offset;
@@ -229,6 +235,9 @@ namespace ams::kern::init::loader {

         /* Setup initial identity mapping. TTBR1 table passed by reference. */
         SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, resource_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator, reinterpret_cast<KernelSystemRegisters *>(base_address + sysreg_offset));

+        /* NOTE: On 19.0.0+, Nintendo calls an unknown function here on init_pt and g_initial_page_allocator. */
+        /* This is stubbed in prod KernelLdr. */
+
         /* Generate a random slide for the kernel's base address. */
         const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(init_pt, base_address, bss_end_offset);
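
Supplementary notes on the changes above. The snippets below are standalone sketches with hypothetical names, not mesosphere code.

1) IsMappedEmpty() vs IsEmpty(). The helper added to PageTableEntry tests only the low two bits of an AArch64 translation table descriptor, which is how hardware classifies an entry. A descriptor whose valid bit is clear is unmapped to hardware even if software-reserved bits (such as bit 58, per the commit message) are still set, so a whole-value comparison against zero is the wrong emptiness test. A minimal sketch of the same bit tests:

    #include <cstdint>

    /* Bits [1:0] of a descriptor: 0b01 = block, 0b11 = table, bit 0 clear = invalid. */
    constexpr bool IsMappedBlock(std::uint64_t attr) { return (attr & 0b11) == 0b01; }
    constexpr bool IsMappedTable(std::uint64_t attr) { return (attr & 0b11) == 0b11; }
    constexpr bool IsMappedEmpty(std::uint64_t attr) { return (attr & 0b11) == 0b00; }
    constexpr bool IsMapped(std::uint64_t attr)      { return (attr & 0b01) != 0;    }

    /* An invalid descriptor may still carry nonzero software bits; it must */
    /* still be treated as unmapped, which a raw == 0 comparison would miss. */
    static_assert(IsMappedEmpty(0));
    static_assert(IsMappedEmpty(UINT64_C(1) << 58));
    static_assert(!IsMapped(UINT64_C(1) << 58));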
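2) The removed "++level_entries[level];" in kern_k_page_table_impl.cpp. The pre-increment inside the IsAligned() condition already advances the cursor, so the deleted statement made each iteration advance by two entries: half the entries were never visited (and so never had their KPageTable invariants applied), and the page-boundary test ran on the wrong entry. A compilable illustration, simplified to counting entries until the cursor crosses onto the next page:

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    int main() {
        constexpr std::size_t NumEntries = 512;  /* one 4 KiB table of 8-byte entries */
        alignas(4096) static std::uint64_t table[NumEntries] = {};

        std::uint64_t *cursor = table;
        std::size_t visited = 0;
        while (true) {
            ++visited;  /* "process" the current entry */
            /* Advance once; stop when the cursor crosses a 4 KiB boundary, */
            /* i.e. when this table's entries are exhausted.                */
            if ((reinterpret_cast<std::uintptr_t>(++cursor) % 4096) == 0) {
                break;
            }
        }
        assert(visited == NumEntries);  /* a duplicated '++cursor' would halve this */
        return 0;
    }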
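3) The two new checks in RelocateKernelPhysically() are both instances of the usual disjointness predicate for half-open ranges: [a_start, a_end) and [b_start, b_end) do not overlap exactly when one ends at or before the other begins. A sketch with a hypothetical helper:

    #include <cstdint>

    constexpr bool RangesDisjoint(std::uintptr_t a_start, std::uintptr_t a_end,
                                  std::uintptr_t b_start, std::uintptr_t b_end) {
        /* Half-open ranges: touching endpoints do not count as overlap. */
        return (a_end <= b_start) || (b_end <= a_start);
    }

    static_assert( RangesDisjoint(0x1000, 0x2000, 0x2000, 0x3000)); /* adjacent: fine  */
    static_assert(!RangesDisjoint(0x1000, 0x2001, 0x2000, 0x3000)); /* 1-byte overlap  */

The first abort guards the relocated kernel against KernelLdr's own image, [__bin_start__, __bin_end__); the second against the initial process binary, [ini_base_address, ini_base_address + InitialProcessBinarySizeMax). Since the kernel is memmove'd over whatever lies at correct_base, either overlap would corrupt a binary that is still needed.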