kern: update KernelLdr for 19.0.0 (new checks, dummy function call).

Also, fix a few very embarrassing mistakes in KernelLdr (both are sketched at the descriptor level below):
* We have been mapping the page table region RWX for a few years now, accidentally.
* My attempt at making initial page tables not use bit 58 was broken in multiple ways.
Author: Michael Scire
Date:   2025-04-07 16:46:58 -07:00
Commit: 85fd13f724 (parent 4e99a5e08d)
4 changed files with 27 additions and 18 deletions
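For reference on both bullet points, here is a minimal descriptor-level sketch, assuming the standard Armv8-A VMSAv8-64 stage-1 layout; the names (KernelRw, KernelRwx, SoftwareBits, and so on) are illustrative and are not Mesosphere's PageTableEntry API.

#include <cstdint>

namespace sketch {

    /* Selected fields of an AArch64 stage-1 block/page descriptor.  Real
     * attributes also carry AP[2:1], shareability, AttrIndx, etc. */
    constexpr std::uint64_t DescValid     = 1ull << 0;    /* bit 0: descriptor valid */
    constexpr std::uint64_t AccessFlag    = 1ull << 10;   /* AF */
    constexpr std::uint64_t PrivExecNever = 1ull << 53;   /* PXN */
    constexpr std::uint64_t UserExecNever = 1ull << 54;   /* UXN */
    constexpr std::uint64_t SoftwareBits  = 0xFull << 55; /* bits [58:55]: ignored by hardware, reserved for software */

    /* Kernel RW- and RWX mappings differ only in PXN, so passing the wrong
     * attribute constant to a Map() call is invisible at the call site but
     * leaves the region executable at EL1. */
    constexpr std::uint64_t KernelRwx = DescValid | AccessFlag | UserExecNever;
    constexpr std::uint64_t KernelRw  = DescValid | AccessFlag | UserExecNever | PrivExecNever;

    constexpr bool IsExecutableAtEl1(std::uint64_t attr) { return (attr & PrivExecNever) == 0; }

    static_assert((KernelRwx ^ KernelRw) == PrivExecNever);
    static_assert(IsExecutableAtEl1(KernelRwx) && !IsExecutableAtEl1(KernelRw));

    /* "Bit 58" is one of the software bits: the MMU never interprets it, so any
     * scheme that stashes state there has to be applied and stripped consistently. */
    static_assert(((1ull << 58) & SoftwareBits) != 0);

}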


@@ -110,7 +110,7 @@ namespace ams::kern::arch::arm64::init {
     L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-    if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
+    if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
         MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
         virt_addr += L1BlockSize;
@@ -126,7 +126,7 @@ namespace ams::kern::arch::arm64::init {
     /* Table, so check if we're mapped in L2. */
     L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-    if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+    if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
         const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
         MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -144,7 +144,7 @@ namespace ams::kern::arch::arm64::init {
     L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
     /* L3 must be block or empty. */
-    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
+    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());
     const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
@@ -164,7 +164,7 @@ namespace ams::kern::arch::arm64::init {
     L1PageTableEntry *l1_entry = this->GetL1Entry(virt_addr);
     /* If an L1 block is mapped or we're empty, advance by L1BlockSize. */
-    if (l1_entry->IsMappedBlock() || l1_entry->IsEmpty()) {
+    if (l1_entry->IsMappedBlock() || l1_entry->IsMappedEmpty()) {
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), L1BlockSize));
         MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= L1BlockSize);
         if (l1_entry->IsMappedBlock() && block_size == L1BlockSize) {
@@ -182,7 +182,7 @@ namespace ams::kern::arch::arm64::init {
     /* Table, so check if we're mapped in L2. */
     L2PageTableEntry *l2_entry = GetL2Entry(l1_entry, virt_addr);
-    if (l2_entry->IsMappedBlock() || l2_entry->IsEmpty()) {
+    if (l2_entry->IsMappedBlock() || l2_entry->IsMappedEmpty()) {
         const size_t advance_size = (l2_entry->IsMappedBlock() && l2_entry->IsContiguous()) ? L2ContiguousBlockSize : L2BlockSize;
         MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));
         MESOSPHERE_INIT_ABORT_UNLESS(static_cast<size_t>(end_virt_addr - virt_addr) >= advance_size);
@@ -202,7 +202,7 @@ namespace ams::kern::arch::arm64::init {
     L3PageTableEntry *l3_entry = GetL3Entry(l2_entry, virt_addr);
     /* L3 must be block or empty. */
-    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsEmpty());
+    MESOSPHERE_INIT_ABORT_UNLESS(l3_entry->IsMappedBlock() || l3_entry->IsMappedEmpty());
     const size_t advance_size = (l3_entry->IsMappedBlock() && l3_entry->IsContiguous()) ? L3ContiguousBlockSize : L3BlockSize;
     MESOSPHERE_INIT_ABORT_UNLESS(util::IsAligned(GetInteger(virt_addr), advance_size));


@@ -122,7 +122,7 @@ namespace ams::kern::arch::arm64 {
     /* Construct a new attribute. */
     constexpr explicit ALWAYS_INLINE PageTableEntry(Permission perm, PageAttribute p_a, Shareable share, MappingFlag m)
-        : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(ExtensionFlag_Valid) | static_cast<u64>(m))
+        : m_attributes(static_cast<u64>(perm) | static_cast<u64>(AccessFlag_Accessed) | static_cast<u64>(p_a) | static_cast<u64>(share) | static_cast<u64>(m))
     {
         /* ... */
     }
@@ -205,6 +205,7 @@ namespace ams::kern::arch::arm64 {
     constexpr ALWAYS_INLINE bool IsMappedBlock() const { return this->GetBits(0, 2) == 1; }
     constexpr ALWAYS_INLINE bool IsMappedTable() const { return this->GetBits(0, 2) == 3; }
+    constexpr ALWAYS_INLINE bool IsMappedEmpty() const { return this->GetBits(0, 2) == 0; }
     constexpr ALWAYS_INLINE bool IsMapped() const { return this->GetBits(0, 1) != 0; }

     constexpr ALWAYS_INLINE decltype(auto) SetUserExecuteNever(bool en) { this->SetBit(54, en); return *this; }
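The new IsMappedEmpty() accessor inspects only descriptor bits [1:0], rather than comparing the whole attribute word against zero; presumably (given the commit message) this is what lets entries that carry only software bits such as bit 58 still count as unmapped. A standalone sketch of the predicate semantics, using illustrative free functions rather than the real PageTableEntry methods:

#include <cstdint>

constexpr std::uint64_t LowBits(std::uint64_t attr) { return attr & 0b11; }

constexpr bool IsMappedBlock(std::uint64_t attr) { return LowBits(attr) == 0b01; }
constexpr bool IsMappedTable(std::uint64_t attr) { return LowBits(attr) == 0b11; }
constexpr bool IsMappedEmpty(std::uint64_t attr) { return LowBits(attr) == 0b00; }

/* A descriptor whose only set bit is a software bit (e.g. bit 58) is invalid to
 * the MMU, and IsMappedEmpty() still classifies it as empty, whereas an exact
 * all-zero comparison would not. */
static_assert(IsMappedEmpty(1ull << 58));
static_assert(!IsMappedBlock(1ull << 58) && !IsMappedTable(1ull << 58));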


@@ -21,12 +21,6 @@ namespace ams::kern::arch::arm64 {
         m_table = static_cast<L1PageTableEntry *>(tb);
         m_is_kernel = true;
         m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
-    }
-
-    void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
-        m_table = static_cast<L1PageTableEntry *>(tb);
-        m_is_kernel = false;
-        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;

         /* Page table entries created by KInitialPageTable need to be iterated and modified to ensure KPageTable invariants. */
         PageTableEntry *level_entries[EntryLevel_Count] = { nullptr, nullptr, m_table };
@@ -68,7 +62,6 @@ namespace ams::kern::arch::arm64 {
         /* Advance. */
         while (true) {
             /* Advance to the next entry at the current level. */
-            ++level_entries[level];
             if (!util::IsAligned(reinterpret_cast<uintptr_t>(++level_entries[level]), PageSize)) {
                 break;
             }
@@ -83,6 +76,12 @@ namespace ams::kern::arch::arm64 {
         }
     }

+    void KPageTableImpl::InitializeForProcess(void *tb, KVirtualAddress start, KVirtualAddress end) {
+        m_table = static_cast<L1PageTableEntry *>(tb);
+        m_is_kernel = false;
+        m_num_entries = util::AlignUp(end - start, L1BlockSize) / L1BlockSize;
+    }
+
    L1PageTableEntry *KPageTableImpl::Finalize() {
        return m_table;
    }
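The hunks above move the KInitialPageTable fix-up loop from InitializeForProcess into InitializeForKernel (InitializeForProcess is re-added below the loop as a plain initializer), and remove a duplicated increment: the pre-increment inside the alignment check already advances the pointer, so the standalone "++level_entries[level];" advanced twice per step. A minimal sketch of the advance-within-a-table pattern, using dummy types rather than KPageTableImpl's real ones:

#include <cstddef>
#include <cstdint>

constexpr std::size_t PageSize = 0x1000;

struct Entry { std::uint64_t attributes; };

/* Advance to the next 8-byte entry; return true while the pointer is still
 * inside the same PageSize-aligned table.  The pre-increment here is the only
 * increment: an additional "++cur;" before this check (as in the removed line)
 * would advance twice and skip entries. */
inline bool AdvanceWithinTable(Entry *&cur) {
    return (reinterpret_cast<std::uintptr_t>(++cur) % PageSize) != 0;
}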


@@ -49,7 +49,7 @@ namespace ams::kern::init::loader {
     constinit void *g_final_state[2];

-    void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout) {
+    void RelocateKernelPhysically(uintptr_t &base_address, KernelLayout *&layout, const uintptr_t &ini_base_address) {
         /* Adjust layout to be correct. */
         {
             const ptrdiff_t layout_offset = reinterpret_cast<uintptr_t>(layout) - base_address;
@@ -74,6 +74,12 @@ namespace ams::kern::init::loader {
             const uintptr_t diff = GetInteger(correct_base) - base_address;
             const size_t size = layout->rw_end_offset;

+            /* Check that the new kernel doesn't overlap with us. */
+            MESOSPHERE_INIT_ABORT_UNLESS((GetInteger(correct_base) >= reinterpret_cast<uintptr_t>(__bin_end__)) || (GetInteger(correct_base) + size <= reinterpret_cast<uintptr_t>(__bin_start__)));
+
+            /* Check that the new kernel doesn't overlap with the initial process binary. */
+            MESOSPHERE_INIT_ABORT_UNLESS((ini_base_address + InitialProcessBinarySizeMax <= GetInteger(correct_base)) || (GetInteger(correct_base) + size <= ini_base_address));
+
             /* Conversion from KPhysicalAddress to void * is safe here, because MMU is not set up yet. */
             std::memmove(reinterpret_cast<void *>(GetInteger(correct_base)), reinterpret_cast<void *>(base_address), size);
             base_address += diff;
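Both new aborts are instances of the usual half-open range disjointness test: the relocated kernel region must not overlap KernelLdr's own image, nor the initial process binary region. A self-contained sketch with illustrative names (RangesDisjoint is not a Mesosphere helper):

#include <cstdint>

constexpr bool RangesDisjoint(std::uintptr_t a_start, std::uintptr_t a_end,
                              std::uintptr_t b_start, std::uintptr_t b_end) {
    /* [a_start, a_end) and [b_start, b_end) do not overlap iff one ends
     * at or before the point where the other begins. */
    return (a_end <= b_start) || (b_end <= a_start);
}

/* First abort: new kernel [correct_base, correct_base + size) vs KernelLdr
 * [__bin_start__, __bin_end__).  Second abort: new kernel vs the initial
 * process binary [ini_base_address, ini_base_address + InitialProcessBinarySizeMax). */
static_assert(RangesDisjoint(0x1000, 0x2000, 0x2000, 0x3000));  /* adjacent ranges are fine */
static_assert(!RangesDisjoint(0x1000, 0x2000, 0x1800, 0x2800)); /* overlapping ranges are not */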
@@ -90,11 +96,11 @@ namespace ams::kern::init::loader {
         constexpr PageTableEntry KernelLdrRWXIdentityAttribute(PageTableEntry::Permission_KernelRWX, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
         const uintptr_t kernel_ldr_base = util::AlignDown(reinterpret_cast<uintptr_t>(__bin_start__), PageSize);
         const uintptr_t kernel_ldr_size = util::AlignUp(reinterpret_cast<uintptr_t>(__bin_end__), PageSize) - kernel_ldr_base;
-        init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelRWXIdentityAttribute, allocator, 0);
+        init_pt.Map(kernel_ldr_base, kernel_ldr_size, kernel_ldr_base, KernelLdrRWXIdentityAttribute, allocator, 0);

         /* Map in the page table region as RW- for ourselves. */
         constexpr PageTableEntry PageTableRegionRWAttribute(PageTableEntry::Permission_KernelRW, PageTableEntry::PageAttribute_NormalMemory, PageTableEntry::Shareable_InnerShareable, PageTableEntry::MappingFlag_Mapped);
-        init_pt.Map(page_table_region, page_table_region_size, page_table_region, KernelRWXIdentityAttribute, allocator, 0);
+        init_pt.Map(page_table_region, page_table_region_size, page_table_region, PageTableRegionRWAttribute, allocator, 0);

         /* Place the L1 table addresses in the relevant system registers. */
         cpu::SetTtbr0El1(init_pt.GetTtbr0L1TableAddress());
@@ -165,7 +171,7 @@ namespace ams::kern::init::loader {
    uintptr_t Main(uintptr_t base_address, KernelLayout *layout, uintptr_t ini_base_address) {
        /* Relocate the kernel to the correct physical base address. */
        /* Base address and layout are passed by reference and modified. */
-        RelocateKernelPhysically(base_address, layout);
+        RelocateKernelPhysically(base_address, layout, ini_base_address);

        /* Validate kernel layout. */
        const uintptr_t rx_offset = layout->rx_offset;
@@ -229,6 +235,9 @@ namespace ams::kern::init::loader {
        /* Setup initial identity mapping. TTBR1 table passed by reference. */
        SetupInitialIdentityMapping(init_pt, base_address, bss_end_offset, resource_end_address, InitialPageTableRegionSizeMax, g_initial_page_allocator, reinterpret_cast<KernelSystemRegisters *>(base_address + sysreg_offset));

+        /* NOTE: On 19.0.0+, Nintendo calls an unknown function here on init_pt and g_initial_page_allocator. */
+        /* This is stubbed in prod KernelLdr. */
+
        /* Generate a random slide for the kernel's base address. */
        const KVirtualAddress virtual_base_address = GetRandomKernelBaseAddress(init_pt, base_address, bss_end_offset);