Kernel: Stop allocating page tables from the super pages pool

We now use the regular "user" physical pages for on-demand page table
allocations. This was by far the biggest source of super physical page
exhaustion, so that bug should be a thing of the past now. :^)

We still have super pages, but they are barely used. They remain useful
for code that requires memory with a low physical address.

Fixes #1000.
This commit is contained in:
Andreas Kling 2020-01-17 22:18:56 +01:00
parent f71fc88393
commit ad1f79fb4a
Notes: sideshowbarker 2024-07-19 10:00:19 +09:00
3 changed files with 19 additions and 34 deletions

View file

@@ -45,11 +45,6 @@ void MemoryManager::initialize_paging()
dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3()); dbgprintf("MM: Kernel page directory @ %p\n", kernel_page_directory().cr3());
#endif #endif
m_quickmap_addr = VirtualAddress(0xffe00000);
#ifdef MM_DEBUG
dbgprintf("MM: Quickmap will use %p\n", m_quickmap_addr.get());
#endif
parse_memory_map(); parse_memory_map();
#ifdef MM_DEBUG #ifdef MM_DEBUG
@@ -146,7 +141,7 @@ void MemoryManager::protect_kernel_image()
void MemoryManager::setup_low_1mb() void MemoryManager::setup_low_1mb()
{ {
m_low_page_table = allocate_supervisor_physical_page(); m_low_page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
auto* pd_zero = quickmap_pd(kernel_page_directory(), 0); auto* pd_zero = quickmap_pd(kernel_page_directory(), 0);
pd_zero[1].set_present(false); pd_zero[1].set_present(false);
@@ -262,7 +257,7 @@ PageTableEntry& MemoryManager::ensure_pte(PageDirectory& page_directory, Virtual
#ifdef MM_DEBUG #ifdef MM_DEBUG
dbgprintf("MM: PDE %u not present (requested for V%p), allocating\n", page_directory_index, vaddr.get()); dbgprintf("MM: PDE %u not present (requested for V%p), allocating\n", page_directory_index, vaddr.get());
#endif #endif
auto page_table = allocate_supervisor_physical_page(); auto page_table = allocate_user_physical_page(ShouldZeroFill::Yes);
#ifdef MM_DEBUG #ifdef MM_DEBUG
dbgprintf("MM: PD K%p (%s) at P%p allocated page table #%u (for V%p) at P%p\n", dbgprintf("MM: PD K%p (%s) at P%p allocated page table #%u (for V%p) at P%p\n",
&page_directory, &page_directory,
@@ -395,7 +390,6 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
#ifdef PAGE_FAULT_DEBUG #ifdef PAGE_FAULT_DEBUG
dbgprintf("MM: handle_page_fault(%w) at V%p\n", fault.code(), fault.vaddr().get()); dbgprintf("MM: handle_page_fault(%w) at V%p\n", fault.code(), fault.vaddr().get());
#endif #endif
ASSERT(fault.vaddr() != m_quickmap_addr);
auto* region = region_from_vaddr(fault.vaddr()); auto* region = region_from_vaddr(fault.vaddr());
if (!region) { if (!region) {
kprintf("NP(error) fault at invalid address V%p\n", fault.vaddr().get()); kprintf("NP(error) fault at invalid address V%p\n", fault.vaddr().get());
@@ -663,36 +657,29 @@ u8* MemoryManager::quickmap_page(PhysicalPage& physical_page)
ASSERT_INTERRUPTS_DISABLED(); ASSERT_INTERRUPTS_DISABLED();
ASSERT(!m_quickmap_in_use); ASSERT(!m_quickmap_in_use);
m_quickmap_in_use = true; m_quickmap_in_use = true;
auto page_vaddr = m_quickmap_addr;
auto& pte = ensure_pte(kernel_page_directory(), page_vaddr); auto& pte = boot_pd3_pde1023_pt[0];
pte.set_physical_page_base(physical_page.paddr().get()); if (pte.physical_page_base() != physical_page.paddr().as_ptr()) {
pte.set_present(true);
pte.set_writable(true);
pte.set_user_allowed(false);
flush_tlb(page_vaddr);
ASSERT((u32)pte.physical_page_base() == physical_page.paddr().get());
#ifdef MM_DEBUG #ifdef MM_DEBUG
dbg() << "MM: >> quickmap_page " << page_vaddr << " => " << physical_page.paddr() << " @ PTE=" << (void*)pte.raw() << " {" << &pte << "}"; dbgprintf("quickmap_page: Mapping P%p at 0xffe00000 in pte @ %p\n", physical_page.paddr().as_ptr(), &pte);
#endif #endif
return page_vaddr.as_ptr(); pte.set_physical_page_base(physical_page.paddr().get());
pte.set_present(true);
pte.set_writable(true);
pte.set_user_allowed(false);
flush_tlb(VirtualAddress(0xffe00000));
}
return (u8*)0xffe00000;
} }
void MemoryManager::unquickmap_page() void MemoryManager::unquickmap_page()
{ {
ASSERT_INTERRUPTS_DISABLED(); ASSERT_INTERRUPTS_DISABLED();
ASSERT(m_quickmap_in_use); ASSERT(m_quickmap_in_use);
auto page_vaddr = m_quickmap_addr; auto& pte = boot_pd3_pde1023_pt[0];
auto& pte = ensure_pte(kernel_page_directory(), page_vaddr);
#ifdef MM_DEBUG
auto old_physical_address = pte.physical_page_base();
#endif
pte.set_physical_page_base(0); pte.set_physical_page_base(0);
pte.set_present(false); pte.set_present(false);
pte.set_writable(false); flush_tlb(VirtualAddress(0xffe00000));
flush_tlb(page_vaddr);
#ifdef MM_DEBUG
dbg() << "MM: >> unquickmap_page " << page_vaddr << " =/> " << old_physical_address;
#endif
m_quickmap_in_use = false; m_quickmap_in_use = false;
} }

View file

@@ -77,7 +77,7 @@ public:
Yes Yes
}; };
RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill); RefPtr<PhysicalPage> allocate_user_physical_page(ShouldZeroFill = ShouldZeroFill::Yes);
RefPtr<PhysicalPage> allocate_supervisor_physical_page(); RefPtr<PhysicalPage> allocate_supervisor_physical_page();
void deallocate_user_physical_page(PhysicalPage&&); void deallocate_user_physical_page(PhysicalPage&&);
void deallocate_supervisor_physical_page(PhysicalPage&&); void deallocate_supervisor_physical_page(PhysicalPage&&);
@@ -151,8 +151,6 @@ private:
RefPtr<PageDirectory> m_kernel_page_directory; RefPtr<PageDirectory> m_kernel_page_directory;
RefPtr<PhysicalPage> m_low_page_table; RefPtr<PhysicalPage> m_low_page_table;
VirtualAddress m_quickmap_addr;
unsigned m_user_physical_pages { 0 }; unsigned m_user_physical_pages { 0 };
unsigned m_user_physical_pages_used { 0 }; unsigned m_user_physical_pages_used { 0 };
unsigned m_super_physical_pages { 0 }; unsigned m_super_physical_pages { 0 };

View file

@@ -46,9 +46,9 @@ PageDirectory::PageDirectory(Process& process, const RangeAllocator* parent_rang
{ {
// Set up a userspace page directory // Set up a userspace page directory
m_directory_table = MM.allocate_supervisor_physical_page(); m_directory_table = MM.allocate_supervisor_physical_page();
m_directory_pages[0] = MM.allocate_supervisor_physical_page(); m_directory_pages[0] = MM.allocate_user_physical_page();
m_directory_pages[1] = MM.allocate_supervisor_physical_page(); m_directory_pages[1] = MM.allocate_user_physical_page();
m_directory_pages[2] = MM.allocate_supervisor_physical_page(); m_directory_pages[2] = MM.allocate_user_physical_page();
// Share the top 1 GB of kernel-only mappings (>=3GB or >=0xc0000000) // Share the top 1 GB of kernel-only mappings (>=3GB or >=0xc0000000)
m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3]; m_directory_pages[3] = MM.kernel_page_directory().m_directory_pages[3];