Kernel: Refactor Region/PageDirectory ownership model.

Make PageDirectory retainable and have each Region co-own the PageDirectory
it's mapped into. When unmapped, a Region has no associated PageDirectory.

This allows Region to automatically unmap itself when destroyed.
Andreas Kling 2019-01-22 05:01:00 +01:00
parent afccbe51a4
commit 2f2f28f212
5 changed files with 60 additions and 68 deletions
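In outline, the new ownership model works like this (a minimal sketch using std::shared_ptr as a stand-in for the kernel's Retainable/RetainPtr; the map_into()/unmap() helpers and the stripped-down classes are illustrative only, not the actual kernel types):

// Sketch: a Region co-owns the PageDirectory it is mapped into.
#include <cassert>
#include <memory>

struct PageDirectory {
    // Ref-counted: every Region mapped into this directory keeps it alive.
};

struct Region {
    // Null while unmapped; non-null means "mapped into this directory".
    std::shared_ptr<PageDirectory> m_page_directory;

    void map_into(std::shared_ptr<PageDirectory> pd)
    {
        m_page_directory = std::move(pd); // take a strong reference
        // ... install page table entries ...
    }

    void unmap()
    {
        assert(m_page_directory);
        // ... clear page table entries and flush the TLB ...
        m_page_directory = nullptr; // drop the reference
    }

    ~Region()
    {
        // A region that is still mapped can clean up after itself,
        // because it now knows which directory it lives in.
        if (m_page_directory)
            unmap();
    }
};

In the diff below, map_region_at_address() stores the directory in Region::m_page_directory, unmap_region() clears it, and Region::~Region() unmaps itself if it is still mapped.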

MemoryManager.cpp

@@ -19,7 +19,7 @@ MemoryManager& MM
 MemoryManager::MemoryManager()
 {
-    m_kernel_page_directory = make<PageDirectory>(PhysicalAddress(0x4000));
+    m_kernel_page_directory = PageDirectory::create_at_fixed_address(PhysicalAddress(0x4000));
     m_page_table_zero = (dword*)0x6000;
     initialize_paging();
@@ -212,7 +212,7 @@ Region* MemoryManager::region_from_laddr(Process& process, LinearAddress laddr)
     return nullptr;
 }
-bool MemoryManager::zero_page(PageDirectory& page_directory, Region& region, unsigned page_index_in_region)
+bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
     auto& vmo = region.vmo();
@@ -225,11 +225,11 @@ bool MemoryManager::zero_page(PageDirectory& page_directory, Region& region, uns
     unquickmap_page();
     region.cow_map.set(page_index_in_region, false);
     vmo.physical_pages()[page_index_in_region] = move(physical_page);
-    remap_region_page(page_directory, region, page_index_in_region, true);
+    remap_region_page(region, page_index_in_region, true);
     return true;
 }
-bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned page_index_in_region)
+bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
     auto& vmo = region.vmo();
@@ -238,7 +238,7 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
 #endif
         region.cow_map.set(page_index_in_region, false);
-        remap_region_page(process.page_directory(), region, page_index_in_region, true);
+        remap_region_page(region, page_index_in_region, true);
         return true;
     }
@@ -256,12 +256,13 @@ bool MemoryManager::copy_on_write(Process& process, Region& region, unsigned pag
     vmo.physical_pages()[page_index_in_region] = move(physical_page);
     unquickmap_page();
     region.cow_map.set(page_index_in_region, false);
-    remap_region_page(process.page_directory(), region, page_index_in_region, true);
+    remap_region_page(region, page_index_in_region, true);
     return true;
 }
-bool Region::page_in(PageDirectory& page_directory)
+bool Region::page_in()
 {
+    ASSERT(m_page_directory);
     ASSERT(!vmo().is_anonymous());
     ASSERT(vmo().inode());
 #ifdef MM_DEBUG
@@ -270,17 +271,18 @@ bool Region::page_in(PageDirectory& page_directory)
     for (size_t i = 0; i < page_count(); ++i) {
         auto& vmo_page = vmo().physical_pages()[first_page_index() + i];
         if (vmo_page.is_null()) {
-            bool success = MM.page_in_from_inode(page_directory, *this, i);
+            bool success = MM.page_in_from_inode(*this, i);
            if (!success)
                return false;
         }
-        MM.remap_region_page(page_directory, *this, i, true);
+        MM.remap_region_page(*this, i, true);
     }
     return true;
 }
-bool MemoryManager::page_in_from_inode(PageDirectory& page_directory, Region& region, unsigned page_index_in_region)
+bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
+    ASSERT(region.m_page_directory);
     auto& vmo = region.vmo();
     ASSERT(!vmo.is_anonymous());
     ASSERT(vmo.inode());
@@ -292,7 +294,7 @@ bool MemoryManager::page_in_from_inode(PageDirectory& page_directory, Region& re
        kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
        return false;
     }
-    remap_region_page(page_directory, region, page_index_in_region, true);
+    remap_region_page(region, page_index_in_region, true);
     byte* dest_ptr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE).asPtr();
 #ifdef MM_DEBUG
     dbgprintf("MM: page_in_from_inode ready to read from inode, will write to L%x!\n", dest_ptr);
@@ -327,17 +329,17 @@ PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
     if (fault.is_not_present()) {
         if (region->vmo().inode()) {
            dbgprintf("NP(inode) fault in Region{%p}[%u]\n", region, page_index_in_region);
-           page_in_from_inode(*current->m_page_directory, *region, page_index_in_region);
+           page_in_from_inode(*region, page_index_in_region);
            return PageFaultResponse::Continue;
         } else {
            dbgprintf("NP(zero) fault in Region{%p}[%u]\n", region, page_index_in_region);
-           zero_page(*current->m_page_directory, *region, page_index_in_region);
+           zero_page(*region, page_index_in_region);
            return PageFaultResponse::Continue;
         }
     } else if (fault.is_protection_violation()) {
         if (region->cow_map.get(page_index_in_region)) {
            dbgprintf("PV(cow) fault in Region{%p}[%u]\n", region, page_index_in_region);
-           bool success = copy_on_write(*current, *region, page_index_in_region);
+           bool success = copy_on_write(*region, page_index_in_region);
            ASSERT(success);
            return PageFaultResponse::Continue;
         }
@@ -424,11 +426,12 @@ void MemoryManager::unquickmap_page()
 #endif
 }
-void MemoryManager::remap_region_page(PageDirectory& page_directory, Region& region, unsigned page_index_in_region, bool user_allowed)
+void MemoryManager::remap_region_page(Region& region, unsigned page_index_in_region, bool user_allowed)
 {
+    ASSERT(region.m_page_directory);
     InterruptDisabler disabler;
     auto page_laddr = region.linearAddress.offset(page_index_in_region * PAGE_SIZE);
-    auto pte = ensure_pte(page_directory, page_laddr);
+    auto pte = ensure_pte(*region.m_page_directory, page_laddr);
     auto& physical_page = region.vmo().physical_pages()[page_index_in_region];
     ASSERT(physical_page);
     pte.set_physical_page_base(physical_page->paddr().get());
@@ -438,9 +441,9 @@ void MemoryManager::remap_region_page(PageDirectory& page_directory, Region& reg
     else
         pte.set_writable(region.is_writable);
     pte.set_user_allowed(user_allowed);
-    page_directory.flush(page_laddr);
+    region.m_page_directory->flush(page_laddr);
 #ifdef MM_DEBUG
-    dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", &page_directory, pte.ptr(), region.name.characters(), page_laddr.get(), physical_page->paddr().get(), physical_page.ptr());
+    dbgprintf("MM: >> remap_region_page (PD=%x, PTE=P%x) '%s' L%x => P%x (@%p)\n", region.m_page_directory->cr3(), pte.ptr(), region.name.characters(), page_laddr.get(), physical_page->paddr().get(), physical_page.ptr());
 #endif
 }
@@ -453,6 +456,7 @@ void MemoryManager::remap_region(Process& process, Region& region)
 void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region& region, LinearAddress laddr, bool user_allowed)
 {
     InterruptDisabler disabler;
+    region.m_page_directory = page_directory;
     auto& vmo = region.vmo();
 #ifdef MM_DEBUG
     dbgprintf("MM: map_region_at_address will map VMO pages %u - %u (VMO page count: %u)\n", region.first_page_index(), region.last_page_index(), vmo.page_count());
@@ -482,42 +486,24 @@ void MemoryManager::map_region_at_address(PageDirectory& page_directory, Region&
     }
 }
-void MemoryManager::unmap_range(PageDirectory& page_directory, LinearAddress laddr, size_t size)
-{
-    ASSERT((size % PAGE_SIZE) == 0);
-    InterruptDisabler disabler;
-    size_t numPages = size / PAGE_SIZE;
-    for (size_t i = 0; i < numPages; ++i) {
-        auto page_laddr = laddr.offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(page_directory, page_laddr);
-        pte.set_physical_page_base(0);
-        pte.set_present(false);
-        pte.set_writable(false);
-        pte.set_user_allowed(false);
-        page_directory.flush(page_laddr);
-#ifdef MM_DEBUG
-        dbgprintf("MM: << unmap_range L%x =/> 0\n", page_laddr);
-#endif
-    }
-}
-bool MemoryManager::unmap_region(Process& process, Region& region)
+bool MemoryManager::unmap_region(Region& region)
 {
+    ASSERT(region.m_page_directory);
     InterruptDisabler disabler;
     for (size_t i = 0; i < region.page_count(); ++i) {
         auto laddr = region.linearAddress.offset(i * PAGE_SIZE);
-        auto pte = ensure_pte(process.page_directory(), laddr);
+        auto pte = ensure_pte(*region.m_page_directory, laddr);
         pte.set_physical_page_base(0);
         pte.set_present(false);
         pte.set_writable(false);
         pte.set_user_allowed(false);
-        process.page_directory().flush(laddr);
+        region.m_page_directory->flush(laddr);
 #ifdef MM_DEBUG
         auto& physical_page = region.vmo().physical_pages()[region.first_page_index() + i];
         dbgprintf("MM: >> Unmapped L%x => P%x <<\n", laddr, physical_page ? physical_page->paddr().get() : 0);
 #endif
     }
+    region.m_page_directory.clear();
     return true;
 }
@@ -620,6 +606,10 @@ Region::Region(LinearAddress a, size_t s, RetainPtr<VMObject>&& vmo, size_t offs
 Region::~Region()
 {
+    if (m_page_directory) {
+        MM.unmap_region(*this);
+        ASSERT(!m_page_directory);
+    }
     MM.unregister_region(*this);
 }
@@ -719,7 +709,7 @@ VMObject::~VMObject()
     MM.unregister_vmo(*this);
 }
-int Region::commit(Process& process)
+int Region::commit()
 {
     InterruptDisabler disabler;
 #ifdef MM_DEBUG
@@ -734,7 +724,7 @@ int Region::commit(Process& process)
            return -ENOMEM;
         }
         vmo().physical_pages()[i] = move(physical_page);
-        MM.remap_region_page(process.page_directory(), *this, i, true);
+        MM.remap_region_page(*this, i, true);
     }
     return 0;
 }

MemoryManager.h

@@ -56,11 +56,11 @@ private:
     PhysicalAddress m_paddr;
 };
-class PageDirectory {
+class PageDirectory : public Retainable<PageDirectory> {
     friend class MemoryManager;
 public:
-    PageDirectory();
-    explicit PageDirectory(PhysicalAddress);
+    static RetainPtr<PageDirectory> create() { return adopt(*new PageDirectory); }
+    static RetainPtr<PageDirectory> create_at_fixed_address(PhysicalAddress paddr) { return adopt(*new PageDirectory(paddr)); }
     ~PageDirectory();
     dword cr3() const { return m_directory_page->paddr().get(); }
@@ -69,6 +69,9 @@ public:
     void flush(LinearAddress);
 private:
+    PageDirectory();
+    explicit PageDirectory(PhysicalAddress);
+
     RetainPtr<PhysicalPage> m_directory_page;
     HashMap<unsigned, RetainPtr<PhysicalPage>> m_physical_pages;
 };
@@ -145,12 +148,12 @@ public:
        return size / PAGE_SIZE;
     }
-    bool page_in(PageDirectory&);
-    int commit(Process&);
-    int decommit(Process&);
+    bool page_in();
+    int commit();
     size_t committed() const;
+    RetainPtr<PageDirectory> m_page_directory;
     LinearAddress linearAddress;
     size_t size { 0 };
     size_t m_offset_in_vmo { 0 };
@@ -179,7 +182,7 @@ public:
     PageFaultResponse handle_page_fault(const PageFault&);
     bool map_region(Process&, Region&);
-    bool unmap_region(Process&, Region&);
+    bool unmap_region(Region&);
     void populate_page_directory(PageDirectory&);
@@ -203,8 +206,7 @@ private:
     void unregister_region(Region&);
     void map_region_at_address(PageDirectory&, Region&, LinearAddress, bool user_accessible);
-    void unmap_range(PageDirectory&, LinearAddress, size_t);
-    void remap_region_page(PageDirectory&, Region&, unsigned page_index_in_region, bool user_allowed);
+    void remap_region_page(Region&, unsigned page_index_in_region, bool user_allowed);
     void initialize_paging();
     void flush_entire_tlb();
@@ -219,9 +221,9 @@ private:
     static Region* region_from_laddr(Process&, LinearAddress);
-    bool copy_on_write(Process&, Region&, unsigned page_index_in_region);
-    bool page_in_from_inode(PageDirectory&, Region&, unsigned page_index_in_region);
-    bool zero_page(PageDirectory&, Region& region, unsigned page_index_in_region);
+    bool copy_on_write(Region&, unsigned page_index_in_region);
+    bool page_in_from_inode(Region&, unsigned page_index_in_region);
+    bool zero_page(Region& region, unsigned page_index_in_region);
     byte* quickmap_page(PhysicalPage&);
     void unquickmap_page();
@@ -308,7 +310,7 @@ private:
     PageTableEntry ensure_pte(PageDirectory&, LinearAddress);
-    OwnPtr<PageDirectory> m_kernel_page_directory;
+    RetainPtr<PageDirectory> m_kernel_page_directory;
     dword* m_page_table_zero;
     LinearAddress m_quickmap_addr;

Process.cpp

@@ -79,9 +79,9 @@ Region* Process::allocate_region(LinearAddress laddr, size_t size, String&& name
     }
     laddr.mask(0xfffff000);
     m_regions.append(adopt(*new Region(laddr, size, move(name), is_readable, is_writable)));
-    if (commit)
-        m_regions.last()->commit(*this);
     MM.map_region(*this, *m_regions.last());
+    if (commit)
+        m_regions.last()->commit();
     return m_regions.last().ptr();
 }
@@ -121,7 +121,7 @@ bool Process::deallocate_region(Region& region)
     InterruptDisabler disabler;
     for (size_t i = 0; i < m_regions.size(); ++i) {
         if (m_regions[i].ptr() == &region) {
-            MM.unmap_region(*this, region);
+            MM.unmap_region(region);
            m_regions.remove(i);
            return true;
         }
@@ -303,21 +303,21 @@ int Process::do_exec(const String& path, Vector<String>&& arguments, Vector<Stri
        return -ENOTIMPL;
     }
-    auto vmo = VMObject::create_file_backed(descriptor->inode(), descriptor->metadata().size);
-    vmo->set_name(descriptor->absolute_path());
-    auto* region = allocate_region_with_vmo(LinearAddress(), descriptor->metadata().size, vmo.copyRef(), 0, "helper", true, false);
     dword entry_eip = 0;
     // FIXME: Is there a race here?
     auto old_page_directory = move(m_page_directory);
-    m_page_directory = make<PageDirectory>();
+    m_page_directory = PageDirectory::create();
 #ifdef MM_DEBUG
     dbgprintf("Process %u exec: PD=%x created\n", pid(), m_page_directory.ptr());
 #endif
     ProcessPagingScope paging_scope(*this);
+    auto vmo = VMObject::create_file_backed(descriptor->inode(), descriptor->metadata().size);
+    vmo->set_name(descriptor->absolute_path());
+    auto* region = allocate_region_with_vmo(LinearAddress(), descriptor->metadata().size, vmo.copyRef(), 0, "helper", true, false);
     // FIXME: Should we consider doing on-demand paging here? Is it actually useful?
-    bool success = region->page_in(*m_page_directory);
+    bool success = region->page_in();
     ASSERT(success);
     {
@@ -601,7 +601,7 @@ Process::Process(String&& name, uid_t uid, gid_t gid, pid_t ppid, RingLevel ring
         }
     }
-    m_page_directory = make<PageDirectory>();
+    m_page_directory = PageDirectory::create();
 #ifdef MM_DEBUG
     dbgprintf("Process %u ctor: PD=%x created\n", pid(), m_page_directory.ptr());
 #endif

Process.h

@@ -273,7 +273,7 @@ private:
     int alloc_fd();
-    OwnPtr<PageDirectory> m_page_directory;
+    RetainPtr<PageDirectory> m_page_directory;
     Process* m_prev { nullptr };
     Process* m_next { nullptr };

GraphicsBitmap.cpp

@@ -22,7 +22,7 @@ GraphicsBitmap::GraphicsBitmap(Process& process, const Size& size)
     auto vmo = VMObject::create_anonymous(size_in_bytes);
     m_client_region = process.allocate_region_with_vmo(LinearAddress(), size_in_bytes, vmo.copyRef(), 0, "GraphicsBitmap (client)", true, true);
     m_client_region->set_shared(true);
-    m_client_region->commit(process);
+    m_client_region->commit();
     {
         auto& server = WSEventLoop::the().server_process();