Kernel: Clean up a bunch of wrong-looking Region/VMObject code
Since a Region is merely a "window" onto a VMObject, it can both begin and end at a distance from the VMObject's boundaries. Therefore, we should always be computing indices into a VMObject's physical page array by adding the Region's "first_page_index()". There was a whole bunch of code that forgot to do that. This fixes many wrong behaviors for Regions that start part-way into a VMObject.
Commit: a221cddeec (parent: fe455c5ac4)
Author: https://github.com/awesomekling
Commit: https://github.com/SerenityOS/serenity/commit/a221cddeec4
Notes: sideshowbarker, 2024-07-19 11:26:56 +09:00

2 changed files with 25 additions and 23 deletions
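To make the indexing described in the commit message concrete, here is a minimal standalone sketch (simplified, hypothetical types; not the kernel's actual Region/VMObject classes): a Region that starts part-way into its VMObject must add its first_page_index() to every region-relative page index before touching the VMObject's physical page array.

#include <cassert>
#include <cstddef>
#include <vector>

struct PhysicalPageSlot {
    bool present = false;
};

struct VMObject {
    std::vector<PhysicalPageSlot> physical_pages;
};

struct Region {
    VMObject* vmobject;
    size_t first_page_index; // where this window begins inside the VMObject
    size_t page_count;       // how many pages the window covers

    // Correct lookup: translate a region-relative index into a VMObject index.
    PhysicalPageSlot& page_slot(size_t page_index_in_region)
    {
        assert(page_index_in_region < page_count);
        return vmobject->physical_pages[first_page_index + page_index_in_region];
    }
};

int main()
{
    VMObject vmobject;
    vmobject.physical_pages.resize(8);

    // A region covering pages 3..6 of the 8-page VMObject.
    Region region { &vmobject, 3, 4 };

    // Region page 0 is VMObject page 3. Indexing without the offset (the kind
    // of bug this commit removes) would touch VMObject page 0 instead.
    region.page_slot(0).present = true;
    assert(vmobject.physical_pages[3].present);
    assert(!vmobject.physical_pages[0].present);
    return 0;
}

Forgetting the offset silently reads or writes a slot belonging to a different page of the VMObject, which is the class of bug the diff below removes.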
MemoryManager.cpp:

@@ -291,14 +291,15 @@ const Region* MemoryManager::region_from_vaddr(const Process& process, VirtualAd
 bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmobject();
-    auto& vmo_page = vmo.physical_pages()[region.first_page_index() + page_index_in_region];
-    ASSERT(vmo.is_anonymous());
+    ASSERT(region.vmobject().is_anonymous());
+
+    auto& vmobject = region.vmobject();
+    auto& vmobject_physical_page_entry = vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
 
     // NOTE: We don't need to acquire the VMObject's lock.
     // This function is already exclusive due to interrupts being blocked.
 
-    if (!vmo_page.is_null()) {
+    if (!vmobject_physical_page_entry.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("MM: zero_page() but page already present. Fine with me!\n");
 #endif

@@ -313,7 +314,7 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> ZERO P%p\n", physical_page->paddr().get());
 #endif
-    vmo.physical_pages()[page_index_in_region] = move(physical_page);
+    vmobject_physical_page_entry = move(physical_page);
     region.remap_page(page_index_in_region);
     return true;
 }

@@ -321,8 +322,9 @@ bool MemoryManager::zero_page(Region& region, unsigned page_index_in_region)
 bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 {
     ASSERT_INTERRUPTS_DISABLED();
-    auto& vmo = region.vmobject();
-    if (vmo.physical_pages()[page_index_in_region]->ref_count() == 1) {
+    auto& vmobject = region.vmobject();
+    auto& vmobject_physical_page_entry = vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
+    if (vmobject_physical_page_entry->ref_count() == 1) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf(" >> It's a COW page but nobody is sharing it anymore. Remap r/w\n");
 #endif

@@ -337,7 +339,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 #ifdef PAGE_FAULT_DEBUG
     dbgprintf(" >> It's a COW page and it's time to COW!\n");
 #endif
-    auto physical_page_to_copy = move(vmo.physical_pages()[page_index_in_region]);
+    auto physical_page_to_copy = move(vmobject_physical_page_entry);
     auto physical_page = allocate_user_physical_page(ShouldZeroFill::No);
     u8* dest_ptr = quickmap_page(*physical_page);
     const u8* src_ptr = region.vaddr().offset(page_index_in_region * PAGE_SIZE).as_ptr();

@@ -345,7 +347,7 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
     dbgprintf(" >> COW P%p <- P%p\n", physical_page->paddr().get(), physical_page_to_copy->paddr().get());
 #endif
     memcpy(dest_ptr, src_ptr, PAGE_SIZE);
-    vmo.physical_pages()[page_index_in_region] = move(physical_page);
+    vmobject_physical_page_entry = move(physical_page);
     unquickmap_page();
     region.set_should_cow(page_index_in_region, false);
     region.remap_page(page_index_in_region);

@@ -355,24 +357,23 @@ bool MemoryManager::copy_on_write(Region& region, unsigned page_index_in_region)
 bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_region)
 {
     ASSERT(region.page_directory());
-    auto& vmo = region.vmobject();
-    ASSERT(vmo.is_inode());
-
-    auto& inode_vmobject = static_cast<InodeVMObject&>(vmo);
-
-    auto& vmo_page = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
+    ASSERT(region.vmobject().is_inode());
+
+    auto& vmobject = region.vmobject();
+    auto& inode_vmobject = static_cast<InodeVMObject&>(vmobject);
+    auto& vmobject_physical_page_entry = inode_vmobject.physical_pages()[region.first_page_index() + page_index_in_region];
 
     InterruptFlagSaver saver;
 
     sti();
-    LOCKER(vmo.m_paging_lock);
+    LOCKER(vmobject.m_paging_lock);
     cli();
 
-    if (!vmo_page.is_null()) {
+    if (!vmobject_physical_page_entry.is_null()) {
 #ifdef PAGE_FAULT_DEBUG
         dbgprintf("MM: page_in_from_inode() but page already present. Fine with me!\n");
 #endif
         region.remap_page(page_index_in_region);
         return true;
     }
 

@@ -395,8 +396,8 @@ bool MemoryManager::page_in_from_inode(Region& region, unsigned page_index_in_re
         memset(page_buffer + nread, 0, PAGE_SIZE - nread);
     }
     cli();
-    vmo_page = allocate_user_physical_page(ShouldZeroFill::No);
-    if (vmo_page.is_null()) {
+    vmobject_physical_page_entry = allocate_user_physical_page(ShouldZeroFill::No);
+    if (vmobject_physical_page_entry.is_null()) {
         kprintf("MM: page_in_from_inode was unable to allocate a physical page\n");
         return false;
     }
Region.cpp:

@@ -87,16 +87,17 @@ int Region::commit()
 #ifdef MM_DEBUG
     dbgprintf("MM: commit %u pages in Region %p (VMO=%p) at V%p\n", vmobject().page_count(), this, &vmobject(), vaddr().get());
 #endif
-    for (size_t i = first_page_index(); i <= last_page_index(); ++i) {
-        if (!vmobject().physical_pages()[i].is_null())
+    for (size_t i = 0; i < page_count(); ++i) {
+        auto& vmobject_physical_page_entry = vmobject().physical_pages()[first_page_index() + i];
+        if (!vmobject_physical_page_entry.is_null())
             continue;
         auto physical_page = MM.allocate_user_physical_page(MemoryManager::ShouldZeroFill::Yes);
         if (!physical_page) {
             kprintf("MM: commit was unable to allocate a physical page\n");
             return -ENOMEM;
         }
-        vmobject().physical_pages()[i] = move(physical_page);
-        remap_page(i - first_page_index());
+        vmobject_physical_page_entry = move(physical_page);
+        remap_page(i);
     }
     return 0;
 }
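As a sanity check on the Region::commit hunk above, the old loop (VMObject-relative i) and the new loop (Region-relative i) visit exactly the same pages. The following small standalone sketch, with made-up values standing in for first_page_index() and page_count(), verifies that equivalence.

#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

int main()
{
    const size_t first_page_index = 3; // hypothetical window offset into the VMObject
    const size_t page_count = 4;       // hypothetical window size, in pages
    const size_t last_page_index = first_page_index + page_count - 1;

    // Each pair is { VMObject page index, Region page index }.
    std::vector<std::pair<size_t, size_t>> old_pairs, new_pairs;

    // Old loop: i is a VMObject-relative index.
    for (size_t i = first_page_index; i <= last_page_index; ++i)
        old_pairs.push_back({ i, i - first_page_index });

    // New loop: i is a Region-relative index.
    for (size_t i = 0; i < page_count; ++i)
        new_pairs.push_back({ first_page_index + i, i });

    assert(old_pairs == new_pairs);
    return 0;
}

In other words, the Region::commit hunk is a behavior-preserving restyle into Region-relative indexing; the substantive fixes are the missing first_page_index() offsets in the MemoryManager.cpp hunks.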