From e67402c7027f13fcc03d61f88725470e4c66e820 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Wed, 27 Jan 2021 21:01:45 +0100
Subject: [PATCH] Kernel: Remove Range "valid" state and use Optional<Range>
 instead

It's easier to understand VM ranges if they are always valid. We can
simply use an empty Optional<Range> to encode absence when needed.
---
 Kernel/Process.cpp           |  4 ++--
 Kernel/Process.h             |  4 ++--
 Kernel/Syscalls/execve.cpp   | 16 ++++++++--------
 Kernel/Syscalls/mmap.cpp     | 13 +++++++------
 Kernel/Thread.cpp            |  4 ++--
 Kernel/VM/MemoryManager.cpp  | 20 ++++++++++----------
 Kernel/VM/RangeAllocator.cpp |  7 ++++---
 Kernel/VM/RangeAllocator.h   |  8 ++++----
 8 files changed, 39 insertions(+), 37 deletions(-)
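The idea in isolation: before this change, a failed allocation was signaled by
returning a Range in its default-constructed "invalid" state, which every
caller had to remember to check with is_valid(). Afterwards, absence is encoded
in the return type: an empty Optional<Range> means the allocation failed, and
any Range that exists is valid by construction. The standalone sketch below
illustrates that shape. It is a minimal illustration, not kernel code:
std::optional stands in for AK::Optional, VirtualAddress and Range are trimmed
to the members that matter here, and a fixed address stands in for the real
first-fit search.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <optional>

// Trimmed-down stand-ins for the kernel types (hypothetical).
struct VirtualAddress {
    uintptr_t value { 0 };
};

struct Range {
    // As in this patch: no default "invalid" state. A Range always
    // carries a base and a size.
    Range(VirtualAddress base, size_t size)
        : m_base(base)
        , m_size(size)
    {
    }
    VirtualAddress base() const { return m_base; }
    size_t size() const { return m_size; }

private:
    VirtualAddress m_base;
    size_t m_size { 0 };
};

// Before: returned a default-constructed "invalid" Range on failure,
// which every caller had to remember to check with is_valid().
// After: absence is encoded in the return type itself.
std::optional<Range> allocate_anywhere(size_t size)
{
    if (!size)
        return {}; // Empty optional: nothing was allocated.
    // Fixed answer, standing in for the real first-fit search.
    return Range { VirtualAddress { 0x10000000 }, size };
}

int main()
{
    auto range = allocate_anywhere(0x4000);
    if (!range.has_value()) {
        std::puts("allocation failed");
        return 1;
    }
    // range.value() is valid by construction; no is_valid() needed.
    std::printf("base=0x%zx, size=0x%zx\n",
        (size_t)range.value().base().value, range.value().size());
    return 0;
}

The same has_value()/value() calls appear at every call site in the diff below.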
"w" : ""); auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory()); - if (!range.is_valid()) { + if (!range.has_value()) { ph_load_result = ENOMEM; return IterationDecision::Break; } - auto region_or_error = allocate_region(range, region_name, prot, AllocationStrategy::Reserve); + auto region_or_error = allocate_region(range.value(), region_name, prot, AllocationStrategy::Reserve); if (region_or_error.is_error()) { ph_load_result = region_or_error.error(); return IterationDecision::Break; @@ -263,11 +263,11 @@ KResultOr Process::load_elf_object(FileDescription& object_ if (program_header.is_executable()) prot |= PROT_EXEC; auto range = allocate_range(program_header.vaddr().offset(load_offset), program_header.size_in_memory()); - if (!range.is_valid()) { + if (!range.has_value()) { ph_load_result = ENOMEM; return IterationDecision::Break; } - auto region_or_error = allocate_region_with_vmobject(range, *vmobject, program_header.offset(), elf_name, prot, true); + auto region_or_error = allocate_region_with_vmobject(range.value(), *vmobject, program_header.offset(), elf_name, prot, true); if (region_or_error.is_error()) { ph_load_result = region_or_error.error(); return IterationDecision::Break; @@ -288,12 +288,12 @@ KResultOr Process::load_elf_object(FileDescription& object_ } auto stack_range = allocate_range({}, Thread::default_userspace_stack_size); - if (!stack_range.is_valid()) { + if (!stack_range.has_value()) { dbgln("do_exec: Failed to allocate VM range for stack"); return ENOMEM; } - auto stack_region_or_error = allocate_region(stack_range, "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve); + auto stack_region_or_error = allocate_region(stack_range.value(), "Stack (Main thread)", PROT_READ | PROT_WRITE, AllocationStrategy::Reserve); if (stack_region_or_error.is_error()) return stack_region_or_error.error(); auto& stack_region = *stack_region_or_error.value(); diff --git a/Kernel/Syscalls/mmap.cpp b/Kernel/Syscalls/mmap.cpp index 3e5522fccae..aa555b71f3f 100644 --- a/Kernel/Syscalls/mmap.cpp +++ b/Kernel/Syscalls/mmap.cpp @@ -134,17 +134,18 @@ void* Process::sys$mmap(Userspace user_params) Region* region = nullptr; auto range = allocate_range(VirtualAddress(addr), size, alignment); - if (!range.is_valid()) { + if (!range.has_value()) { if (addr && !map_fixed) { // If there's an address but MAP_FIXED wasn't specified, the address is just a hint. range = allocate_range({}, size, alignment); } - return (void*)-ENOMEM; + if (!range.has_value()) + return (void*)-ENOMEM; } if (map_anonymous) { auto strategy = map_noreserve ? AllocationStrategy::None : AllocationStrategy::Reserve; - auto region_or_error = allocate_region(range, !name.is_null() ? name : "mmap", prot, strategy); + auto region_or_error = allocate_region(range.value(), !name.is_null() ? 
name : "mmap", prot, strategy); if (region_or_error.is_error()) return (void*)region_or_error.error().error(); region = region_or_error.value(); @@ -170,7 +171,7 @@ void* Process::sys$mmap(Userspace user_params) return (void*)-EACCES; } - auto region_or_error = description->mmap(*this, range, static_cast(offset), prot, map_shared); + auto region_or_error = description->mmap(*this, range.value(), static_cast(offset), prot, map_shared); if (region_or_error.is_error()) return (void*)region_or_error.error().error(); region = region_or_error.value(); @@ -447,10 +448,10 @@ void* Process::sys$allocate_tls(size_t size) ASSERT(main_thread); auto range = allocate_range({}, size); - if (!range.is_valid()) + if (!range.has_value()) return (void*)-ENOMEM; - auto region_or_error = allocate_region(range, String(), PROT_READ | PROT_WRITE); + auto region_or_error = allocate_region(range.value(), String(), PROT_READ | PROT_WRITE); if (region_or_error.is_error()) return (void*)region_or_error.error().error(); diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp index 7bef932346c..b2b86979feb 100644 --- a/Kernel/Thread.cpp +++ b/Kernel/Thread.cpp @@ -1059,10 +1059,10 @@ KResult Thread::make_thread_specific_region(Badge) return KSuccess; auto range = process().allocate_range({}, thread_specific_region_size()); - if (!range.is_valid()) + if (!range.has_value()) return ENOMEM; - auto region_or_error = process().allocate_region(range, "Thread-specific", PROT_READ | PROT_WRITE); + auto region_or_error = process().allocate_region(range.value(), "Thread-specific", PROT_READ | PROT_WRITE); if (region_or_error.is_error()) return region_or_error.error(); diff --git a/Kernel/VM/MemoryManager.cpp b/Kernel/VM/MemoryManager.cpp index 7397ccd0854..9642360e430 100644 --- a/Kernel/VM/MemoryManager.cpp +++ b/Kernel/VM/MemoryManager.cpp @@ -399,10 +399,10 @@ OwnPtr MemoryManager::allocate_contiguous_kernel_region(size_t size, con ASSERT(!(size % PAGE_SIZE)); ScopedSpinLock lock(s_mm_lock); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); - if (!range.is_valid()) + if (!range.has_value()) return {}; auto vmobject = ContiguousVMObject::create_with_size(size); - return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable); + return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable); } OwnPtr MemoryManager::allocate_kernel_region(size_t size, const StringView& name, u8 access, bool user_accessible, AllocationStrategy strategy, bool cacheable) @@ -410,12 +410,12 @@ OwnPtr MemoryManager::allocate_kernel_region(size_t size, const StringVi ASSERT(!(size % PAGE_SIZE)); ScopedSpinLock lock(s_mm_lock); auto range = kernel_page_directory().range_allocator().allocate_anywhere(size); - if (!range.is_valid()) + if (!range.has_value()) return {}; auto vmobject = AnonymousVMObject::create_with_size(size, strategy); if (!vmobject) return {}; - return allocate_kernel_region_with_vmobject(range, vmobject.release_nonnull(), name, access, user_accessible, cacheable); + return allocate_kernel_region_with_vmobject(range.value(), vmobject.release_nonnull(), name, access, user_accessible, cacheable); } OwnPtr MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable) @@ -423,12 +423,12 @@ OwnPtr MemoryManager::allocate_kernel_region(PhysicalAddress paddr, size ASSERT(!(size % PAGE_SIZE)); ScopedSpinLock lock(s_mm_lock); auto range = 
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress paddr, size_t size, const StringView& name, u8 access, bool user_accessible, bool cacheable)
@@ -436,12 +436,12 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().identity_range_allocator().allocate_specific(VirtualAddress(paddr.get()), size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
     auto vmobject = AnonymousVMObject::create_for_physical_range(paddr, size);
     if (!vmobject)
         return {};
-    return allocate_kernel_region_with_vmobject(range, *vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), *vmobject, name, access, user_accessible, cacheable);
 }
 
 OwnPtr<Region> MemoryManager::allocate_user_accessible_kernel_region(size_t size, const StringView& name, u8 access, bool cacheable)
@@ -467,9 +467,9 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(VMObject& vmo
     ASSERT(!(size % PAGE_SIZE));
     ScopedSpinLock lock(s_mm_lock);
     auto range = kernel_page_directory().range_allocator().allocate_anywhere(size);
-    if (!range.is_valid())
+    if (!range.has_value())
         return {};
-    return allocate_kernel_region_with_vmobject(range, vmobject, name, access, user_accessible, cacheable);
+    return allocate_kernel_region_with_vmobject(range.value(), vmobject, name, access, user_accessible, cacheable);
 }
 
 bool MemoryManager::commit_user_physical_pages(size_t page_count)
diff --git a/Kernel/VM/RangeAllocator.cpp b/Kernel/VM/RangeAllocator.cpp
index ec5174bf04a..c6292cc7151 100644
--- a/Kernel/VM/RangeAllocator.cpp
+++ b/Kernel/VM/RangeAllocator.cpp
@@ -36,6 +36,7 @@ namespace Kernel {
 
 RangeAllocator::RangeAllocator()
+    : m_total_range({}, 0)
 {
 }
 
@@ -105,7 +106,7 @@ void RangeAllocator::carve_at_index(int index, const Range& range)
         m_available_ranges.insert(index + 1, move(remaining_parts[1]));
 }
 
-Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
+Optional<Range> RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
 {
     if (!size)
         return {};
@@ -148,7 +149,7 @@ Range RangeAllocator::allocate_anywhere(size_t size, size_t alignment)
     return {};
 }
 
-Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
+Optional<Range> RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
 {
     if (!size)
         return {};
@@ -178,7 +179,7 @@ Range RangeAllocator::allocate_specific(VirtualAddress base, size_t size)
     return {};
 }
 
-void RangeAllocator::deallocate(Range range)
+void RangeAllocator::deallocate(const Range& range)
 {
     ScopedSpinLock lock(m_lock);
     ASSERT(m_total_range.contains(range));
diff --git a/Kernel/VM/RangeAllocator.h b/Kernel/VM/RangeAllocator.h
index 89292ef3e6c..0839e0bb573 100644
--- a/Kernel/VM/RangeAllocator.h
+++ b/Kernel/VM/RangeAllocator.h
@@ -38,7 +38,7 @@ class Range {
     friend class RangeAllocator;
 
 public:
-    Range() { }
+    Range() = delete;
     Range(VirtualAddress base, size_t size)
         : m_base(base)
         , m_size(size)
@@ -85,9 +85,9 @@ public:
     void initialize_with_range(VirtualAddress, size_t);
     void initialize_from_parent(const RangeAllocator&);
 
-    Range allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
-    Range allocate_specific(VirtualAddress, size_t);
-    void deallocate(Range);
+    Optional<Range> allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
+    Optional<Range> allocate_specific(VirtualAddress, size_t);
+    void deallocate(const Range&);
 
     void dump() const;
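Two knock-on effects of deleting Range's default constructor show up above.
First, RangeAllocator must now initialize m_total_range explicitly, hence the
new ": m_total_range({}, 0)" in its constructor. Second, Process's
RegionLookupCache can no longer use a default-constructed invalid Range to
mean "nothing cached yet", so the cached range becomes Optional<Range> and the
lookup gains a has_value() check. A minimal sketch of that lookup shape, again
using std::optional as a stand-in for AK::Optional with simplified,
hypothetical types:

#include <cstddef>
#include <cstdint>
#include <optional>

struct VirtualAddress {
    uintptr_t value { 0 };
    bool operator==(const VirtualAddress& other) const { return value == other.value; }
};

struct Range {
    Range(VirtualAddress base, size_t size)
        : m_base(base)
        , m_size(size)
    {
    }
    bool operator==(const Range& other) const
    {
        return m_base == other.m_base && m_size == other.m_size;
    }

private:
    VirtualAddress m_base;
    size_t m_size { 0 };
};

struct Region { };

// Simplified version of Process::RegionLookupCache: the cached range is
// optional because there is no "invalid Range" to stand for an empty slot.
struct RegionLookupCache {
    std::optional<Range> range;
    Region* region { nullptr };
};

Region* find_region_from_range(RegionLookupCache& cache, const Range& range)
{
    // The has_value() check replaces the old comparison against a
    // default-constructed invalid Range.
    if (cache.range.has_value() && cache.range.value() == range && cache.region)
        return cache.region;
    return nullptr; // Cache miss; the real code falls back to scanning the region list.
}

int main()
{
    RegionLookupCache cache;
    Range range { VirtualAddress { 0x1000 }, 0x2000 };
    Region region;
    cache.range = range;
    cache.region = &region;
    return find_region_from_range(cache, range) == &region ? 0 : 1;
}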