Kernel: Convert MemoryManager to east-const style

commit dc26c02379
parent ffc81cbfad
Author: Andreas Kling
Date:   2021-07-14 13:31:21 +02:00
2 changed files with 13 additions and 13 deletions
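
For context: "east const" places the const qualifier to the right of the type it qualifies, whereas the previous "west const" spelling places it to the left. Both spellings declare exactly the same types, so this commit changes only how the declarations are written. A minimal illustration follows; the Range struct here is a stand-in stub, not the kernel's Range class:

struct Range { };

// The two spellings are equivalent; only the placement of const differs.
void west(const Range&); // "west const": qualifier to the left of the type
void east(Range const&); // "east const": qualifier to the right of the type

// With pointers, east const reads consistently right to left:
const Range* a = nullptr; // pointer to const Range (west spelling)
Range const* b = nullptr; // same type, east spelling
Range* const c = nullptr; // a different type: const pointer to mutable Range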

Kernel/VM/MemoryManager.cpp

@@ -157,7 +157,7 @@ UNMAP_AFTER_INIT void MemoryManager::register_reserved_ranges()
     m_reserved_memory_ranges.append(ContiguousReservedMemoryRange { range.start, m_physical_memory_ranges.last().start.get() + m_physical_memory_ranges.last().length - range.start.get() });
 }

-bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, const Range& range) const
+bool MemoryManager::is_allowed_to_mmap_to_userspace(PhysicalAddress start_address, Range const& range) const
 {
     VERIFY(!m_reserved_memory_ranges.is_empty());
     for (auto& current_range : m_reserved_memory_ranges) {
@@ -493,7 +493,7 @@ PageTableEntry* MemoryManager::pte(PageDirectory& page_directory, VirtualAddress
     u32 page_table_index = (vaddr.get() >> 12) & 0x1ff;

     auto* pd = quickmap_pd(const_cast<PageDirectory&>(page_directory), page_directory_table_index);
-    const PageDirectoryEntry& pde = pd[page_directory_index];
+    PageDirectoryEntry const& pde = pd[page_directory_index];
     if (!pde.is_present())
         return nullptr;

@@ -616,7 +616,7 @@ Region* MemoryManager::find_region_from_vaddr(VirtualAddress vaddr)
     return find_user_region_from_vaddr(*page_directory->space(), vaddr);
 }

-PageFaultResponse MemoryManager::handle_page_fault(const PageFault& fault)
+PageFaultResponse MemoryManager::handle_page_fault(PageFault const& fault)
 {
     VERIFY_INTERRUPTS_DISABLED();
     ScopedSpinLock lock(s_mm_lock);
@@ -689,7 +689,7 @@ OwnPtr<Region> MemoryManager::allocate_kernel_region_identity(PhysicalAddress pa
     return allocate_kernel_region_with_vmobject(range.value(), *vm_object, name, access, cacheable);
 }

-OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(const Range& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
+OwnPtr<Region> MemoryManager::allocate_kernel_region_with_vmobject(Range const& range, VMObject& vmobject, StringView name, Region::Access access, Region::Cacheable cacheable)
 {
     ScopedSpinLock lock(s_mm_lock);
     auto region = Region::try_create_kernel_only(range, vmobject, 0, KString::try_create(name), access, cacheable);
@@ -915,7 +915,7 @@ void MemoryManager::flush_tlb_local(VirtualAddress vaddr, size_t page_count)
     Processor::flush_tlb_local(vaddr, page_count);
 }

-void MemoryManager::flush_tlb(const PageDirectory* page_directory, VirtualAddress vaddr, size_t page_count)
+void MemoryManager::flush_tlb(PageDirectory const* page_directory, VirtualAddress vaddr, size_t page_count)
 {
     Processor::flush_tlb(page_directory, vaddr, page_count);
 }
@@ -1008,7 +1008,7 @@ void MemoryManager::unquickmap_page()
     mm_data.m_quickmap_in_use.unlock(mm_data.m_quickmap_prev_flags);
 }

-bool MemoryManager::validate_user_stack(const Process& process, VirtualAddress vaddr) const
+bool MemoryManager::validate_user_stack(Process const& process, VirtualAddress vaddr) const
 {
     if (!is_user_address(vaddr))
         return false;

Kernel/VM/MemoryManager.h

@@ -121,7 +121,7 @@ public:
         return Processor::current().get_mm_data();
     }

-    PageFaultResponse handle_page_fault(const PageFault&);
+    PageFaultResponse handle_page_fault(PageFault const&);

     void set_page_writable_direct(VirtualAddress, bool);
@@ -131,7 +131,7 @@ public:
     static void enter_process_paging_scope(Process&);
     static void enter_space(Space&);

-    bool validate_user_stack(const Process&, VirtualAddress) const;
+    bool validate_user_stack(Process const&, VirtualAddress) const;

     enum class ShouldZeroFill {
         No,
@@ -151,7 +151,7 @@ public:
     OwnPtr<Region> allocate_kernel_region(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_identity(PhysicalAddress, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
     OwnPtr<Region> allocate_kernel_region_with_vmobject(VMObject&, size_t, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
-    OwnPtr<Region> allocate_kernel_region_with_vmobject(const Range&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);
+    OwnPtr<Region> allocate_kernel_region_with_vmobject(Range const&, VMObject&, StringView name, Region::Access access, Region::Cacheable = Region::Cacheable::Yes);

     struct SystemMemoryInfo {
         PhysicalSize user_physical_pages { 0 };
@@ -193,8 +193,8 @@ public:
     PageDirectory& kernel_page_directory() { return *m_kernel_page_directory; }

-    const Vector<UsedMemoryRange>& used_memory_ranges() { return m_used_memory_ranges; }
-    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, const Range&) const;
+    Vector<UsedMemoryRange> const& used_memory_ranges() { return m_used_memory_ranges; }
+    bool is_allowed_to_mmap_to_userspace(PhysicalAddress, Range const&) const;

     PhysicalPageEntry& get_physical_page_entry(PhysicalAddress);
     PhysicalAddress get_physical_address(PhysicalPage const&);
@@ -214,7 +214,7 @@ private:
     void protect_kernel_image();
     void parse_memory_map();
     static void flush_tlb_local(VirtualAddress, size_t page_count = 1);
-    static void flush_tlb(const PageDirectory*, VirtualAddress, size_t page_count = 1);
+    static void flush_tlb(PageDirectory const*, VirtualAddress, size_t page_count = 1);

     static Region* kernel_region_from_vaddr(VirtualAddress);
@@ -286,7 +286,7 @@ inline bool is_user_range(VirtualAddress vaddr, size_t size)
     return is_user_address(vaddr) && is_user_address(vaddr.offset(size));
 }

-inline bool is_user_range(const Range& range)
+inline bool is_user_range(Range const& range)
 {
     return is_user_range(range.base(), range.size());
 }