Kernel: Make VirtualRangeAllocator setup functions propagate errors

If an internal allocation failure occurs while setting up a new VRA,
we'll now propagate the error to our caller instead of panicking.
Author: Andreas Kling
Date:   2021-11-17 15:51:12 +01:00
Commit: 578a576a98
Parent: 0f22ba5bf2
Notes:  sideshowbarker 2024-07-18 01:00:46 +09:00

3 changed files with 11 additions and 9 deletions
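
For context: this change adopts AK's ErrorOr<T> / TRY() / MUST() error-propagation idiom in the VirtualRangeAllocator setup path. Below is a minimal sketch of that idiom, not code from this commit; only ErrorOr, TRY() and MUST() (from AK/Error.h and AK/Try.h) are real, while try_allocate_resource() and the callers are hypothetical stand-ins.

    #include <AK/Error.h>
    #include <AK/Try.h>
    #include <errno.h>

    // Hypothetical fallible step; stands in for a failing allocation or container insert.
    static ErrorOr<void> try_allocate_resource(bool simulate_failure)
    {
        if (simulate_failure)
            return Error::from_errno(ENOMEM); // failure becomes a returned Error, not a panic
        return {};
    }

    // Setup function in the new style: ErrorOr<void> instead of void.
    static ErrorOr<void> initialize_something(bool simulate_failure)
    {
        TRY(try_allocate_resource(simulate_failure)); // on failure, return the Error to our caller
        return {};                                    // success
    }

    // A fallible caller forwards failures further up the chain with TRY()...
    static ErrorOr<void> create_something()
    {
        TRY(initialize_something(false));
        return {};
    }

    // ...while a caller that cannot recover (e.g. during early boot) wraps the
    // call in MUST(), which asserts if an Error comes back.
    static void boot_time_setup()
    {
        MUST(initialize_something(false));
    }

This mirrors the diffs below: try_create_for_userspace() forwards setup failures with TRY(), while must_create_kernel_page_directory() uses MUST(), since there is no reasonable way to recover if the kernel page directory cannot be set up.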

@@ -37,7 +37,7 @@ UNMAP_AFTER_INIT NonnullRefPtr<PageDirectory> PageDirectory::must_create_kernel_
     // make sure this starts in a new page directory to make MemoryManager::initialize_physical_pages() happy
     FlatPtr start_of_range = ((FlatPtr)end_of_kernel_image & ~(FlatPtr)0x1fffff) + 0x200000;
-    directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range);
+    MUST(directory->m_range_allocator.initialize_with_range(VirtualAddress(start_of_range), KERNEL_PD_END - start_of_range));
     return directory;
 }
@@ -50,11 +50,11 @@ ErrorOr<NonnullRefPtr<PageDirectory>> PageDirectory::try_create_for_userspace(Vi
     auto directory = TRY(adopt_nonnull_ref_or_enomem(new (nothrow) PageDirectory));
     if (parent_range_allocator) {
-        directory->m_range_allocator.initialize_from_parent(*parent_range_allocator);
+        TRY(directory->m_range_allocator.initialize_from_parent(*parent_range_allocator));
     } else {
         size_t random_offset = (get_fast_random<u8>() % 32 * MiB) & PAGE_MASK;
         u32 base = userspace_range_base + random_offset;
-        directory->m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base);
+        TRY(directory->m_range_allocator.initialize_with_range(VirtualAddress(base), userspace_range_ceiling - base));
     }
     // NOTE: Take the MM lock since we need it for quickmap.

@@ -17,20 +17,22 @@ VirtualRangeAllocator::VirtualRangeAllocator()
 {
 }
 
-void VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
+ErrorOr<void> VirtualRangeAllocator::initialize_with_range(VirtualAddress base, size_t size)
 {
     m_total_range = { base, size };
-    m_available_ranges.insert(base.get(), VirtualRange { base, size });
+    TRY(m_available_ranges.try_insert(base.get(), VirtualRange { base, size }));
+    return {};
 }
 
-void VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
+ErrorOr<void> VirtualRangeAllocator::initialize_from_parent(VirtualRangeAllocator const& parent_allocator)
 {
     SpinlockLocker lock(parent_allocator.m_lock);
     m_total_range = parent_allocator.m_total_range;
     m_available_ranges.clear();
     for (auto it = parent_allocator.m_available_ranges.begin(); !it.is_end(); ++it) {
-        m_available_ranges.insert(it.key(), *it);
+        TRY(m_available_ranges.try_insert(it.key(), VirtualRange(*it)));
     }
+    return {};
 }
 
 void VirtualRangeAllocator::dump() const

@@ -18,8 +18,8 @@ public:
     VirtualRangeAllocator();
     ~VirtualRangeAllocator() = default;
 
-    void initialize_with_range(VirtualAddress, size_t);
-    void initialize_from_parent(VirtualRangeAllocator const&);
+    ErrorOr<void> initialize_with_range(VirtualAddress, size_t);
+    ErrorOr<void> initialize_from_parent(VirtualRangeAllocator const&);
 
     ErrorOr<VirtualRange> try_allocate_anywhere(size_t, size_t alignment = PAGE_SIZE);
     ErrorOr<VirtualRange> try_allocate_specific(VirtualAddress, size_t);