From c3915e4058341570e746979a69ce6696de232da5 Mon Sep 17 00:00:00 2001
From: Andreas Kling <kling@serenityos.org>
Date: Sun, 2 Apr 2023 20:40:47 +0200
Subject: [PATCH] Kernel: Stop using *LockRefPtr for Thread

These were stored in a bunch of places. The main one that's a bit iffy
is the Mutex::m_holder one, which I'm going to simplify in a subsequent
commit.

In Plan9FS and WorkQueue, we can't make the NNRPs const due to
initialization order problems. That's probably doable with further
cleanup, but left as an exercise for our future selves.

Before starting this, I expected the thread blockers to be a problem,
but as it turns out they were super straightforward (for once!) as they
don't mutate the thread after initiating a block, so they can just use
simple const-ified NNRPs.
---
 Kernel/FileSystem/Plan9FS/FileSystem.h |  2 +-
 Kernel/Locking/Mutex.h                 |  2 +-
 Kernel/Process.cpp                     | 14 +++++---------
 Kernel/Process.h                       | 10 +++++-----
 Kernel/Scheduler.cpp                   |  2 +-
 Kernel/Syscalls/thread.cpp             |  2 +-
 Kernel/Thread.cpp                      | 12 ++++++------
 Kernel/Thread.h                        | 10 +++++-----
 Kernel/WorkQueue.h                     |  2 +-
 9 files changed, 26 insertions(+), 30 deletions(-)

diff --git a/Kernel/FileSystem/Plan9FS/FileSystem.h b/Kernel/FileSystem/Plan9FS/FileSystem.h
index 9cbe44fe8ed..abb7226f49c 100644
--- a/Kernel/FileSystem/Plan9FS/FileSystem.h
+++ b/Kernel/FileSystem/Plan9FS/FileSystem.h
@@ -137,7 +137,7 @@ private:
     HashMap<u16, NonnullLockRefPtr<ReceiveCompletion>> m_completions;
 
     Spinlock<LockRank::None> m_thread_lock {};
-    LockRefPtr<Thread> m_thread;
+    RefPtr<Thread> m_thread;
     Atomic<bool> m_thread_running { false };
     Atomic<bool> m_thread_shutdown { false };
 };
diff --git a/Kernel/Locking/Mutex.h b/Kernel/Locking/Mutex.h
index 17d59925ec7..25a2eb35727 100644
--- a/Kernel/Locking/Mutex.h
+++ b/Kernel/Locking/Mutex.h
@@ -101,7 +101,7 @@ private:
     // the lock is unlocked, it just means we don't know which threads hold it.
     // When locked exclusively, this is always the one thread that holds the
     // lock.
-    LockRefPtr<Thread> m_holder;
+    RefPtr<Thread> m_holder;
     size_t m_shared_holders { 0 };
 
     struct BlockedThreadLists {
diff --git a/Kernel/Process.cpp b/Kernel/Process.cpp
index 0a3f35c7386..77b0d9694ef 100644
--- a/Kernel/Process.cpp
+++ b/Kernel/Process.cpp
@@ -329,7 +329,7 @@ Process::Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credent
     }
 }
 
-ErrorOr<NonnullLockRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Memory::AddressSpace>&& preallocated_space, Process* fork_parent)
+ErrorOr<NonnullRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Memory::AddressSpace>&& preallocated_space, Process* fork_parent)
 {
     m_space.with([&](auto& space) {
         space = move(preallocated_space);
@@ -338,10 +338,10 @@ ErrorOr<NonnullLockRefPtr<Thread>> Process::attach_resources(NonnullOwnPtr<Me
     auto create_first_thread = [&] {
         if (fork_parent) {
             // NOTE: fork() doesn't clone all threads; the thread that called fork() becomes the only thread in the new process.
-            return Thread::current()->try_clone(*this);
+            return Thread::current()->clone(*this);
         }
         // NOTE: This non-forked code path is only taken when the kernel creates a process "manually" (at boot.)
-        return Thread::try_create(*this);
+        return Thread::create(*this);
     };
 
     auto first_thread = TRY(create_first_thread());
@@ -906,17 +906,13 @@ ErrorOr<void> Process::send_signal(u8 signal, Process* sender)
     return ESRCH;
 }
 
-LockRefPtr<Thread> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity, bool joinable)
+ErrorOr<NonnullRefPtr<Thread>> Process::create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity, bool joinable)
 {
     VERIFY((priority >= THREAD_PRIORITY_MIN) && (priority <= THREAD_PRIORITY_MAX));
 
     // FIXME: Do something with guard pages?
-    auto thread_or_error = Thread::try_create(*this);
-    if (thread_or_error.is_error())
-        return {};
-
-    auto thread = thread_or_error.release_value();
+    auto thread = TRY(Thread::create(*this));
     thread->set_name(move(name));
     thread->set_affinity(affinity);
     thread->set_priority(priority);
diff --git a/Kernel/Process.h b/Kernel/Process.h
index ccf56841694..7f3188bfdca 100644
--- a/Kernel/Process.h
+++ b/Kernel/Process.h
@@ -187,7 +187,7 @@ public:
 
     struct ProcessAndFirstThread {
         NonnullRefPtr<Process> process;
-        NonnullLockRefPtr<Thread> first_thread;
+        NonnullRefPtr<Thread> first_thread;
     };
 
     template<typename EntryFunction>
@@ -203,7 +203,7 @@ public:
 
     ~Process();
 
-    LockRefPtr<Thread> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
+    ErrorOr<NonnullRefPtr<Thread>> create_kernel_thread(void (*entry)(void*), void* entry_data, u32 priority, NonnullOwnPtr<KString> name, u32 affinity = THREAD_AFFINITY_DEFAULT, bool joinable = true);
 
     bool is_profiling() const { return m_profiling; }
     void set_profiling(bool profiling) { m_profiling = profiling; }
@@ -570,7 +570,7 @@ public:
     ErrorOr<void> set_coredump_property(NonnullOwnPtr<KString> key, NonnullOwnPtr<KString> value);
     ErrorOr<void> try_set_coredump_property(StringView key, StringView value);
 
-    Vector<NonnullLockRefPtr<Thread>> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }
+    Vector<NonnullRefPtr<Thread>> const& threads_for_coredump(Badge<Coredump>) const { return m_threads_for_coredump; }
 
     PerformanceEventBuffer* perf_events() { return m_perf_event_buffer; }
     PerformanceEventBuffer const* perf_events() const { return m_perf_event_buffer; }
@@ -600,7 +600,7 @@ private:
 
     Process(NonnullOwnPtr<KString> name, NonnullRefPtr<Credentials>, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory, RefPtr<Custody> executable, TTY* tty, UnveilNode unveil_tree, UnveilNode exec_unveil_tree);
     static ErrorOr<ProcessAndFirstThread> create(NonnullOwnPtr<KString> name, UserID, GroupID, ProcessID ppid, bool is_kernel_process, RefPtr<Custody> current_directory = nullptr, RefPtr<Custody> executable = nullptr, TTY* = nullptr, Process* fork_parent = nullptr);
-    ErrorOr<NonnullLockRefPtr<Thread>> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, Process* fork_parent);
+    ErrorOr<NonnullRefPtr<Thread>> attach_resources(NonnullOwnPtr<Memory::AddressSpace>&&, Process* fork_parent);
     static ProcessID allocate_pid();
 
     void kill_threads_except_self();
@@ -892,7 +892,7 @@ private:
     };
 
     SpinlockProtected<Array<CoredumpProperty, 4>, LockRank::None> m_coredump_properties {};
-    Vector<NonnullLockRefPtr<Thread>> m_threads_for_coredump;
+    Vector<NonnullRefPtr<Thread>> m_threads_for_coredump;
 
     struct SignalActionData {
         VirtualAddress handler_or_sigaction;
diff --git a/Kernel/Scheduler.cpp b/Kernel/Scheduler.cpp
index 09051713649..6602e2a192a 100644
--- a/Kernel/Scheduler.cpp
+++ b/Kernel/Scheduler.cpp
@@ -391,7 +391,7 @@ UNMAP_AFTER_INIT Thread* Scheduler::create_ap_idle_thread(u32 cpu)
     VERIFY(Processor::is_bootstrap_processor());
 
     VERIFY(s_colonel_process);
-    Thread* idle_thread = s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false);
+    Thread* idle_thread = MUST(s_colonel_process->create_kernel_thread(idle_loop, nullptr, THREAD_PRIORITY_MIN, MUST(KString::formatted("idle thread #{}", cpu)), 1 << cpu, false));
     VERIFY(idle_thread);
     return idle_thread;
 }
diff --git a/Kernel/Syscalls/thread.cpp b/Kernel/Syscalls/thread.cpp
index c01bd0b7305..8e5dfabb38b 100644
--- a/Kernel/Syscalls/thread.cpp
+++ b/Kernel/Syscalls/thread.cpp
@@ -43,7 +43,7 @@ ErrorOr<FlatPtr> Process::sys$create_thread(void* (*entry)(void*), Userspace<Sy
 
     // FIXME: Do something with guard pages?
 
-    auto thread = TRY(Thread::try_create(*this));
+    auto thread = TRY(Thread::create(*this));
 
     // We know this thread is not the main_thread,
     // So give it a unique name until the user calls $set_thread_name on it
diff --git a/Kernel/Thread.cpp b/Kernel/Thread.cpp
--- a/Kernel/Thread.cpp
+++ b/Kernel/Thread.cpp
@@ -39,7 +39,7 @@ SpinlockProtected<Thread::GlobalList, LockRank::None>& Thread::all_instances()
     return *s_list;
 }
 
-ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> process)
+ErrorOr<NonnullRefPtr<Thread>> Thread::create(NonnullRefPtr<Process> process)
 {
     auto kernel_stack_region = TRY(MM.allocate_kernel_region(default_kernel_stack_size, {}, Memory::Region::Access::ReadWrite, AllocationStrategy::AllocateNow));
     kernel_stack_region->set_stack(true);
@@ -47,7 +47,7 @@ ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_create(NonnullRefPtr<Process> pro
     auto block_timer = TRY(try_make_lock_ref_counted<Timer>());
 
     auto name = TRY(process->name().with([](auto& name) { return name->try_clone(); }));
-    return adopt_nonnull_lock_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
+    return adopt_nonnull_ref_or_enomem(new (nothrow) Thread(move(process), move(kernel_stack_region), move(block_timer), move(name)));
 }
 
 Thread::Thread(NonnullRefPtr<Process> process, NonnullOwnPtr<Memory::Region> kernel_stack_region, NonnullLockRefPtr<Timer> block_timer, NonnullOwnPtr<KString> name)
@@ -1217,9 +1217,9 @@ RegisterState& Thread::get_register_dump_from_stack()
     return *trap->regs;
 }
 
-ErrorOr<NonnullLockRefPtr<Thread>> Thread::try_clone(Process& process)
+ErrorOr<NonnullRefPtr<Thread>> Thread::clone(NonnullRefPtr<Process> process)
 {
-    auto clone = TRY(Thread::try_create(process));
+    auto clone = TRY(Thread::create(move(process)));
     m_signal_action_masks.span().copy_to(clone->m_signal_action_masks);
     clone->m_signal_mask = m_signal_mask;
     clone->m_fpu_state = m_fpu_state;
@@ -1399,9 +1399,9 @@ ErrorOr<void> Thread::make_thread_specific_region(Badge<Process>)
     });
 }
 
-LockRefPtr<Thread> Thread::from_tid(ThreadID tid)
+RefPtr<Thread> Thread::from_tid(ThreadID tid)
 {
-    return Thread::all_instances().with([&](auto& list) -> LockRefPtr<Thread> {
+    return Thread::all_instances().with([&](auto& list) -> RefPtr<Thread> {
         for (Thread& thread : list) {
             if (thread.tid() == tid)
                 return thread;
diff --git a/Kernel/Thread.h b/Kernel/Thread.h
index 6738e8bb83e..69b0e3b30fc 100644
--- a/Kernel/Thread.h
+++ b/Kernel/Thread.h
@@ -67,10 +67,10 @@ public:
         return Processor::current_thread();
     }
 
-    static ErrorOr<NonnullLockRefPtr<Thread>> try_create(NonnullRefPtr<Process>);
+    static ErrorOr<NonnullRefPtr<Thread>> create(NonnullRefPtr<Process>);
     ~Thread();
 
-    static LockRefPtr<Thread> from_tid(ThreadID);
+    static RefPtr<Thread> from_tid(ThreadID);
     static void finalize_dying_threads();
 
     ThreadID tid() const { return m_tid; }
@@ -294,7 +294,7 @@ public:
 
 private:
     BlockerSet* m_blocker_set { nullptr };
-    NonnullLockRefPtr<Thread> m_thread;
+    NonnullRefPtr<Thread> const m_thread;
     u8 m_was_interrupted_by_signal { 0 };
     bool m_is_blocking { false };
    bool m_was_interrupted_by_death { false };
@@ -429,7 +429,7 @@ public:
     bool unblock(void*, bool);
 
 private:
-    NonnullLockRefPtr<Thread> m_joinee;
+    NonnullRefPtr<Thread> const m_joinee;
     void*& m_joinee_exit_value;
     ErrorOr<void>& m_try_join_result;
     bool m_did_unblock { false };
@@ -961,7 +961,7 @@ public:
         return !m_is_joinable;
    }
 
-    ErrorOr<NonnullLockRefPtr<Thread>> try_clone(Process&);
+    ErrorOr<NonnullRefPtr<Thread>> clone(NonnullRefPtr<Process>);
 
     template<IteratorFunction<Thread&> Callback>
     static IterationDecision for_each_in_state(State, Callback);
diff --git a/Kernel/WorkQueue.h b/Kernel/WorkQueue.h
index ab35824666a..b14736b6c0a 100644
--- a/Kernel/WorkQueue.h
+++ b/Kernel/WorkQueue.h
@@ -61,7 +61,7 @@ private:
 
     void do_queue(WorkItem&);
 
-    LockRefPtr<Thread> m_thread;
+    RefPtr<Thread> m_thread;
     WaitQueue m_wait_queue;
     SpinlockProtected<IntrusiveList<&WorkItem::m_node>, LockRank::None> m_items {};
 };
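
To illustrate the call-site impact of this change, here is a minimal standalone sketch. It is illustrative C++ only, not SerenityOS code: the real ErrorOr, TRY(), NonnullRefPtr, and adopt_nonnull_ref_or_enomem() live in AK and differ in detail, and std::shared_ptr stands in for the kernel's ref-counted pointers. The point it models is why create_kernel_thread() moved from returning a null-able LockRefPtr<Thread> to ErrorOr<NonnullRefPtr<Thread>>: the error code travels with the result, so callers collapse the "create, check for null, bail" dance into a single TRY() (or MUST(), as in Scheduler::create_ap_idle_thread() above).

// Simplified model of the ErrorOr/TRY pattern used by this patch.
// All types here are illustrative stand-ins, not SerenityOS code.
// TRY() uses a GCC/Clang statement expression, as AK's real macro does.
#include <cerrno>
#include <cstdio>
#include <memory>
#include <new>
#include <optional>
#include <string>
#include <utility>

// Holds either a value or an errno-style error code.
template<typename T>
class ErrorOr {
public:
    ErrorOr(T value) : m_value(std::move(value)) {}
    ErrorOr(int error) : m_error(error) {}
    bool is_error() const { return !m_value.has_value(); }
    int error() const { return m_error; }
    T release_value() { return std::move(*m_value); }

private:
    std::optional<T> m_value;
    int m_error { 0 };
};

// Propagate the error to the caller, otherwise unwrap the value.
#define TRY(expr)                   \
    ({                              \
        auto _result = (expr);      \
        if (_result.is_error())     \
            return _result.error(); \
        _result.release_value();    \
    })

struct Thread {
    std::string name;
};

// Old shape: failure is a null pointer; the reason for the failure is lost.
std::shared_ptr<Thread> create_kernel_thread_old(std::string)
{
    return nullptr;
}

// New shape: failure carries an error code, mirroring how
// adopt_nonnull_ref_or_enomem() turns a failed allocation into ENOMEM.
ErrorOr<std::shared_ptr<Thread>> create_kernel_thread_new(std::string name)
{
    auto* raw = new (std::nothrow) Thread { std::move(name) };
    if (!raw)
        return ENOMEM;
    return std::shared_ptr<Thread>(raw);
}

// Call site: one TRY() both unwraps the thread and forwards any error.
int spawn_worker()
{
    auto thread = TRY(create_kernel_thread_new("worker"));
    std::printf("spawned thread: %s\n", thread->name.c_str());
    return 0;
}

int main()
{
    return spawn_worker();
}

The sketch also hints at why the blockers in the patch could take plain const NonnullRefPtr members: once the reference is handed over at construction time and never reseated, there is nothing left for a lock-aware pointer type to protect.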