Kernel: Guard the all processes list with a Spinlock rather than a Mutex
There are callers of processes().with or processes().for_each that require interrupts to be disabled. Taking a Mutex with interrupts disabled is a recipe for deadlock, so convert this to a Spinlock.
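To illustrate the pattern being adopted, below is a minimal, self-contained user-space sketch of a value guarded by a busy-waiting lock and accessed through a with(callback) helper. The class SpinlockProtectedSketch and everything inside it are hypothetical stand-ins (std::atomic_flag as the lock), not SerenityOS's actual SpinlockProtected; the point is only that a lock which spins rather than sleeps can be taken from code that must not block, e.g. code running with interrupts disabled.

// Illustrative sketch only -- NOT SerenityOS's SpinlockProtected.
// A value guarded by a busy-waiting lock, accessed via with(callback).
#include <atomic>
#include <cstddef>
#include <vector>

template<typename T>
class SpinlockProtectedSketch {
public:
    // Runs `callback` with exclusive access to the protected value.
    // Waiters busy-wait instead of sleeping, which is the property that
    // matters for callers that must not block (e.g. interrupts disabled).
    template<typename Callback>
    decltype(auto) with(Callback callback)
    {
        while (m_lock.test_and_set(std::memory_order_acquire)) {
            // spin until the current holder releases the lock
        }
        struct Unlocker {
            std::atomic_flag& lock;
            ~Unlocker() { lock.clear(std::memory_order_release); }
        } unlocker { m_lock };
        return callback(m_value); // works for void and value-returning callbacks
    }

private:
    std::atomic_flag m_lock = ATOMIC_FLAG_INIT;
    T m_value {};
};

int main()
{
    SpinlockProtectedSketch<std::vector<int>> numbers;
    numbers.with([](auto& list) { list.push_back(42); });                     // mutate under the lock
    std::size_t count = numbers.with([](auto& list) { return list.size(); }); // read under the lock
    return count == 1 ? 0 : 1;
}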
This commit is contained in:
parent
70518e69f4
commit
dea62fe93c
Notes:
sideshowbarker
2024-07-18 05:08:45 +09:00
Author: https://github.com/ADKaster
Commit: https://github.com/SerenityOS/serenity/commit/dea62fe93c0
Pull-request: https://github.com/SerenityOS/serenity/pull/9580
Reviewed-by: https://github.com/alimpfard
Reviewed-by: https://github.com/awesomekling
Reviewed-by: https://github.com/bgianfo
4 changed files with 13 additions and 13 deletions
@@ -916,7 +916,7 @@ KResult ProcFSRootDirectory::traverse_as_directory(unsigned fsid, Function<bool(
         InodeIdentifier identifier = { fsid, component.component_index() };
         callback({ component.name(), identifier, 0 });
     }
-    processes().for_each_shared([&](Process& process) {
+    processes().for_each([&](Process& process) {
         VERIFY(!(process.pid() < 0));
         u64 process_id = (u64)process.pid().value();
         InodeIdentifier identifier = { fsid, static_cast<InodeIndex>(process_id << 36) };

@@ -44,7 +44,7 @@ static void create_signal_trampoline();

 RecursiveSpinlock g_profiling_lock;
 static Atomic<pid_t> next_pid;
-static Singleton<MutexProtected<Process::List>> s_processes;
+static Singleton<SpinlockProtected<Process::List>> s_processes;
 READONLY_AFTER_INIT HashMap<String, OwnPtr<Module>>* g_modules;
 READONLY_AFTER_INIT Memory::Region* g_signal_trampoline_region;

@@ -55,7 +55,7 @@ MutexProtected<String>& hostname()
     return *s_hostname;
 }

-MutexProtected<Process::List>& processes()
+SpinlockProtected<Process::List>& processes()
 {
     return *s_processes;
 }
@@ -86,7 +86,7 @@ UNMAP_AFTER_INIT void Process::initialize()
 NonnullRefPtrVector<Process> Process::all_processes()
 {
     NonnullRefPtrVector<Process> output;
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         output.ensure_capacity(list.size_slow());
         for (const auto& process : list)
             output.append(NonnullRefPtr<Process>(process));
@@ -138,7 +138,7 @@ void Process::register_new(Process& process)
 {
     // Note: this is essentially the same like process->ref()
     RefPtr<Process> new_process = process;
-    processes().with_exclusive([&](auto& list) {
+    processes().with([&](auto& list) {
         list.prepend(process);
     });
 }
@@ -301,7 +301,7 @@ bool Process::unref() const
     // NOTE: We need to obtain the process list lock before doing anything,
     // because otherwise someone might get in between us lowering the
     // refcount and acquiring the lock.
-    auto did_hit_zero = processes().with_exclusive([&](auto& list) {
+    auto did_hit_zero = processes().with([&](auto& list) {
         auto new_ref_count = deref_base();
         if (new_ref_count > 0)
             return false;
@@ -418,7 +418,7 @@ void Process::crash(int signal, FlatPtr ip, bool out_of_memory)

 RefPtr<Process> Process::from_pid(ProcessID pid)
 {
-    return processes().with_shared([&](const auto& list) -> RefPtr<Process> {
+    return processes().with([&](const auto& list) -> RefPtr<Process> {
         for (auto& process : list) {
             if (process.pid() == pid)
                 return &process;
@@ -696,7 +696,7 @@ void Process::die()
         m_threads_for_coredump.append(thread);
     });

-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;

@@ -815,13 +815,13 @@ static_assert(sizeof(Process) == (PAGE_SIZE * 2));

 extern RecursiveSpinlock g_profiling_lock;

-MutexProtected<Process::List>& processes();
+SpinlockProtected<Process::List>& processes();

 template<IteratorFunction<Process&> Callback>
 inline void Process::for_each(Callback callback)
 {
     VERIFY_INTERRUPTS_DISABLED();
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -835,7 +835,7 @@ template<IteratorFunction<Process&> Callback>
 inline void Process::for_each_child(Callback callback)
 {
     ProcessID my_pid = pid();
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;
@@ -876,7 +876,7 @@ inline IterationDecision Process::for_each_thread(Callback callback)
 template<IteratorFunction<Process&> Callback>
 inline void Process::for_each_in_pgrp(ProcessGroupID pgid, Callback callback)
 {
-    processes().with_shared([&](const auto& list) {
+    processes().with([&](const auto& list) {
         for (auto it = list.begin(); it != list.end();) {
             auto& process = *it;
             ++it;

@@ -65,7 +65,7 @@ KResult Process::do_killall(int signal)
     KResult error = KSuccess;

     // Send the signal to all processes we have access to for.
-    processes().for_each_shared([&](auto& process) {
+    processes().for_each([&](auto& process) {
         KResult res = KSuccess;
         if (process.pid() == pid())
             res = do_killself(signal);
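For reference, the call-site change condensed from the hunks above; the lock-class and method names are taken directly from the diff, while the comments are illustrative descriptions only:

// Before: MutexProtected reader/writer API
processes().with_shared([&](const auto& list) { /* shared (read) access */ });
processes().with_exclusive([&](auto& list) { /* exclusive (write) access */ });
processes().for_each_shared([&](auto& process) { /* per-process work */ });

// After: SpinlockProtected single exclusive API
processes().with([&](auto& list) { /* access under the spinlock */ });
processes().for_each([&](auto& process) { /* per-process work */ });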