LibCore: Report error condition when reading process statistics failed
This commit is contained in:
parent 754bf22da7
commit cf89180c35
Notes: sideshowbarker, 2024-07-19 00:08:32 +09:00
Author: https://github.com/tomuta
Commit: https://github.com/SerenityOS/serenity/commit/cf89180c350
Pull-request: https://github.com/SerenityOS/serenity/pull/4736
12 changed files with 94 additions and 74 deletions
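
The core of the change is that Core::ProcessStatisticsReader::get_all() now returns an Optional, so a failed read of /proc/all is reported to the caller instead of being asserted away. A minimal usage sketch of the new API follows; the little program itself is illustrative and not part of the commit:

    #include <AK/Format.h>
    #include <LibCore/ProcessStatisticsReader.h>

    int main()
    {
        auto all_processes = Core::ProcessStatisticsReader::get_all();
        if (!all_processes.has_value()) {
            // New behaviour: the reader signals failure instead of asserting.
            warnln("Could not read process statistics");
            return 1;
        }
        outln("{} processes", all_processes.value().size());
        return 0;
    }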
@@ -358,57 +358,59 @@ void ProcessModel::update()
     HashTable<PidAndTid> live_pids;
     u64 sum_ticks_scheduled = 0;
-    for (auto& it : all_processes) {
-        for (auto& thread : it.value.threads) {
-            ThreadState state;
-            state.pid = it.value.pid;
-            state.user = it.value.username;
-            state.pledge = it.value.pledge;
-            state.veil = it.value.veil;
-            state.syscall_count = thread.syscall_count;
-            state.inode_faults = thread.inode_faults;
-            state.zero_faults = thread.zero_faults;
-            state.cow_faults = thread.cow_faults;
-            state.unix_socket_read_bytes = thread.unix_socket_read_bytes;
-            state.unix_socket_write_bytes = thread.unix_socket_write_bytes;
-            state.ipv4_socket_read_bytes = thread.ipv4_socket_read_bytes;
-            state.ipv4_socket_write_bytes = thread.ipv4_socket_write_bytes;
-            state.file_read_bytes = thread.file_read_bytes;
-            state.file_write_bytes = thread.file_write_bytes;
-            state.amount_virtual = it.value.amount_virtual;
-            state.amount_resident = it.value.amount_resident;
-            state.amount_dirty_private = it.value.amount_dirty_private;
-            state.amount_clean_inode = it.value.amount_clean_inode;
-            state.amount_purgeable_volatile = it.value.amount_purgeable_volatile;
-            state.amount_purgeable_nonvolatile = it.value.amount_purgeable_nonvolatile;
-
-            state.name = thread.name;
-            state.executable = it.value.executable;
-
-            state.ppid = it.value.ppid;
-            state.tid = thread.tid;
-            state.pgid = it.value.pgid;
-            state.sid = it.value.sid;
-            state.times_scheduled = thread.times_scheduled;
-            state.ticks_user = thread.ticks_user;
-            state.ticks_kernel = thread.ticks_kernel;
-            state.cpu = thread.cpu;
-            state.cpu_percent = 0;
-            state.priority = thread.priority;
-            state.effective_priority = thread.effective_priority;
-            state.state = thread.state;
-            sum_ticks_scheduled += thread.ticks_user + thread.ticks_kernel;
-            {
-                auto pit = m_threads.find({ it.value.pid, thread.tid });
-                if (pit == m_threads.end())
-                    m_threads.set({ it.value.pid, thread.tid }, make<Thread>());
-            }
-            auto pit = m_threads.find({ it.value.pid, thread.tid });
-            ASSERT(pit != m_threads.end());
-            (*pit).value->previous_state = (*pit).value->current_state;
-            (*pit).value->current_state = state;
-
-            live_pids.set({ it.value.pid, thread.tid });
-        }
-    }
+    if (all_processes.has_value()) {
+        for (auto& it : all_processes.value()) {
+            for (auto& thread : it.value.threads) {
+                ThreadState state;
+                state.pid = it.value.pid;
+                state.user = it.value.username;
+                state.pledge = it.value.pledge;
+                state.veil = it.value.veil;
+                state.syscall_count = thread.syscall_count;
+                state.inode_faults = thread.inode_faults;
+                state.zero_faults = thread.zero_faults;
+                state.cow_faults = thread.cow_faults;
+                state.unix_socket_read_bytes = thread.unix_socket_read_bytes;
+                state.unix_socket_write_bytes = thread.unix_socket_write_bytes;
+                state.ipv4_socket_read_bytes = thread.ipv4_socket_read_bytes;
+                state.ipv4_socket_write_bytes = thread.ipv4_socket_write_bytes;
+                state.file_read_bytes = thread.file_read_bytes;
+                state.file_write_bytes = thread.file_write_bytes;
+                state.amount_virtual = it.value.amount_virtual;
+                state.amount_resident = it.value.amount_resident;
+                state.amount_dirty_private = it.value.amount_dirty_private;
+                state.amount_clean_inode = it.value.amount_clean_inode;
+                state.amount_purgeable_volatile = it.value.amount_purgeable_volatile;
+                state.amount_purgeable_nonvolatile = it.value.amount_purgeable_nonvolatile;
+
+                state.name = thread.name;
+                state.executable = it.value.executable;
+
+                state.ppid = it.value.ppid;
+                state.tid = thread.tid;
+                state.pgid = it.value.pgid;
+                state.sid = it.value.sid;
+                state.times_scheduled = thread.times_scheduled;
+                state.ticks_user = thread.ticks_user;
+                state.ticks_kernel = thread.ticks_kernel;
+                state.cpu = thread.cpu;
+                state.cpu_percent = 0;
+                state.priority = thread.priority;
+                state.effective_priority = thread.effective_priority;
+                state.state = thread.state;
+                sum_ticks_scheduled += thread.ticks_user + thread.ticks_kernel;
+                {
+                    auto pit = m_threads.find({ it.value.pid, thread.tid });
+                    if (pit == m_threads.end())
+                        m_threads.set({ it.value.pid, thread.tid }, make<Thread>());
+                }
+                auto pit = m_threads.find({ it.value.pid, thread.tid });
+                ASSERT(pit != m_threads.end());
+                (*pit).value->previous_state = (*pit).value->current_state;
+                (*pit).value->current_state = state;
+
+                live_pids.set({ it.value.pid, thread.tid });
+            }
+        }
+    }
 
@@ -197,10 +197,14 @@ bool generate_profile(pid_t pid)
     String process_name;
 
     auto all_processes = Core::ProcessStatisticsReader::get_all();
-    if (auto it = all_processes.find(pid); it != all_processes.end())
-        process_name = it->value.name;
-    else
+    if (all_processes.has_value()) {
+        if (auto it = all_processes.value().find(pid); it != all_processes.value().end())
+            process_name = it->value.name;
+        else
+            process_name = "(unknown)";
+    } else {
         process_name = "(unknown)";
+    }
 
     if (profiling_enable(pid) < 0) {
         int saved_errno = errno;
@@ -37,7 +37,7 @@ namespace Core {
 
 HashMap<uid_t, String> ProcessStatisticsReader::s_usernames;
 
-HashMap<pid_t, Core::ProcessStatistics> ProcessStatisticsReader::get_all()
+Optional<HashMap<pid_t, Core::ProcessStatistics>> ProcessStatisticsReader::get_all()
 {
     auto file = Core::File::construct("/proc/all");
     if (!file->open(Core::IODevice::ReadOnly)) {
@@ -49,7 +49,8 @@ HashMap<pid_t, Core::ProcessStatistics> ProcessStatisticsReader::get_all()
 
     auto file_contents = file->read_all();
     auto json = JsonValue::from_string(file_contents);
-    ASSERT(json.has_value());
+    if (!json.has_value())
+        return {};
     json.value().as_array().for_each([&](auto& value) {
         const JsonObject& process_object = value.as_object();
         Core::ProcessStatistics process;
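
Taken together with the declaration change below, the two hunks above give get_all() roughly the following shape. Only the signature, the /proc/all open, the JSON parse, and the new early return come from the diff; the open-failure branch and the code that fills the HashMap are not shown on this page, so those parts of the sketch are assumptions:

    Optional<HashMap<pid_t, Core::ProcessStatistics>> ProcessStatisticsReader::get_all()
    {
        HashMap<pid_t, Core::ProcessStatistics> map;

        auto file = Core::File::construct("/proc/all");
        if (!file->open(Core::IODevice::ReadOnly)) {
            // Assumption: an open failure also reports "no value" now; the
            // body of this branch is outside the hunks shown above.
            return {};
        }

        auto file_contents = file->read_all();
        auto json = JsonValue::from_string(file_contents);
        if (!json.has_value())
            return {};
        json.value().as_array().for_each([&](auto& value) {
            const JsonObject& process_object = value.as_object();
            Core::ProcessStatistics process;
            // Assumption: the fields of process are filled in from
            // process_object here, then the entry is keyed by its pid.
            map.set(process.pid, move(process));
        });
        return map;
    }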
@@ -86,7 +86,7 @@ struct ProcessStatistics {
 
 class ProcessStatisticsReader {
 public:
-    static HashMap<pid_t, Core::ProcessStatistics> get_all();
+    static Optional<HashMap<pid_t, Core::ProcessStatistics>> get_all();
 
 private:
     static String username_from_uid(uid_t);
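
One consequence of the new declaration is that the result can no longer be iterated directly: Optional has no begin()/end(), so the old call-site pattern stops compiling and every caller has to be updated along with the reader itself. Illustrative before/after, not lines from the diff:

    // Before: the returned HashMap could be ranged-over as soon as it was fetched.
    auto processes = Core::ProcessStatisticsReader::get_all();
    for (auto& it : processes) { /* ... */ }

    // After: the Optional must be checked and unwrapped first.
    auto processes = Core::ProcessStatisticsReader::get_all();
    if (processes.has_value()) {
        for (auto& it : processes.value()) { /* ... */ }
    }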
@@ -50,14 +50,15 @@ void RunningProcessesModel::update()
     Core::ProcessStatisticsReader reader;
     auto processes = reader.get_all();
 
-    for (auto& it : processes) {
-        Process process;
-        process.pid = it.value.pid;
-        process.uid = it.value.uid;
-        process.icon = FileIconProvider::icon_for_executable(it.value.executable).bitmap_for_size(16);
-        process.name = it.value.name;
-        m_processes.append(move(process));
+    if (processes.has_value()) {
+        for (auto& it : processes.value()) {
+            Process process;
+            process.pid = it.value.pid;
+            process.uid = it.value.uid;
+            process.icon = FileIconProvider::icon_for_executable(it.value.executable).bitmap_for_size(16);
+            process.name = it.value.name;
+            m_processes.append(move(process));
+        }
     }
 
     did_update();
@@ -147,10 +147,10 @@ private:
         idle = 0;
 
         auto all_processes = Core::ProcessStatisticsReader::get_all();
-        if (all_processes.is_empty())
+        if (!all_processes.has_value() || all_processes.value().is_empty())
             return false;
 
-        for (auto& it : all_processes) {
+        for (auto& it : all_processes.value()) {
            for (auto& jt : it.value.threads) {
                if (it.value.pid == 0)
                    idle += jt.ticks_user + jt.ticks_kernel;
@@ -41,8 +41,10 @@ static void print_usage_and_exit()
 static int kill_all(const String& process_name, const unsigned signum)
 {
     auto processes = Core::ProcessStatisticsReader().get_all();
+    if (!processes.has_value())
+        return 1;
 
-    for (auto& it : processes) {
+    for (auto& it : processes.value()) {
         if (it.value.name == process_name) {
             int ret = kill(it.value.pid, signum);
             if (ret < 0)
@@ -166,9 +166,11 @@ int main(int argc, char* argv[])
     }
 
     printf("%-28s %4s %4s %-10s %4s %s\n", "COMMAND", "PID", "PGID", "USER", "FD", "NAME");
+    auto processes = Core::ProcessStatisticsReader::get_all();
+    if (!processes.has_value())
+        return 1;
     if (arg_pid == -1) {
-        auto processes = Core::ProcessStatisticsReader::get_all();
-        for (auto process : processes) {
+        for (auto process : processes.value()) {
             if (process.key == 0)
                 continue;
             auto open_files = get_open_files_by_pid(process.key);
@@ -187,14 +189,13 @@ int main(int argc, char* argv[])
             }
         }
     } else {
-        auto processes = Core::ProcessStatisticsReader::get_all();
         auto open_files = get_open_files_by_pid(arg_pid);
 
         if (open_files.is_empty())
             return 0;
 
         for (auto file : open_files) {
-            display_entry(file, processes.get(arg_pid).value());
+            display_entry(file, processes.value().get(arg_pid).value());
         }
     }
 
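
The second lsof hunk ends up with two different Optionals on one line: the first value() unwraps the Optional that get_all() now returns, and the second unwraps the Optional that HashMap::get() returns for a possibly missing key. Spelled out step by step (the local names stats and entry are illustrative; the has_value() check happens earlier in main(), as shown in the previous hunk):

    auto& stats = processes.value();     // HashMap<pid_t, Core::ProcessStatistics>
    auto entry = stats.get(arg_pid);     // Optional<Core::ProcessStatistics> from HashMap::get()
    display_entry(file, entry.value());  // assumes arg_pid is present, exactly like the original one-liner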
@@ -40,8 +40,10 @@ static int pid_of(const String& process_name, bool single_shot, bool omit_pid, p
     bool displayed_at_least_one = false;
 
     auto processes = Core::ProcessStatisticsReader().get_all();
+    if (!processes.has_value())
+        return 1;
 
-    for (auto& it : processes) {
+    for (auto& it : processes.value()) {
         if (it.value.name == process_name) {
             if (!omit_pid || it.value.pid != pid) {
                 printf(" %d" + (displayed_at_least_one ? 0 : 1), it.value.pid);
@@ -120,8 +120,10 @@ int main(int argc, char** argv)
     printf("\n");
 
     auto all_processes = Core::ProcessStatisticsReader::get_all();
+    if (!all_processes.has_value())
+        return 1;
 
-    for (const auto& it : all_processes) {
+    for (const auto& it : all_processes.value()) {
         const auto& proc = it.value;
         auto tty = proc.tty;
 
@@ -92,11 +92,12 @@ struct Snapshot {
 
 static Snapshot get_snapshot()
 {
-    Snapshot snapshot;
-
     auto all_processes = Core::ProcessStatisticsReader::get_all();
-    for (auto& it : all_processes) {
+    if (!all_processes.has_value())
+        return {};
+
+    Snapshot snapshot;
+    for (auto& it : all_processes.value()) {
         auto& stats = it.value;
         for (auto& thread : stats.threads) {
             snapshot.sum_times_scheduled += thread.times_scheduled;
@@ -77,6 +77,10 @@ int main()
     }
 
     auto process_statistics = Core::ProcessStatisticsReader::get_all();
+    if (!process_statistics.has_value()) {
+        warnln("Error: Could not get process statistics");
+        return 1;
+    }
 
     auto now = time(nullptr);
 
@@ -110,7 +114,7 @@ int main()
 
         String what = "n/a";
 
-        for (auto& it : process_statistics) {
+        for (auto& it : process_statistics.value()) {
             if (it.value.tty == tty && it.value.pid == it.value.pgid)
                 what = it.value.name;
         }