feat: use relm4 workers instead of runners

This commit is contained in:
Gabriele Musco 2023-09-05 06:56:56 +00:00
parent ac99741517
commit c996f5c81b
14 changed files with 896 additions and 163 deletions

View file

@ -1,4 +1,4 @@
use crate::cmd_runner::CmdRunner;
use crate::{cmd_runner::CmdRunner, ui::workers::runner_worker::WorkerJob};
use std::collections::HashMap;
#[derive(Debug, Clone)]
@ -10,6 +10,7 @@ pub struct Cmake {
}
impl Cmake {
#[deprecated]
pub fn get_prepare_runner(&self) -> CmdRunner {
let mut args = vec![
"-B".into(),
@ -33,6 +34,30 @@ impl Cmake {
CmdRunner::new(self.env.clone(), "cmake".into(), args)
}
/// Builds the cmake configure job: `cmake -B <build_dir> -G Ninja
/// [-DKEY=VAL...] <source_dir>`, run with this instance's environment.
///
/// Panics if any cmake variable name contains a space.
pub fn get_prepare_job(&self) -> WorkerJob {
    let mut args: Vec<String> = vec![
        "-B".into(),
        self.build_dir.clone(),
        "-G".into(),
        "Ninja".into(),
    ];
    // One -D flag per configured cmake variable; values containing spaces
    // are wrapped in literal double quotes, as before.
    if let Some(vars) = self.vars.as_ref() {
        for (key, val) in vars {
            if key.contains(' ') {
                panic!("Cmake vars cannot contain spaces!");
            }
            let flag = if val.contains(' ') {
                format!("-D{}=\"{}\"", key, val)
            } else {
                format!("-D{}={}", key, val)
            };
            args.push(flag);
        }
    }
    args.push(self.source_dir.clone());
    WorkerJob::new_cmd(self.env.clone(), "cmake".into(), Some(args))
}
#[deprecated]
pub fn get_build_runner(&self) -> CmdRunner {
CmdRunner::new(
self.env.clone(),
@ -40,7 +65,15 @@ impl Cmake {
vec!["--build".into(), self.build_dir.clone()],
)
}
/// Builds the compile job: `cmake --build <build_dir>` with this
/// instance's environment.
pub fn get_build_job(&self) -> WorkerJob {
    let args = vec!["--build".into(), self.build_dir.clone()];
    WorkerJob::new_cmd(self.env.clone(), "cmake".into(), Some(args))
}
#[deprecated]
pub fn get_install_runner(&self) -> CmdRunner {
CmdRunner::new(
self.env.clone(),
@ -48,4 +81,12 @@ impl Cmake {
vec!["--install".into(), self.build_dir.clone()],
)
}
/// Builds the install job: `cmake --install <build_dir>` with this
/// instance's environment.
pub fn get_install_job(&self) -> WorkerJob {
    let args = vec!["--install".into(), self.build_dir.clone()];
    WorkerJob::new_cmd(self.env.clone(), "cmake".into(), Some(args))
}
}

View file

@ -2,6 +2,7 @@ use crate::{
cmd_runner::CmdRunner,
func_runner::{FuncRunner, FuncRunnerOut},
profile::Profile,
ui::workers::runner_worker::{FuncWorkerOut, WorkerJob},
};
use git2::Repository;
use std::path::Path;
@ -27,6 +28,7 @@ impl Git {
split.next().map(|s| s.into())
}
#[deprecated]
pub fn get_reset_runner(&self) -> CmdRunner {
CmdRunner::new(
None,
@ -40,6 +42,20 @@ impl Git {
)
}
/// Builds a `git -C <dir> reset --hard` job that discards any local
/// modifications in the checkout.
pub fn get_reset_job(&self) -> WorkerJob {
    let mut args: Vec<String> = vec!["-C".into(), self.dir.clone()];
    args.extend(["reset".into(), "--hard".into()]);
    WorkerJob::new_cmd(None, "git".into(), Some(args))
}
#[deprecated]
pub fn get_override_remote_url_runner(&self) -> FuncRunner {
let dir = self.dir.clone();
let n_remote_url = self.get_repo();
@ -76,6 +92,43 @@ impl Git {
}))
}
// Returns an in-process job that rewrites the checkout's `origin` remote URL
// to match this Git config. Best-effort: if the repository cannot be opened
// at all the job reports success, since a later clone job will create it —
// NOTE(review): presumably intentional, confirm against callers.
pub fn get_override_remote_url_job(&self) -> WorkerJob {
// Move owned copies into the 'static closure below.
let dir = self.dir.clone();
let n_remote_url = self.get_repo();
WorkerJob::new_func(Box::new(move || {
if let Ok(repo) = Repository::open(dir) {
if let Ok(remote) = repo.find_remote("origin") {
// Only rewrite when the current URL differs from the wanted one.
if remote.url().unwrap_or("") != n_remote_url {
if repo.remote_set_url("origin", &n_remote_url).is_ok() {
return FuncWorkerOut {
success: true,
out: vec![],
};
}
// set_url failed: abort the surrounding job queue.
return FuncWorkerOut {
success: false,
out: vec!["Failed to set origin remote url".into()],
};
}
} else {
// Repo exists but has no `origin` remote: report failure.
return FuncWorkerOut {
success: false,
out: vec!["Could not find remote origin".into()],
};
}
// URL already matches: nothing to do.
return FuncWorkerOut {
success: true,
out: vec![],
};
}
// Repo missing/unreadable: treated as success (no-op).
FuncWorkerOut {
success: true,
out: vec![],
}
}))
}
#[deprecated]
pub fn get_pull_runner(&self) -> CmdRunner {
CmdRunner::new(
None,
@ -84,6 +137,15 @@ impl Git {
)
}
/// Builds a `git -C <dir> pull` job that updates an existing checkout.
pub fn get_pull_job(&self) -> WorkerJob {
    let args = vec!["-C".into(), self.dir.clone(), "pull".into()];
    WorkerJob::new_cmd(None, "git".into(), Some(args))
}
#[deprecated]
pub fn get_clone_runner(&self) -> CmdRunner {
CmdRunner::new(
None,
@ -97,6 +159,20 @@ impl Git {
)
}
/// Builds a fresh-clone job (including submodules) into `self.dir`.
pub fn get_clone_job(&self) -> WorkerJob {
    let args = vec![
        "clone".into(),
        self.get_repo(),
        self.dir.clone(),
        "--recurse-submodules".into(),
    ];
    WorkerJob::new_cmd(None, "git".into(), Some(args))
}
#[deprecated]
pub fn get_checkout_ref_runner(&self) -> Option<CmdRunner> {
self.get_ref().map(|r| {
CmdRunner::new(
@ -107,6 +183,17 @@ impl Git {
})
}
/// Builds a `git checkout <ref>` job, or `None` when this config does not
/// pin a specific ref.
pub fn get_checkout_ref_job(&self) -> Option<WorkerJob> {
    let target = self.get_ref()?;
    Some(WorkerJob::new_cmd(
        None,
        "git".into(),
        Some(vec![
            "-C".into(),
            self.dir.clone(),
            "checkout".into(),
            target,
        ]),
    ))
}
#[deprecated]
pub fn get_clone_or_not_runner(&self) -> Option<CmdRunner> {
let path_s = format!("{}/.git", self.dir.clone());
let path = Path::new(&path_s);
@ -116,6 +203,16 @@ impl Git {
Some(self.get_clone_runner())
}
/// Returns a clone job when the repo is not yet checked out, `None` when a
/// `.git` directory already exists under `self.dir`.
pub fn get_clone_or_not_job(&self) -> Option<WorkerJob> {
    let git_marker = format!("{}/.git", self.dir);
    if Path::new(&git_marker).is_dir() {
        None
    } else {
        Some(self.get_clone_job())
    }
}
#[deprecated]
pub fn clone_or_pull(&self, profile: &Profile) -> Option<CmdRunner> {
match self.get_clone_or_not_runner() {
Some(r) => Some(r),
@ -125,4 +222,14 @@ impl Git {
},
}
}
/// Returns the job that brings the checkout into existence or up to date:
/// a clone when missing, otherwise a pull — but only if the profile opted
/// into pulling on every build.
pub fn get_clone_or_pull_job(&self, profile: &Profile) -> Option<WorkerJob> {
    if let Some(clone_job) = self.get_clone_or_not_job() {
        Some(clone_job)
    } else if profile.pull_on_build {
        Some(self.get_pull_job())
    } else {
        None
    }
}
}

View file

@ -3,10 +3,99 @@ use crate::{
cmd_runner::CmdRunner,
file_utils::rm_rf,
profile::Profile,
runner::Runner,
runner::Runner, ui::workers::runner_worker::WorkerJob,
};
use std::{collections::HashMap, path::Path};
use std::{collections::{HashMap, VecDeque}, path::Path};
/// Builds the ordered job queue that fetches, patches, builds and installs
/// Basalt for `profile`.
///
/// When `clean_build` is true, or no build directory exists yet, the build
/// directory is wiped and cmake is re-configured from scratch.
pub fn get_build_basalt_jobs(profile: &Profile, clean_build: bool) -> VecDeque<WorkerJob> {
    let mut jobs = VecDeque::<WorkerJob>::new();
    let git = Git {
        repo: match profile.features.basalt.repo.as_ref() {
            Some(r) => r.clone(),
            None => "https://gitlab.freedesktop.org/mateosss/basalt.git".into(),
        },
        dir: profile.features.basalt.path.as_ref().unwrap().clone(),
    };
    jobs.push_back(git.get_override_remote_url_job());
    // `if let` instead of `Option::map` for side effects
    // (clippy::option_map_unit_fn).
    if let Some(j) = git.get_clone_or_pull_job(profile) {
        jobs.push_back(j);
    }
    if let Some(j) = git.get_checkout_ref_job() {
        jobs.push_back(j);
        // After checking out a pinned ref, pull to bring it up to date.
        if profile.pull_on_build {
            jobs.push_back(git.get_pull_job());
        }
    }
    let build_dir = format!("{}/build", profile.features.basalt.path.as_ref().unwrap());
    let mut cmake_vars: HashMap<String, String> = HashMap::new();
    cmake_vars.insert("CMAKE_BUILD_TYPE".into(), "Release".into());
    cmake_vars.insert("CMAKE_INSTALL_PREFIX".into(), profile.prefix.clone());
    cmake_vars.insert("BUILD_TESTS".into(), "OFF".into());
    cmake_vars.insert("BASALT_INSTANTIATIONS_DOUBLE".into(), "OFF".into());
    cmake_vars.insert(
        "CMAKE_INSTALL_LIBDIR".into(),
        format!("{}/lib", profile.prefix),
    );
    let mut cmake_env: HashMap<String, String> = HashMap::new();
    // Limit cmake build parallelism to 2 jobs.
    cmake_env.insert("CMAKE_BUILD_PARALLEL_LEVEL".into(), "2".into());
    let cmake = Cmake {
        env: Some(cmake_env),
        vars: Some(cmake_vars),
        source_dir: profile.features.basalt.path.as_ref().unwrap().clone(),
        build_dir: build_dir.clone(),
    };
    // Patch the vendored Pangolin (picojson fix + missing stdint include)
    // so it builds with current toolchains.
    jobs.push_back(WorkerJob::new_cmd(
        None,
        "bash".into(),
        Some(vec![
            "-c".into(),
            format!(
                "cd {repo}/thirdparty/Pangolin && git checkout include/pangolin/utils/picojson.h && curl -sSL 'https://aur.archlinux.org/cgit/aur.git/plain/279c17d9c9eb9374c89489b449f92cb93350e8cd.patch?h=basalt-monado-git' -o picojson_fix.patch && git apply picojson_fix.patch && sed -i '1s/^/#include <stdint.h>\\n/' include/pangolin/platform.h",
                repo = git.dir
            ),
        ]),
    ));
    if !Path::new(&build_dir).is_dir() || clean_build {
        rm_rf(&build_dir);
        jobs.push_back(cmake.get_prepare_job());
    }
    jobs.push_back(cmake.get_build_job());
    jobs.push_back(cmake.get_install_job());
    // Basalt expects its vendored eigen headers under the install prefix.
    jobs.push_back(WorkerJob::new_cmd(
        None,
        "mkdir".into(),
        Some(vec![
            "-p".into(),
            format!(
                "{}/share/basalt/thirdparty/basalt-headers/thirdparty",
                profile.prefix
            ),
        ]),
    ));
    jobs.push_back(WorkerJob::new_cmd(
        None,
        "cp".into(),
        Some(vec![
            "-Ra".into(),
            format!(
                "{}/thirdparty/basalt-headers/thirdparty/eigen",
                profile.features.basalt.path.as_ref().unwrap().clone()
            ),
            format!("{}/share/basalt/thirdparty", profile.prefix),
        ]),
    ));
    jobs
}
#[deprecated]
pub fn get_build_basalt_runners(profile: &Profile, clean_build: bool) -> Vec<Box<dyn Runner>> {
let mut runners: Vec<Box<dyn Runner>> = vec![];
let git = Git {

View file

@ -2,10 +2,65 @@ use crate::{
build_tools::{cmake::Cmake, git::Git},
file_utils::rm_rf,
profile::Profile,
runner::Runner,
runner::Runner, ui::workers::runner_worker::WorkerJob,
};
use std::{collections::HashMap, path::Path};
use std::{collections::{HashMap, VecDeque}, path::Path};
/// Builds the ordered job queue that fetches, builds and installs
/// libsurvive for `profile`.
///
/// When `clean_build` is true, or no build directory exists yet, the build
/// directory is wiped and cmake is re-configured from scratch.
pub fn get_build_libsurvive_jobs(profile: &Profile, clean_build: bool) -> VecDeque<WorkerJob> {
    let mut jobs = VecDeque::<WorkerJob>::new();
    let git = Git {
        repo: match profile.features.libsurvive.repo.as_ref() {
            Some(r) => r.clone(),
            None => "https://github.com/cntools/libsurvive".into(),
        },
        dir: profile.features.libsurvive.path.as_ref().unwrap().clone(),
    };
    jobs.push_back(git.get_override_remote_url_job());
    // `if let` instead of `Option::map` for side effects
    // (clippy::option_map_unit_fn).
    if let Some(j) = git.get_clone_or_pull_job(profile) {
        jobs.push_back(j);
    }
    if let Some(j) = git.get_checkout_ref_job() {
        jobs.push_back(j);
        // After checking out a pinned ref, pull to bring it up to date.
        if profile.pull_on_build {
            jobs.push_back(git.get_pull_job());
        }
    }
    let build_dir = format!(
        "{}/build",
        profile.features.libsurvive.path.as_ref().unwrap()
    );
    let mut cmake_vars: HashMap<String, String> = HashMap::new();
    cmake_vars.insert("CMAKE_BUILD_TYPE".into(), "Release".into());
    cmake_vars.insert("ENABLE_api_example".into(), "OFF".into());
    cmake_vars.insert("CMAKE_SKIP_INSTALL_RPATH".into(), "YES".into());
    cmake_vars.insert("CMAKE_INSTALL_PREFIX".into(), profile.prefix.clone());
    cmake_vars.insert(
        "CMAKE_INSTALL_LIBDIR".into(),
        format!("{}/lib", profile.prefix),
    );
    let cmake = Cmake {
        env: None,
        vars: Some(cmake_vars),
        source_dir: profile.features.libsurvive.path.as_ref().unwrap().clone(),
        build_dir: build_dir.clone(),
    };
    if !Path::new(&build_dir).is_dir() || clean_build {
        rm_rf(&build_dir);
        jobs.push_back(cmake.get_prepare_job());
    }
    jobs.push_back(cmake.get_build_job());
    jobs.push_back(cmake.get_install_job());
    jobs
}
#[deprecated]
pub fn get_build_libsurvive_runners(profile: &Profile, clean_build: bool) -> Vec<Box<dyn Runner>> {
let mut runners: Vec<Box<dyn Runner>> = vec![];
let git = Git {

View file

@ -1,7 +1,19 @@
use crate::{
cmd_runner::CmdRunner, constants::pkg_data_dir, paths::get_cache_dir, profile::Profile,
cmd_runner::CmdRunner, constants::pkg_data_dir, paths::get_cache_dir, profile::Profile, ui::workers::runner_worker::WorkerJob,
};
/// Builds the Mercury build job: runs the packaged `build_mercury.sh`
/// script with the profile's install prefix and the cache dir as
/// positional arguments.
pub fn get_build_mercury_job(profile: &Profile) -> WorkerJob {
    let script = format!("{}/scripts/build_mercury.sh", pkg_data_dir());
    let args = vec![profile.prefix.clone(), get_cache_dir()];
    WorkerJob::new_cmd(None, script, Some(args))
}
#[deprecated]
pub fn get_build_mercury_runner(profile: &Profile) -> CmdRunner {
let args = vec![profile.prefix.clone(), get_cache_dir()];
CmdRunner::new(

View file

@ -2,10 +2,70 @@ use crate::{
build_tools::{cmake::Cmake, git::Git},
file_utils::rm_rf,
profile::Profile,
runner::Runner,
runner::Runner, ui::workers::runner_worker::WorkerJob,
};
use std::{collections::HashMap, path::Path};
use std::{collections::{HashMap, VecDeque}, path::Path};
/// Builds the ordered job queue that fetches, builds and installs Monado
/// for `profile`.
///
/// When `clean_build` is true, or no build directory exists yet, the build
/// directory is wiped and cmake is re-configured from scratch.
pub fn get_build_monado_jobs(profile: &Profile, clean_build: bool) -> VecDeque<WorkerJob> {
    let mut jobs = VecDeque::<WorkerJob>::new();
    let git = Git {
        repo: match profile.xrservice_repo.as_ref() {
            Some(r) => r.clone(),
            None => "https://gitlab.freedesktop.org/monado/monado".into(),
        },
        dir: profile.xrservice_path.clone(),
    };
    jobs.push_back(git.get_override_remote_url_job());
    // `if let` instead of `Option::map` for side effects
    // (clippy::option_map_unit_fn).
    if let Some(j) = git.get_clone_or_pull_job(profile) {
        jobs.push_back(j);
    }
    if let Some(j) = git.get_checkout_ref_job() {
        jobs.push_back(j);
        // After checking out a pinned ref, pull to bring it up to date.
        if profile.pull_on_build {
            jobs.push_back(git.get_pull_job());
        }
    }
    let build_dir = format!("{}/build", profile.xrservice_path);
    // Let cmake find libraries installed into the profile prefix.
    let mut env: HashMap<String, String> = HashMap::new();
    env.insert(
        "PKG_CONFIG_PATH".into(),
        format!("{}/lib/pkgconfig", profile.prefix),
    );
    let mut cmake_vars: HashMap<String, String> = HashMap::new();
    cmake_vars.insert("CMAKE_BUILD_TYPE".into(), "Release".into());
    cmake_vars.insert("XRT_HAVE_SYSTEM_CJSON".into(), "NO".into());
    cmake_vars.insert("CMAKE_LIBDIR".into(), format!("{}/lib", profile.prefix));
    cmake_vars.insert("CMAKE_INSTALL_PREFIX".into(), profile.prefix.clone());
    // Bake the prefix lib dir into the rpath so the binary finds its
    // prefix-local dependencies at runtime.
    cmake_vars.insert(
        "CMAKE_C_FLAGS".into(),
        format!("-Wl,-rpath='{}/lib'", profile.prefix),
    );
    cmake_vars.insert(
        "CMAKE_CXX_FLAGS".into(),
        format!("-Wl,-rpath='{}/lib'", profile.prefix),
    );
    let cmake = Cmake {
        env: Some(env),
        vars: Some(cmake_vars),
        source_dir: profile.xrservice_path.clone(),
        build_dir: build_dir.clone(),
    };
    if !Path::new(&build_dir).is_dir() || clean_build {
        rm_rf(&build_dir);
        jobs.push_back(cmake.get_prepare_job());
    }
    jobs.push_back(cmake.get_build_job());
    jobs.push_back(cmake.get_install_job());
    jobs
}
#[deprecated]
pub fn get_build_monado_runners(profile: &Profile, clean_build: bool) -> Vec<Box<dyn Runner>> {
let mut runners: Vec<Box<dyn Runner>> = vec![];
let git = Git {

View file

@ -2,10 +2,56 @@ use crate::{
build_tools::{cmake::Cmake, git::Git},
file_utils::rm_rf,
profile::Profile,
runner::Runner,
runner::Runner, ui::workers::runner_worker::WorkerJob,
};
use std::{collections::HashMap, path::Path};
use std::{collections::{HashMap, VecDeque}, path::Path};
/// Builds the ordered job queue that fetches and builds OpenComposite for
/// `profile`. Note: unlike the other builders there is no install step.
///
/// When `clean_build` is true, or no build directory exists yet, the build
/// directory is wiped and cmake is re-configured from scratch.
pub fn get_build_opencomposite_jobs(
    profile: &Profile,
    clean_build: bool,
) -> VecDeque<WorkerJob> {
    let mut jobs = VecDeque::<WorkerJob>::new();
    let git = Git {
        repo: match profile.opencomposite_repo.as_ref() {
            Some(r) => r.clone(),
            None => "https://gitlab.com/znixian/OpenOVR.git".into(),
        },
        dir: profile.opencomposite_path.clone(),
    };
    jobs.push_back(git.get_override_remote_url_job());
    // `if let` instead of `Option::map` for side effects
    // (clippy::option_map_unit_fn).
    if let Some(j) = git.get_clone_or_pull_job(profile) {
        jobs.push_back(j);
    }
    if let Some(j) = git.get_checkout_ref_job() {
        jobs.push_back(j);
        // After checking out a pinned ref, pull to bring it up to date.
        if profile.pull_on_build {
            jobs.push_back(git.get_pull_job());
        }
    }
    let build_dir = format!("{}/build", profile.opencomposite_path);
    let mut cmake_vars: HashMap<String, String> = HashMap::new();
    cmake_vars.insert("CMAKE_BUILD_TYPE".into(), "Release".into());
    let cmake = Cmake {
        env: None,
        vars: Some(cmake_vars),
        source_dir: profile.opencomposite_path.clone(),
        build_dir: build_dir.clone(),
    };
    if !Path::new(&build_dir).is_dir() || clean_build {
        rm_rf(&build_dir);
        jobs.push_back(cmake.get_prepare_job());
    }
    jobs.push_back(cmake.get_build_job());
    jobs
}
#[deprecated]
pub fn get_build_opencomposite_runners(
profile: &Profile,
clean_build: bool,

View file

@ -2,10 +2,58 @@ use crate::{
build_tools::{cmake::Cmake, git::Git},
file_utils::rm_rf,
profile::Profile,
runner::Runner,
runner::Runner, ui::workers::runner_worker::WorkerJob,
};
use std::{collections::HashMap, path::Path};
use std::{collections::{HashMap, VecDeque}, path::Path};
/// Builds the ordered job queue that fetches, builds and installs WiVRn
/// (server only) for `profile`.
///
/// When `clean_build` is true, or no build directory exists yet, the build
/// directory is wiped and cmake is re-configured from scratch.
pub fn get_build_wivrn_jobs(profile: &Profile, clean_build: bool) -> VecDeque<WorkerJob> {
    let mut jobs = VecDeque::<WorkerJob>::new();
    let git = Git {
        repo: match profile.xrservice_repo.as_ref() {
            Some(r) => r.clone(),
            None => "https://github.com/Meumeu/WiVRn".into(),
        },
        dir: profile.xrservice_path.clone(),
    };
    jobs.push_back(git.get_override_remote_url_job());
    // `if let` instead of `Option::map` for side effects
    // (clippy::option_map_unit_fn).
    if let Some(j) = git.get_clone_or_pull_job(profile) {
        jobs.push_back(j);
    }
    if let Some(j) = git.get_checkout_ref_job() {
        jobs.push_back(j);
        // After checking out a pinned ref, pull to bring it up to date.
        if profile.pull_on_build {
            jobs.push_back(git.get_pull_job());
        }
    }
    let build_dir = format!("{}/build", profile.xrservice_path);
    let mut cmake_vars: HashMap<String, String> = HashMap::new();
    cmake_vars.insert("CMAKE_BUILD_TYPE".into(), "Release".into());
    cmake_vars.insert("XRT_HAVE_SYSTEM_CJSON".into(), "NO".into());
    // Only the server is built here; the client runs on the headset.
    cmake_vars.insert("WIVRN_BUILD_CLIENT".into(), "OFF".into());
    cmake_vars.insert("CMAKE_INSTALL_PREFIX".into(), profile.prefix.clone());
    let cmake = Cmake {
        env: None,
        vars: Some(cmake_vars),
        source_dir: profile.xrservice_path.clone(),
        build_dir: build_dir.clone(),
    };
    if !Path::new(&build_dir).is_dir() || clean_build {
        rm_rf(&build_dir);
        jobs.push_back(cmake.get_prepare_job());
    }
    jobs.push_back(cmake.get_build_job());
    jobs.push_back(cmake.get_install_job());
    jobs
}
#[deprecated]
pub fn get_build_wivrn_runners(profile: &Profile, clean_build: bool) -> Vec<Box<dyn Runner>> {
let mut runners: Vec<Box<dyn Runner>> = vec![];
let git = Git {

View file

@ -1,3 +1,8 @@
use crate::{
file_utils::get_writer,
profile::{Profile, XRServiceType},
runner::{Runner, RunnerStatus},
};
use nix::{
sys::signal::{
kill,
@ -5,12 +10,6 @@ use nix::{
},
unistd::Pid,
};
use crate::{
file_utils::get_writer,
profile::{Profile, XRServiceType},
runner::{Runner, RunnerStatus},
};
use std::{
collections::HashMap,
io::{BufRead, BufReader, Write},

View file

@ -319,6 +319,17 @@ impl Profile {
.unwrap_or(&"".to_string())
.is_empty())
}
/// Absolute path of the XR service executable for this profile, located
/// under the profile's install prefix.
pub fn xrservice_binary(&self) -> String {
    let binary_name = match self.xrservice_type {
        XRServiceType::Monado => "monado-service",
        XRServiceType::Wivrn => "wivrn-server",
    };
    format!("{}/bin/{}", self.prefix, binary_name)
}
/// Whether this profile can be started, i.e. its service binary has
/// already been built and installed.
pub fn can_start(&self) -> bool {
    let binary = self.xrservice_binary();
    Path::new(&binary).is_file()
}
}
#[cfg(test)]

View file

@ -5,13 +5,15 @@ use super::debug_view::{DebugView, DebugViewMsg};
use super::fbt_config_editor::{FbtConfigEditor, FbtConfigEditorInit, FbtConfigEditorMsg};
use super::libsurvive_setup_window::LibsurviveSetupWindow;
use super::main_view::MainViewMsg;
use crate::builders::build_basalt::get_build_basalt_runners;
use crate::builders::build_libsurvive::get_build_libsurvive_runners;
use crate::builders::build_mercury::get_build_mercury_runner;
use crate::builders::build_monado::get_build_monado_runners;
use crate::builders::build_opencomposite::get_build_opencomposite_runners;
use crate::builders::build_wivrn::get_build_wivrn_runners;
use crate::cmd_runner::CmdRunner;
use super::workers::runner_worker::{
RunnerWorkerMsg, RunnerWorkerOut, RunnerWorkerWrap, WorkerJob,
};
use crate::builders::build_basalt::get_build_basalt_jobs;
use crate::builders::build_libsurvive::get_build_libsurvive_jobs;
use crate::builders::build_mercury::get_build_mercury_job;
use crate::builders::build_monado::get_build_monado_jobs;
use crate::builders::build_opencomposite::get_build_opencomposite_jobs;
use crate::builders::build_wivrn::get_build_wivrn_jobs;
use crate::config::Config;
use crate::constants::APP_NAME;
use crate::depcheck::check_dependency;
@ -35,8 +37,6 @@ use crate::profiles::lighthouse::lighthouse_profile;
use crate::profiles::system_valve_index::system_valve_index_profile;
use crate::profiles::valve_index::valve_index_profile;
use crate::profiles::wivrn::wivrn_profile;
use crate::runner::{Runner, RunnerStatus};
use crate::runner_pipeline::RunnerPipeline;
use crate::ui::build_window::BuildWindowMsg;
use crate::ui::debug_view::DebugViewInit;
use crate::ui::libsurvive_setup_window::LibsurviveSetupMsg;
@ -50,6 +50,7 @@ use relm4::adw::ResponseAppearance;
use relm4::gtk::glib;
use relm4::{new_action_group, new_stateful_action, new_stateless_action, prelude::*};
use relm4::{ComponentParts, ComponentSender, SimpleComponent};
use std::collections::VecDeque;
use std::fs::remove_file;
use std::time::Duration;
@ -80,9 +81,11 @@ pub struct App {
#[tracker::do_not_track]
config: Config,
#[tracker::do_not_track]
xrservice_runner: Option<CmdRunner>,
xrservice_worker: Option<RunnerWorkerWrap>,
#[tracker::do_not_track]
build_pipeline: Option<RunnerPipeline>,
restart_xrservice: bool,
#[tracker::do_not_track]
build_worker: Option<RunnerWorkerWrap>,
#[tracker::do_not_track]
profiles: Vec<Profile>,
#[tracker::do_not_track]
@ -93,6 +96,10 @@ pub struct App {
#[derive(Debug)]
pub enum Msg {
OnServiceLog(Vec<String>),
OnServiceExit(i32),
OnBuildLog(Vec<String>),
OnBuildExit(i32),
ClockTicking,
BuildProfile(bool),
EnableDebugViewChanged(bool),
@ -133,7 +140,7 @@ impl App {
}
}
pub fn start_xrservice(&mut self) {
pub fn start_xrservice(&mut self, sender: ComponentSender<Self>) {
let prof = self.get_selected_profile();
if set_current_active_runtime_to_profile(&prof).is_err() {
alert(
@ -153,32 +160,37 @@ impl App {
};
self.debug_view.sender().emit(DebugViewMsg::ClearLog);
self.xr_devices = XRDevices::default();
remove_file(&get_ipc_file_path(&prof.xrservice_type))
.is_err()
.then(|| println!("Failed to remove xrservice IPC file"));
let mut runner = CmdRunner::xrservice_runner_from_profile(&prof);
match runner.try_start() {
Ok(_) => {
self.xrservice_runner = Some(runner);
self.main_view
.sender()
.emit(MainViewMsg::XRServiceActiveChanged(
true,
Some(self.get_selected_profile()),
));
self.set_inhibit_session(true);
}
Err(_) => {
alert(
"Failed to start profile",
Some(concat!(
"You need to build the current profile before starting it.",
"\n\nYou can do this from the menu."
)),
Some(&self.app_win.clone().upcast::<gtk::Window>()),
);
}
};
if prof.can_start() {
remove_file(&get_ipc_file_path(&prof.xrservice_type))
.is_err()
.then(|| println!("Failed to remove xrservice IPC file"));
let worker = RunnerWorkerWrap::xrservice_worker_wrap_from_profile(
&prof,
sender.input_sender(),
|msg| match msg {
RunnerWorkerOut::Log(rows) => Msg::OnServiceLog(rows),
RunnerWorkerOut::Exit(code) => Msg::OnServiceExit(code),
},
);
worker.start();
self.xrservice_worker = Some(worker);
self.main_view
.sender()
.emit(MainViewMsg::XRServiceActiveChanged(
true,
Some(self.get_selected_profile()),
));
self.set_inhibit_session(true);
} else {
alert(
"Failed to start profile",
Some(concat!(
"You need to build the current profile before starting it.",
"\n\nYou can do this from the menu."
)),
Some(&self.app_win.clone().upcast::<gtk::Window>()),
);
}
}
pub fn restore_openxr_openvr_files(&self) {
@ -200,10 +212,8 @@ impl App {
pub fn shutdown_xrservice(&mut self) {
self.set_inhibit_session(false);
if self.xrservice_runner.is_some()
&& self.xrservice_runner.as_mut().unwrap().status() == RunnerStatus::Running
{
self.xrservice_runner.as_mut().unwrap().terminate();
if let Some(worker) = self.xrservice_worker.as_ref() {
worker.stop();
}
self.restore_openxr_openvr_files();
self.main_view
@ -266,10 +276,8 @@ impl SimpleComponent for App {
}
fn shutdown(&mut self, _widgets: &mut Self::Widgets, _output: relm4::Sender<Self::Output>) {
if self.xrservice_runner.is_some()
&& self.xrservice_runner.as_mut().unwrap().status() == RunnerStatus::Running
{
self.xrservice_runner.as_mut().unwrap().terminate();
if let Some(worker) = self.xrservice_worker.as_ref() {
worker.stop();
}
self.restore_openxr_openvr_files();
}
@ -278,67 +286,25 @@ impl SimpleComponent for App {
self.reset();
match message {
Msg::OnServiceLog(rows) => {
if !rows.is_empty() {
sender.input(Msg::ParseLog(rows.clone()));
self.debug_view
.sender()
.emit(DebugViewMsg::LogUpdated(rows));
}
}
Msg::OnServiceExit(_) => {
self.main_view
.sender()
.emit(MainViewMsg::XRServiceActiveChanged(false, None));
self.xrservice_worker = None;
if self.restart_xrservice {
self.restart_xrservice = false;
self.start_xrservice(sender);
}
}
Msg::ClockTicking => {
match &mut self.xrservice_runner {
None => {}
Some(runner) => {
let n_rows = runner.consume_rows();
if !n_rows.is_empty() {
sender.input(Msg::ParseLog(n_rows.clone()));
self.debug_view
.sender()
.emit(DebugViewMsg::LogUpdated(n_rows));
}
match runner.status() {
RunnerStatus::Running => {}
RunnerStatus::Stopped(_) => {
self.main_view
.sender()
.emit(MainViewMsg::XRServiceActiveChanged(false, None));
}
};
}
};
match &mut self.build_pipeline {
None => {}
Some(pipeline) => {
pipeline.update();
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateContent(pipeline.get_log()));
match pipeline.status() {
RunnerStatus::Running | RunnerStatus::Stopped(None) => {}
RunnerStatus::Stopped(Some(code)) => {
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateCanClose(true));
self.build_pipeline.take();
match code {
0 => {
self.build_window.sender().emit(
BuildWindowMsg::UpdateBuildStatus(BuildStatus::Done),
);
// apparently setcap on wivrn causes issues, so in case
// it's not monado, we're just skipping this
if self.get_selected_profile().xrservice_type
== XRServiceType::Monado
{
self.setcap_confirm_dialog.present();
}
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateCanClose(true));
}
errcode => self.build_window.sender().emit(
BuildWindowMsg::UpdateBuildStatus(BuildStatus::Error(
format!("Exit status {}", errcode),
)),
),
}
}
}
}
};
self.main_view.sender().emit(MainViewMsg::ClockTicking);
}
Msg::ParseLog(rows) => {
@ -376,40 +342,35 @@ impl SimpleComponent for App {
.sender()
.emit(MainViewMsg::EnableDebugViewChanged(val));
}
Msg::DoStartStopXRService => match &mut self.xrservice_runner {
Msg::DoStartStopXRService => match &mut self.xrservice_worker {
None => {
self.start_xrservice();
self.start_xrservice(sender);
}
Some(_) => {
self.shutdown_xrservice();
}
Some(runner) => match runner.status() {
RunnerStatus::Running => {
self.shutdown_xrservice();
}
RunnerStatus::Stopped(_) => {
self.start_xrservice();
}
},
},
Msg::RestartXRService => {
match &mut self.xrservice_runner {
None => {}
Some(runner) => match runner.status() {
RunnerStatus::Stopped(_) => {}
RunnerStatus::Running => {
if self.xrservice_runner.is_some()
&& self.xrservice_runner.as_mut().unwrap().status()
== RunnerStatus::Running
{
self.xrservice_runner.as_mut().unwrap().terminate();
}
}
},
Msg::RestartXRService => match &mut self.xrservice_worker {
None => {
self.start_xrservice(sender);
}
self.start_xrservice();
}
Some(worker) => {
let status = worker.state.lock().unwrap().exit_status.clone();
match status {
Some(_) => {
self.start_xrservice(sender);
}
None => {
worker.stop();
self.restart_xrservice = true;
}
}
}
},
Msg::BuildProfile(clean_build) => {
let profile = self.get_selected_profile();
let mut missing_deps = vec![];
let mut runners: Vec<Box<dyn Runner>> = vec![];
let mut jobs = VecDeque::<WorkerJob>::new();
// profile per se can't be built, but we still need opencomp
if profile.can_be_built {
missing_deps.extend(match profile.xrservice_type {
@ -418,23 +379,23 @@ impl SimpleComponent for App {
});
if profile.features.libsurvive.enabled {
missing_deps.extend(get_missing_libsurvive_deps());
runners.extend(get_build_libsurvive_runners(&profile, clean_build));
jobs.extend(get_build_libsurvive_jobs(&profile, clean_build));
}
if profile.features.basalt.enabled {
missing_deps.extend(get_missing_basalt_deps());
runners.extend(get_build_basalt_runners(&profile, clean_build));
jobs.extend(get_build_basalt_jobs(&profile, clean_build));
}
if profile.features.mercury_enabled {
missing_deps.extend(get_missing_mercury_deps());
runners.push(Box::new(get_build_mercury_runner(&profile)));
jobs.push_back(get_build_mercury_job(&profile));
}
runners.extend(match profile.xrservice_type {
XRServiceType::Monado => get_build_monado_runners(&profile, clean_build),
XRServiceType::Wivrn => get_build_wivrn_runners(&profile, clean_build),
jobs.extend(match profile.xrservice_type {
XRServiceType::Monado => get_build_monado_jobs(&profile, clean_build),
XRServiceType::Wivrn => get_build_wivrn_jobs(&profile, clean_build),
});
// no listed deps for opencomp
}
runners.extend(get_build_opencomposite_runners(&profile, clean_build));
jobs.extend(get_build_opencomposite_jobs(&profile, clean_build));
if !missing_deps.is_empty() {
missing_deps.sort_unstable();
missing_deps.dedup(); // dedup only works if sorted, hence the above
@ -456,8 +417,11 @@ impl SimpleComponent for App {
.sender()
.send(BuildWindowMsg::Present)
.unwrap();
let mut pipeline = RunnerPipeline::new(runners);
pipeline.start();
let worker = RunnerWorkerWrap::new(jobs, sender.input_sender(), |msg| match msg {
RunnerWorkerOut::Log(rows) => Msg::OnBuildLog(rows),
RunnerWorkerOut::Exit(code) => Msg::OnBuildExit(code),
});
worker.start();
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateTitle(format!(
@ -467,7 +431,34 @@ impl SimpleComponent for App {
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateCanClose(false));
self.build_pipeline = Some(pipeline);
self.build_worker = Some(worker);
}
Msg::OnBuildLog(rows) => {
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateContent(rows));
}
Msg::OnBuildExit(code) => {
match code {
0 => {
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateBuildStatus(BuildStatus::Done));
if self.get_selected_profile().xrservice_type == XRServiceType::Monado {
self.setcap_confirm_dialog.present();
}
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateCanClose(true));
}
errcode => {
self.build_window
.sender()
.emit(BuildWindowMsg::UpdateBuildStatus(BuildStatus::Error(
format!("Exit status {}", errcode),
)));
}
};
}
Msg::DeleteProfile => {
let todel = self.get_selected_profile();
@ -635,10 +626,11 @@ impl SimpleComponent for App {
config,
tracker: 0,
profiles,
xrservice_runner: None,
build_pipeline: None,
xrservice_worker: None,
build_worker: None,
xr_devices: XRDevices::default(),
fbt_config_editor: None,
restart_xrservice: false,
};
let widgets = view_output!();

View file

@ -15,3 +15,4 @@ pub mod profile_editor;
pub mod steam_launch_options_box;
pub mod util;
pub mod wivrn_conf_editor;
pub mod workers;

1
src/ui/workers/mod.rs Normal file
View file

@ -0,0 +1 @@
pub mod runner_worker;

View file

@ -0,0 +1,271 @@
use crate::{profile::Profile, withclones};
use nix::{
sys::signal::{
kill,
Signal::{SIGKILL, SIGTERM},
},
unistd::Pid,
};
use relm4::{prelude::*, Sender, Worker, WorkerController};
use std::{
collections::{HashMap, VecDeque},
io::{BufRead, BufReader},
mem,
process::{Command, Stdio},
sync::{Arc, Mutex},
thread::{self, sleep},
time::Duration,
};
// Spawns a thread that line-buffers `$buf_fd` (a child process's piped
// stdout or stderr), echoes every line to our own stdout, and forwards it
// to `$sender` as a `Log` output. The thread exits on EOF (0 bytes read),
// on a read error, or when the sender's receiving side is gone. A macro
// (rather than a fn) so `Self::Output` resolves in the expansion context.
macro_rules! logger_thread {
($buf_fd: expr, $sender: expr) => {
thread::spawn(move || {
let mut reader = BufReader::new($buf_fd);
loop {
let mut buf = String::new();
match reader.read_line(&mut buf) {
// Read error: give up on this stream.
Err(_) => return,
Ok(bytes_read) => {
// 0 bytes read means EOF: the child closed the pipe.
if bytes_read == 0 {
return;
}
if buf.is_empty() {
continue;
}
// Mirror the child's output on our own stdout.
print!("{}", buf);
match $sender.output(Self::Output::Log(vec![buf])) {
Ok(_) => {}
// Receiver dropped: stop forwarding.
Err(_) => return,
};
}
};
}
})
};
}
/// Payload of a [`WorkerJob::Cmd`] job: an external command to spawn.
#[derive(Debug, Clone)]
pub struct CmdWorkerData {
/// Environment variables set on the spawned process.
pub environment: HashMap<String, String>,
/// Binary (or path) to execute.
pub command: String,
/// Arguments passed to the command.
pub args: Vec<String>,
}
/// Result of a [`WorkerJob::Func`] closure job.
#[derive(Debug, Clone, Default)]
pub struct FuncWorkerOut {
/// `false` aborts the rest of the job queue with exit status 1.
pub success: bool,
/// Log lines forwarded to the UI via the worker's `Log` output.
pub out: Vec<String>,
}
/// Payload of a [`WorkerJob::Func`] job: a one-shot closure executed on
/// the worker thread.
pub struct FuncWorkerData {
pub func: Box<dyn FnOnce() -> FuncWorkerOut + Send + Sync + 'static>,
}
/// One unit of work in a [`RunnerWorker`] queue: either an external
/// command or an in-process closure.
pub enum WorkerJob {
Cmd(CmdWorkerData),
Func(FuncWorkerData),
}
impl WorkerJob {
    /// Convenience constructor for a command job; `None` for `env` or
    /// `args` means empty.
    pub fn new_cmd(
        env: Option<HashMap<String, String>>,
        cmd: String,
        args: Option<Vec<String>>,
    ) -> Self {
        let data = CmdWorkerData {
            environment: env.unwrap_or_default(),
            command: cmd,
            args: args.unwrap_or_default(),
        };
        Self::Cmd(data)
    }

    /// Convenience constructor for an in-process closure job.
    pub fn new_func(func: Box<dyn FnOnce() -> FuncWorkerOut + Send + Sync + 'static>) -> Self {
        Self::Func(FuncWorkerData { func })
    }
}
/// Messages emitted by [`RunnerWorker`] back to its controller.
#[derive(Debug)]
pub enum RunnerWorkerOut {
/// New log lines produced by the currently running job.
Log(Vec<String>),
/// The queue finished (or aborted); carries the exit code (0 = success).
Exit(i32),
}
/// Commands accepted by [`RunnerWorker`].
#[derive(Debug)]
pub enum RunnerWorkerMsg {
/// Begin draining the job queue; sending it twice panics the worker.
Start,
}
/// Lock-protected progress state shared between a [`RunnerWorker`] and its
/// controlling wrapper.
#[derive(Debug, Clone, Default)]
pub struct RunnerWorkerState {
/// Pid of the child process currently running, if any.
pub current_pid: Option<Pid>,
/// Set to `Some(1)` when a job fails; `None` while running or on success.
pub exit_status: Option<i32>,
/// Set by `stop()`; the worker checks it between jobs.
pub stop_requested: bool,
/// Whether `Start` has been received.
pub started: bool,
/// Whether the worker finished (or aborted) draining the queue.
pub exited: bool,
}
/// relm4 worker that executes a queue of [`WorkerJob`]s sequentially on a
/// background thread.
pub struct RunnerWorker {
// Remaining jobs, consumed front-to-back.
jobs: VecDeque<WorkerJob>,
// Progress state shared with the controlling side (see RunnerWorkerWrap).
state: Arc<Mutex<RunnerWorkerState>>,
}
/// Init payload for [`RunnerWorker`]: the job queue plus the shared state
/// handle the caller keeps to observe/control the worker.
pub struct RunnerWorkerInit {
pub jobs: VecDeque<WorkerJob>,
pub state: Arc<Mutex<RunnerWorkerState>>,
}
impl Worker for RunnerWorker {
type Init = RunnerWorkerInit;
type Input = RunnerWorkerMsg;
type Output = RunnerWorkerOut;
fn init(init: Self::Init, _sender: ComponentSender<Self>) -> Self {
Self {
jobs: init.jobs,
state: init.state,
}
}
// Drains the whole job queue synchronously inside the update handler;
// the UI stays responsive because relm4 workers run on their own thread.
// Stops early when `stop_requested` is set or a job fails, then emits a
// single `Exit` with the recorded status (0 on success).
fn update(&mut self, msg: RunnerWorkerMsg, sender: ComponentSender<Self>) {
// Send the result of each job's output back to the controller.
match msg {
Self::Input::Start => {
// A worker's queue is one-shot: refuse a second Start.
if self.state.lock().unwrap().started {
panic!("Cannot start a RunnerWorker twice!")
}
self.state.lock().unwrap().started = true;
while !(self.jobs.is_empty() || self.state.lock().unwrap().stop_requested) {
// Clear the pid until the next child is actually spawned.
self.state.lock().unwrap().current_pid = None;
let mut job = self.jobs.pop_front().unwrap();
match &mut job {
WorkerJob::Cmd(data) => {
withclones![data];
if let Ok(mut cmd) = Command::new(data.command)
.args(data.args)
.envs(data.environment)
.stderr(Stdio::piped())
.stdout(Stdio::piped())
.spawn()
{
// Publish the child's pid so stop() can signal it.
self.state.lock().unwrap().current_pid = Some(Pid::from_raw(
cmd.id().try_into().expect("Could not convert pid to u32"),
));
// Stream both pipes line-by-line while the child runs.
let stdout = cmd.stdout.take().unwrap();
let stderr = cmd.stderr.take().unwrap();
let stdout_sender = sender.clone();
let stderr_sender = sender.clone();
let stdout_logger = logger_thread!(stdout, stdout_sender);
let stderr_logger = logger_thread!(stderr, stderr_sender);
cmd.wait().expect("Failed to wait for process");
stdout_logger.join().expect("Failed to join reader thread");
stderr_logger.join().expect("Failed to join reader thread");
} else {
// Spawn failed: record failure and abort the queue.
// NOTE(review): this output's Result is ignored, unlike
// the Exit send below which uses expect — confirm intended.
let msg = "Failed to run command".to_string();
sender.output(Self::Output::Log(vec![msg]));
self.state.lock().unwrap().exit_status = Some(1);
break;
}
}
WorkerJob::Func(data) => {
// FnOnce can't be called through &mut, so swap in a
// no-op placeholder and take ownership of the closure.
let func = mem::replace(
&mut data.func,
Box::new(move || FuncWorkerOut::default()),
);
let out = func();
sender.output(Self::Output::Log(out.out.clone()));
if !out.success {
// Closure reported failure: abort the queue.
self.state.lock().unwrap().exit_status = Some(1);
break;
}
}
}
}
// Queue drained or aborted: report the final exit code.
sender
.output(Self::Output::Exit(
self.state.lock().unwrap().exit_status.unwrap_or(0),
))
.expect("Failed to send output exit");
self.state.lock().unwrap().exited = true;
}
}
}
}
impl RunnerWorker {
    /// Creates a detached worker whose single job runs the XR service
    /// binary of `prof`, sharing `state` with the caller so it can observe
    /// the pid/exit status and request a stop.
    pub fn xrservice_worker_from_profile(
        prof: &Profile,
        state: Arc<Mutex<RunnerWorkerState>>,
    ) -> relm4::WorkerHandle<RunnerWorker> {
        let mut env = prof.environment.clone();
        // Default LH_DRIVER from the profile unless already set; the entry
        // API replaces the contains_key + insert pair with one lookup
        // (clippy::map_entry).
        env.entry("LH_DRIVER".into())
            .or_insert_with(|| prof.lighthouse_driver.to_string().to_lowercase());
        let data = CmdWorkerData {
            environment: env,
            command: prof.xrservice_binary(),
            args: vec![],
        };
        let mut jobs = VecDeque::new();
        jobs.push_back(WorkerJob::Cmd(data));
        Self::builder().detach_worker(RunnerWorkerInit { jobs, state })
    }
}
/// Pairs a detached [`RunnerWorker`] controller with the state handle it
/// shares, so callers can start/stop it and inspect progress.
pub struct RunnerWorkerWrap {
pub worker: WorkerController<RunnerWorker>,
pub state: Arc<Mutex<RunnerWorkerState>>,
}
impl RunnerWorkerWrap {
    /// Wraps a new worker around `jobs`, forwarding its outputs to `sender`
    /// through `transform`.
    pub fn new<X: 'static, F: (Fn(RunnerWorkerOut) -> X) + 'static>(
        jobs: VecDeque<WorkerJob>,
        sender: &Sender<X>,
        transform: F,
    ) -> Self {
        let state = Arc::new(Mutex::new(RunnerWorkerState::default()));
        let init = RunnerWorkerInit {
            jobs,
            state: state.clone(),
        };
        Self {
            worker: RunnerWorker::builder()
                .detach_worker(init)
                .forward(sender, transform),
            state,
        }
    }

    /// Like [`Self::new`], but with the single xrservice job derived from
    /// the given profile.
    pub fn xrservice_worker_wrap_from_profile<
        X: 'static,
        F: (Fn(RunnerWorkerOut) -> X) + 'static,
    >(
        prof: &Profile,
        sender: &Sender<X>,
        transform: F,
    ) -> Self {
        let state = Arc::new(Mutex::new(RunnerWorkerState::default()));
        Self {
            worker: RunnerWorker::xrservice_worker_from_profile(prof, state.clone())
                .forward(sender, transform),
            state,
        }
    }

    /// Asks the worker to start draining its job queue.
    pub fn start(&self) {
        self.worker.emit(RunnerWorkerMsg::Start);
    }

    /// Requests a graceful stop: flags `stop_requested`, sends SIGTERM to
    /// the currently running child (if any), and escalates to SIGKILL
    /// after a two-second grace period if the worker has not exited.
    pub fn stop(&self) {
        // Take the lock once; nothing to do if never started or already done.
        let pid = {
            let mut s = self.state.lock().unwrap();
            if !s.started || s.exited {
                return;
            }
            s.stop_requested = true;
            s.current_pid
        };
        if let Some(pid) = pid {
            kill(pid, SIGTERM).expect("Could not send sigterm to process");
            let state = self.state.clone();
            thread::spawn(move || {
                sleep(Duration::from_secs(2));
                if let Ok(s) = state.lock() {
                    // BUG FIX: the previous condition was `if s.exited`,
                    // which SIGKILLed only after the worker had already
                    // finished. Escalate only while it is still alive.
                    if !s.exited {
                        // Best-effort: the child may have died in between.
                        let _ = kill(pid, SIGKILL);
                    }
                }
            });
        }
    }
}