0.8.0 beta fixes (#2154)

* initial fixes

* 0.8.0 beta fixes

* run actions

* run fmt

* Fix windows build

* Add purge cache opt

* add must revalidate to project req

* lint + clippy

* fix processes, open folder

* Update migrator to use old launcher cache for perf

* fix empty dirs not moving

* fix lint + create natives dir if not exist

* fix large request batches

* finish

* Fix deep linking on mac

* fix comp err

* fix comp err (2)

---------

Signed-off-by: Geometrically <18202329+Geometrically@users.noreply.github.com>
This commit is contained in:
Geometrically
2024-08-16 23:20:11 -07:00
committed by GitHub
parent 3a4843fb46
commit 910e219c0e
66 changed files with 1961 additions and 1896 deletions

View File

@@ -3,6 +3,7 @@ use crate::util::fetch::{fetch_json, sha1_async, FetchSemaphore};
use chrono::{DateTime, Utc};
use dashmap::DashSet;
use reqwest::Method;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use sqlx::SqlitePool;
use std::collections::HashMap;
@@ -56,7 +57,7 @@ impl CacheValueType {
}
}
pub fn from_str(val: &str) -> CacheValueType {
pub fn from_string(val: &str) -> CacheValueType {
match val {
"project" => CacheValueType::Project,
"version" => CacheValueType::Version,
@@ -412,7 +413,7 @@ pub struct GameVersion {
}
impl CacheValue {
fn get_entry(self) -> CachedEntry {
pub fn get_entry(self) -> CachedEntry {
CachedEntry {
id: self.get_key(),
alias: self.get_alias(),
@@ -422,7 +423,7 @@ impl CacheValue {
}
}
fn get_type(&self) -> CacheValueType {
pub fn get_type(&self) -> CacheValueType {
match self {
CacheValue::Project(_) => CacheValueType::Project,
CacheValue::Version(_) => CacheValueType::Version,
@@ -505,7 +506,8 @@ impl CacheValue {
}
}
#[derive(PartialEq, Eq, Debug, Copy, Clone)]
#[derive(Deserialize, Serialize, PartialEq, Eq, Debug, Copy, Clone)]
#[serde(rename_all = "snake_case")]
pub enum CacheBehaviour {
/// Serve expired data. If fetch fails / launcher is offline, errors are ignored
/// and expired data is served
@@ -529,9 +531,9 @@ pub struct CachedEntry {
id: String,
alias: Option<String>,
#[serde(rename = "data_type")]
type_: CacheValueType,
pub type_: CacheValueType,
data: Option<CacheValue>,
expires: i64,
pub expires: i64,
}
macro_rules! impl_cache_methods {
@@ -654,11 +656,6 @@ impl CachedEntry {
pool: &SqlitePool,
fetch_semaphore: &FetchSemaphore,
) -> crate::Result<Vec<Self>> {
use std::time::Instant;
let now = Instant::now();
println!("start {type_:?} keys: {keys:?}");
if keys.is_empty() {
return Ok(Vec::new());
}
@@ -735,7 +732,7 @@ impl CachedEntry {
return_vals.push(Self {
id: row.id,
alias: row.alias,
type_: CacheValueType::from_str(&row.data_type),
type_: CacheValueType::from_string(&row.data_type),
data: Some(data),
expires: row.expires,
});
@@ -743,13 +740,6 @@ impl CachedEntry {
}
}
let time = now.elapsed();
println!(
"query {type_:?} keys: {remaining_keys:?}, elapsed: {:.2?}",
time
);
let now = Instant::now();
if !remaining_keys.is_empty() {
let res = Self::fetch_many(
type_,
@@ -787,9 +777,6 @@ impl CachedEntry {
}
}
let time = now.elapsed();
println!("FETCH {type_:?} DONE, elapsed: {:.2?}", time);
if !expired_keys.is_empty()
&& (cache_behaviour == CacheBehaviour::StaleWhileRevalidate
|| cache_behaviour
@@ -827,20 +814,50 @@ impl CachedEntry {
fetch_semaphore: &FetchSemaphore,
pool: &SqlitePool,
) -> crate::Result<Vec<(Self, bool)>> {
async fn fetch_many_batched<T: DeserializeOwned>(
method: Method,
api_url: &str,
url: &str,
keys: &DashSet<impl Display + Eq + Hash + Serialize>,
fetch_semaphore: &FetchSemaphore,
pool: &SqlitePool,
) -> crate::Result<Vec<T>> {
const MAX_REQUEST_SIZE: usize = 1000;
let urls = keys
.iter()
.collect::<Vec<_>>()
.chunks(MAX_REQUEST_SIZE)
.map(|chunk| {
serde_json::to_string(&chunk)
.map(|keys| format!("{api_url}{url}{keys}"))
})
.collect::<Result<Vec<_>, _>>()?;
let res = futures::future::try_join_all(urls.iter().map(|url| {
fetch_json::<Vec<_>>(
method.clone(),
url,
None,
None,
fetch_semaphore,
pool,
)
}))
.await?;
Ok(res.into_iter().flatten().collect())
}
macro_rules! fetch_original_values {
($type:ident, $api_url:expr, $url_suffix:expr, $cache_variant:path) => {{
let mut results = fetch_json::<Vec<_>>(
let mut results = fetch_many_batched(
Method::GET,
&*format!(
"{}{}?ids={}",
$api_url,
$url_suffix,
serde_json::to_string(&keys)?
),
None,
None,
$api_url,
&format!("{}?ids=", $url_suffix),
&keys,
&fetch_semaphore,
pool,
&pool,
)
.await?
.into_iter()
@@ -938,14 +955,11 @@ impl CachedEntry {
)
}
CacheValueType::Team => {
let mut teams = fetch_json::<Vec<Vec<TeamMember>>>(
let mut teams = fetch_many_batched::<Vec<TeamMember>>(
Method::GET,
&format!(
"{MODRINTH_API_URL_V3}teams?ids={}",
serde_json::to_string(&keys)?
),
None,
None,
MODRINTH_API_URL_V3,
"teams?ids=",
&keys,
fetch_semaphore,
pool,
)
@@ -980,14 +994,11 @@ impl CachedEntry {
values
}
CacheValueType::Organization => {
let mut orgs = fetch_json::<Vec<Organization>>(
let mut orgs = fetch_many_batched::<Organization>(
Method::GET,
&format!(
"{MODRINTH_API_URL_V3}organizations?ids={}",
serde_json::to_string(&keys)?
),
None,
None,
MODRINTH_API_URL_V3,
"organizations?ids=",
&keys,
fetch_semaphore,
pool,
)
@@ -1064,8 +1075,6 @@ impl CachedEntry {
false,
));
println!("found hash {hash} {version_id} {project_id}");
vals.push((
CacheValue::File(CachedFile {
hash,
@@ -1307,7 +1316,6 @@ impl CachedEntry {
false,
));
println!("found update {hash} {game_version} {loader} {version_id}");
vals.push((
CacheValue::FileUpdate(CachedFileUpdate {
hash: hash.clone(),
@@ -1372,7 +1380,7 @@ impl CachedEntry {
})
}
async fn upsert_many(
pub(crate) async fn upsert_many(
items: &[Self],
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<()> {
@@ -1402,6 +1410,25 @@ impl CachedEntry {
Ok(())
}
pub async fn purge_cache_types(
cache_types: &[CacheValueType],
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<()> {
let cache_types = serde_json::to_string(&cache_types)?;
sqlx::query!(
"
DELETE FROM cache
WHERE data_type IN (SELECT value FROM json_each($1))
",
cache_types,
)
.execute(exec)
.await?;
Ok(())
}
}
pub async fn cache_file_hash(

View File

@@ -1,7 +1,11 @@
//! Theseus directory information
use crate::state::{JavaVersion, Settings};
use crate::event::emit::{emit_loading, init_loading};
use crate::state::{JavaVersion, Profile, Settings};
use crate::util::fetch::IoSemaphore;
use crate::LoadingBarType;
use dashmap::DashSet;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use tokio::fs;
pub const CACHES_FOLDER_NAME: &str = "caches";
@@ -11,7 +15,7 @@ pub const METADATA_FOLDER_NAME: &str = "meta";
#[derive(Debug)]
pub struct DirectoryInfo {
pub settings_dir: PathBuf, // Base settings directory- settings.json and icon cache.
pub settings_dir: PathBuf, // Base settings directory- app database
pub config_dir: PathBuf, // Base config directory- instances, minecraft downloads, etc. Changeable as a setting.
}
@@ -153,7 +157,7 @@ impl DirectoryInfo {
/// Get the cache directory for Theseus
#[inline]
pub fn caches_dir(&self) -> PathBuf {
self.settings_dir.join(CACHES_FOLDER_NAME)
self.config_dir.join(CACHES_FOLDER_NAME)
}
/// Get path from environment variable
@@ -162,6 +166,7 @@ impl DirectoryInfo {
std::env::var_os(name).map(PathBuf::from)
}
#[tracing::instrument(skip(settings, exec, io_semaphore))]
pub async fn move_launcher_directory<'a, E>(
settings: &mut Settings,
exec: E,
@@ -170,13 +175,14 @@ impl DirectoryInfo {
where
E: sqlx::Executor<'a, Database = sqlx::Sqlite> + Copy,
{
let app_dir = DirectoryInfo::get_initial_settings_dir().ok_or(
crate::ErrorKind::FSError(
"Could not find valid config dir".to_string(),
),
)?;
if let Some(ref prev_custom_dir) = settings.prev_custom_dir {
let prev_dir = PathBuf::from(prev_custom_dir);
let app_dir = DirectoryInfo::get_initial_settings_dir().ok_or(
crate::ErrorKind::FSError(
"Could not find valid config dir".to_string(),
),
)?;
let move_dir = settings
.custom_dir
@@ -203,70 +209,267 @@ impl DirectoryInfo {
}
}
async fn move_directory(
source: &Path,
destination: &Path,
io_semaphore: &IoSemaphore,
) -> crate::Result<()> {
if !source.exists() {
crate::util::io::create_dir_all(source).await?;
}
if !destination.exists() {
crate::util::io::create_dir_all(destination).await?;
}
for entry_path in
crate::pack::import::get_all_subfiles(source).await?
fn is_same_disk(
old_dir: &Path,
new_dir: &Path,
) -> crate::Result<bool> {
#[cfg(unix)]
{
let relative_path = entry_path.strip_prefix(source)?;
let new_path = destination.join(relative_path);
crate::util::fetch::copy(
&entry_path,
&new_path,
io_semaphore,
)
.await?;
use std::os::unix::fs::MetadataExt;
Ok(old_dir.metadata()?.dev() == new_dir.metadata()?.dev())
}
Ok(())
#[cfg(windows)]
{
let old_dir = crate::util::io::canonicalize(old_dir)?;
let new_dir = crate::util::io::canonicalize(new_dir)?;
let old_component = old_dir.components().next();
let new_component = new_dir.components().next();
match (old_component, new_component) {
(
Some(std::path::Component::Prefix(old)),
Some(std::path::Component::Prefix(new)),
) => Ok(old.as_os_str() == new.as_os_str()),
_ => Ok(false),
}
}
}
fn get_disk_usage(path: &Path) -> crate::Result<Option<u64>> {
let path = crate::util::io::canonicalize(path)?;
let disks = sysinfo::Disks::new_with_refreshed_list();
for disk in disks.iter() {
if path.starts_with(disk.mount_point()) {
return Ok(Some(disk.available_space()));
}
}
Ok(None)
}
let new_dir = move_dir.to_string_lossy().to_string();
if prev_dir != move_dir {
if !is_dir_writeable(&move_dir).await? {
settings.custom_dir = Some(prev_custom_dir.clone());
let loader_bar_id = init_loading(
LoadingBarType::DirectoryMove {
old: prev_dir.clone(),
new: move_dir.clone(),
},
100.0,
"Moving launcher directory",
)
.await?;
return Ok(());
if !is_dir_writeable(&move_dir).await? {
return Err(crate::ErrorKind::DirectoryMoveError(format!("Cannot move directory to {}: directory is not writeable", move_dir.display())).into());
}
move_directory(
&prev_dir.join(CACHES_FOLDER_NAME),
&app_dir.join(CACHES_FOLDER_NAME),
io_semaphore,
)
.await?;
move_directory(
&prev_dir.join(LAUNCHER_LOGS_FOLDER_NAME),
&app_dir.join(LAUNCHER_LOGS_FOLDER_NAME),
io_semaphore,
)
.await?;
const MOVE_DIRS: &[&str] = &[
CACHES_FOLDER_NAME,
PROFILES_FOLDER_NAME,
METADATA_FOLDER_NAME,
];
move_directory(
&prev_dir.join(PROFILES_FOLDER_NAME),
&move_dir.join(PROFILES_FOLDER_NAME),
io_semaphore,
)
.await?;
move_directory(
&prev_dir.join(METADATA_FOLDER_NAME),
&move_dir.join(METADATA_FOLDER_NAME),
io_semaphore,
)
.await?;
struct MovePath {
old: PathBuf,
new: PathBuf,
size: u64,
}
async fn add_paths(
source: &Path,
destination: &Path,
paths: &mut Vec<MovePath>,
total_size: &mut u64,
) -> crate::Result<()> {
if !source.exists() {
crate::util::io::create_dir_all(source).await?;
}
if !destination.exists() {
crate::util::io::create_dir_all(destination).await?;
}
for entry_path in
crate::pack::import::get_all_subfiles(source, false)
.await?
{
let relative_path = entry_path.strip_prefix(source)?;
let new_path = destination.join(relative_path);
let path_size =
entry_path.metadata().map(|x| x.len()).unwrap_or(0);
*total_size += path_size;
paths.push(MovePath {
old: entry_path,
new: new_path,
size: path_size,
});
}
Ok(())
}
let mut paths: Vec<MovePath> = vec![];
let mut total_size = 0;
for dir in MOVE_DIRS {
add_paths(
&prev_dir.join(dir),
&move_dir.join(dir),
&mut paths,
&mut total_size,
)
.await?;
emit_loading(
&loader_bar_id,
10.0 / (MOVE_DIRS.len() as f64),
None,
)
.await?;
}
let paths_len = paths.len();
if is_same_disk(&prev_dir, &move_dir).unwrap_or(false) {
let success_idxs = Arc::new(DashSet::new());
let loader_bar_id = Arc::new(&loader_bar_id);
let res =
futures::future::try_join_all(paths.iter().enumerate().map(|(idx, x)| {
let loader_bar_id = loader_bar_id.clone();
let success_idxs = success_idxs.clone();
async move {
let _permit = io_semaphore.0.acquire().await?;
if let Some(parent) = x.new.parent() {
crate::util::io::create_dir_all(parent).await.map_err(|e| {
crate::Error::from(crate::ErrorKind::DirectoryMoveError(
format!(
"Failed to create directory {}: {}",
parent.display(),
e
)
))
})?;
}
crate::util::io::rename(
&x.old,
&x.new,
)
.await
.map_err(|e| {
crate::Error::from(crate::ErrorKind::DirectoryMoveError(
format!(
"Failed to move directory from {} to {}: {}",
x.old.display(),
x.new.display(),
e
),
))
})?;
let _ = emit_loading(
&loader_bar_id,
90.0 / paths_len as f64,
None,
)
.await;
success_idxs.insert(idx);
Ok::<(), crate::Error>(())
}
}))
.await;
if let Err(e) = res {
for idx in success_idxs.iter() {
let path = &paths[*idx.key()];
let res =
tokio::fs::rename(&path.new, &path.old).await;
if let Err(e) = res {
tracing::warn!(
"Failed to rollback directory {}: {}",
path.new.display(),
e
);
}
}
return Err(e);
}
} else {
if let Some(disk_usage) = get_disk_usage(&move_dir)? {
if total_size > disk_usage {
return Err(crate::ErrorKind::DirectoryMoveError(format!("Not enough space to move directory to {}: only {} bytes available", app_dir.display(), disk_usage)).into());
}
}
let loader_bar_id = Arc::new(&loader_bar_id);
futures::future::try_join_all(paths.iter().map(|x| {
let loader_bar_id = loader_bar_id.clone();
async move {
crate::util::fetch::copy(
&x.old,
&x.new,
io_semaphore,
)
.await.map_err(|e| { crate::Error::from(
crate::ErrorKind::DirectoryMoveError(format!("Failed to move directory from {} to {}: {}", x.old.display(), x.new.display(), e)))
})?;
let _ = emit_loading(
&loader_bar_id,
((x.size as f64) / (total_size as f64)) * 60.0,
None,
)
.await;
Ok::<(), crate::Error>(())
}
}))
.await?;
futures::future::join_all(paths.iter().map(|x| {
let loader_bar_id = loader_bar_id.clone();
async move {
let res = async {
let _permit = io_semaphore.0.acquire().await?;
crate::util::io::remove_file(&x.old).await?;
emit_loading(
&loader_bar_id,
30.0 / paths_len as f64,
None,
)
.await?;
Ok::<(), crate::Error>(())
};
if let Err(e) = res.await {
tracing::warn!(
"Failed to remove old file {}: {}",
x.old.display(),
e
);
}
}
}))
.await;
}
let java_versions = JavaVersion::get_all(exec).await?;
for (_, mut java_version) in java_versions {
@@ -274,16 +477,42 @@ impl DirectoryInfo {
prev_custom_dir,
new_dir.trim_end_matches('/').trim_end_matches('\\'),
);
java_version.upsert(exec).await?;
java_version.upsert(exec).await?
}
let profiles = Profile::get_all(exec).await?;
for mut profile in profiles {
profile.icon_path = profile.icon_path.map(|x| {
x.replace(
prev_custom_dir,
new_dir
.trim_end_matches('/')
.trim_end_matches('\\'),
)
});
profile.java_path = profile.java_path.map(|x| {
x.replace(
prev_custom_dir,
new_dir
.trim_end_matches('/')
.trim_end_matches('\\'),
)
});
profile.upsert(exec).await?;
}
}
settings.custom_dir = Some(new_dir.clone());
settings.prev_custom_dir = Some(new_dir);
settings.update(exec).await?;
settings.custom_dir = Some(new_dir);
}
settings.prev_custom_dir.clone_from(&settings.custom_dir);
if settings.custom_dir.is_none() {
settings.custom_dir = Some(app_dir.to_string_lossy().to_string());
}
settings.update(exec).await?;
Ok(())
}
}

View File

@@ -6,7 +6,7 @@ use discord_rich_presence::{
};
use tokio::sync::RwLock;
use crate::state::{Process, Profile};
use crate::state::Profile;
use crate::State;
pub struct DiscordGuard {
@@ -17,8 +17,8 @@ pub struct DiscordGuard {
impl DiscordGuard {
/// Initialize discord IPC client, and attempt to connect to it
/// If it fails, it will still return a DiscordGuard, but the client will be unconnected
pub async fn init() -> crate::Result<DiscordGuard> {
let mut dipc =
pub fn init() -> crate::Result<DiscordGuard> {
let dipc =
DiscordIpcClient::new("1123683254248148992").map_err(|e| {
crate::ErrorKind::OtherError(format!(
"Could not create Discord client {}",
@@ -26,15 +26,10 @@ impl DiscordGuard {
))
})?;
let res = dipc.connect(); // Do not need to connect to Discord to use app
let connected = if res.is_ok() {
Arc::new(AtomicBool::new(true))
} else {
Arc::new(AtomicBool::new(false))
};
let client = Arc::new(RwLock::new(dipc));
Ok(DiscordGuard { client, connected })
Ok(DiscordGuard {
client: Arc::new(RwLock::new(dipc)),
connected: Arc::new(AtomicBool::new(false)),
})
}
/// If the client failed connecting during init(), this will check for connection and attempt to reconnect
@@ -172,7 +167,7 @@ impl DiscordGuard {
return self.clear_activity(true).await;
}
let running_profiles = Process::get_all(&state.pool).await?;
let running_profiles = state.process_manager.get_all();
if let Some(existing_child) = running_profiles.first() {
let prof =
Profile::get(&existing_child.profile_path, &state.pool).await?;

View File

@@ -88,7 +88,7 @@ pub(crate) async fn watch_profiles_init(
watcher: &FileWatcher,
dirs: &DirectoryInfo,
) -> crate::Result<()> {
if let Ok(profiles_dir) = std::fs::read_dir(&dirs.profiles_dir()) {
if let Ok(profiles_dir) = std::fs::read_dir(dirs.profiles_dir()) {
for profile_dir in profiles_dir {
if let Ok(file_name) = profile_dir.map(|x| x.file_name()) {
if let Some(file_name) = file_name.to_str() {
@@ -112,18 +112,20 @@ pub(crate) async fn watch_profile(
) -> crate::Result<()> {
let profile_path = dirs.profiles_dir().join(profile_path);
for folder in ProjectType::iterator()
.map(|x| x.get_folder())
.chain(["crash-reports"])
{
let path = profile_path.join(folder);
if profile_path.exists() && profile_path.is_dir() {
for folder in ProjectType::iterator()
.map(|x| x.get_folder())
.chain(["crash-reports"])
{
let path = profile_path.join(folder);
if !path.exists() {
crate::util::io::create_dir_all(&path).await?;
if !path.exists() {
crate::util::io::create_dir_all(&path).await?;
}
let mut watcher = watcher.write().await;
watcher.watcher().watch(&path, RecursiveMode::Recursive)?;
}
let mut watcher = watcher.write().await;
watcher.watcher().watch(&path, RecursiveMode::Recursive)?;
}
Ok(())

View File

@@ -1,11 +1,13 @@
use crate::data::DirectoryInfo;
use crate::data::{Dependency, User, Version};
use crate::jre::check_jre;
use crate::prelude::ModLoader;
use crate::state;
use crate::state::{
Credentials, DefaultPage, DeviceToken, DeviceTokenKey, DeviceTokenPair,
Hooks, LinkedData, MemorySettings, ModrinthCredentials, Profile,
ProfileInstallStage, Theme, WindowSize,
CacheValue, CachedEntry, CachedFile, CachedFileHash, CachedFileUpdate,
Credentials, DefaultPage, DependencyType, DeviceToken, DeviceTokenKey,
DeviceTokenPair, FileType, Hooks, LinkedData, MemorySettings,
ModrinthCredentials, Profile, ProfileInstallStage, TeamMember, Theme,
VersionFile, WindowSize,
};
use crate::util::fetch::{read_json, IoSemaphore};
use chrono::{DateTime, Utc};
@@ -34,18 +36,6 @@ where
};
let old_launcher_root_str = old_launcher_root.to_string_lossy().to_string();
let new_launcher_root = DirectoryInfo::get_initial_settings_dir().ok_or(
crate::ErrorKind::FSError(
"Could not find valid config dir".to_string(),
),
)?;
let new_launcher_root_str = new_launcher_root
.to_string_lossy()
.to_string()
.trim_end_matches('/')
.trim_end_matches('\\')
.to_string();
let io_semaphore = IoSemaphore(Semaphore::new(10));
let settings_path = old_launcher_root.join("settings.json");
@@ -95,13 +85,9 @@ where
settings.prev_custom_dir = Some(old_launcher_root_str.clone());
for (_, legacy_version) in legacy_settings.java_globals.0 {
if let Ok(Some(mut java_version)) =
if let Ok(Some(java_version)) =
check_jre(PathBuf::from(legacy_version.path)).await
{
java_version.path = java_version
.path
.replace(&old_launcher_root_str, &new_launcher_root_str);
java_version.upsert(exec).await?;
}
}
@@ -175,127 +161,239 @@ where
}
}
let mut cached_entries = vec![];
if let Ok(profiles_dir) = std::fs::read_dir(
&legacy_settings
legacy_settings
.loaded_config_dir
.unwrap_or(old_launcher_root)
.clone()
.unwrap_or_else(|| old_launcher_root.clone())
.join("profiles"),
) {
for entry in profiles_dir.flatten() {
if entry.path().is_dir() {
let profile_path = entry.path().join("profile.json");
if !entry.path().is_dir() {
continue;
}
if let Ok(profile) =
read_json::<LegacyProfile>(&profile_path, &io_semaphore)
.await
let profile_path = entry.path().join("profile.json");
let profile = if let Ok(profile) =
read_json::<LegacyProfile>(&profile_path, &io_semaphore)
.await
{
profile
} else {
continue;
};
for (path, project) in profile.projects {
let full_path = legacy_settings
.loaded_config_dir
.clone()
.unwrap_or_else(|| old_launcher_root.clone())
.join("profiles")
.join(&profile.path)
.join(&path);
if !full_path.exists() || !full_path.is_file() {
continue;
}
let sha512 = project.sha512;
if let LegacyProjectMetadata::Modrinth {
version,
members,
update_version,
..
} = project.metadata
{
Profile {
path: profile.path,
install_stage: match profile.install_stage {
LegacyProfileInstallStage::Installed => {
ProfileInstallStage::Installed
}
LegacyProfileInstallStage::Installing => {
ProfileInstallStage::Installing
}
LegacyProfileInstallStage::PackInstalling => {
ProfileInstallStage::PackInstalling
}
LegacyProfileInstallStage::NotInstalled => {
ProfileInstallStage::NotInstalled
}
},
name: profile.metadata.name,
icon_path: profile.metadata.icon.map(|x| {
x.replace(
&old_launcher_root_str,
&new_launcher_root_str,
)
}),
game_version: profile.metadata.game_version,
loader: match profile.metadata.loader {
LegacyModLoader::Vanilla => ModLoader::Vanilla,
LegacyModLoader::Forge => ModLoader::Forge,
LegacyModLoader::Fabric => ModLoader::Fabric,
LegacyModLoader::Quilt => ModLoader::Quilt,
LegacyModLoader::NeoForge => {
ModLoader::NeoForge
}
},
loader_version: profile
.metadata
.loader_version
.map(|x| x.id),
groups: profile.metadata.groups,
linked_data: profile.metadata.linked_data.and_then(
|x| {
if let Some(project_id) = x.project_id {
if let Some(version_id) = x.version_id {
if let Some(locked) = x.locked {
return Some(LinkedData {
project_id,
version_id,
locked,
});
}
}
}
if let Some(file) = version
.files
.iter()
.find(|x| x.hashes.get("sha512") == Some(&sha512))
{
if let Some(sha1) = file.hashes.get("sha1") {
if let Ok(metadata) = full_path.metadata() {
let file_name = format!(
"{}/{}",
profile.path,
path.replace("\\", "/")
.replace(".disabled", "")
);
None
},
),
created: profile.metadata.date_created,
modified: profile.metadata.date_modified,
last_played: profile.metadata.last_played,
submitted_time_played: profile
.metadata
.submitted_time_played,
recent_time_played: profile
.metadata
.recent_time_played,
java_path: profile.java.as_ref().and_then(|x| {
x.override_version.clone().map(|x| {
x.path.replace(
&old_launcher_root_str,
&new_launcher_root_str,
)
})
}),
extra_launch_args: profile
.java
.as_ref()
.and_then(|x| x.extra_arguments.clone()),
custom_env_vars: profile
.java
.and_then(|x| x.custom_env_args),
memory: profile
.memory
.map(|x| MemorySettings { maximum: x.maximum }),
force_fullscreen: profile.fullscreen,
game_resolution: profile
.resolution
.map(|x| WindowSize(x.0, x.1)),
hooks: Hooks {
pre_launch: profile
.hooks
.as_ref()
.and_then(|x| x.pre_launch.clone()),
wrapper: profile
.hooks
.as_ref()
.and_then(|x| x.wrapper.clone()),
post_exit: profile
.hooks
.and_then(|x| x.post_exit),
},
cached_entries.push(CacheValue::FileHash(
CachedFileHash {
path: file_name,
size: metadata.len(),
hash: sha1.clone(),
},
));
}
cached_entries.push(CacheValue::File(
CachedFile {
hash: sha1.clone(),
project_id: version.project_id.clone(),
version_id: version.id.clone(),
},
));
if let Some(update_version) = update_version {
let mod_loader: ModLoader =
profile.metadata.loader.into();
cached_entries.push(
CacheValue::FileUpdate(
CachedFileUpdate {
hash: sha1.clone(),
game_version: profile
.metadata
.game_version
.clone(),
loader: mod_loader
.as_str()
.to_string(),
update_version_id:
update_version.id.clone(),
},
),
);
cached_entries.push(CacheValue::Version(
(*update_version).into(),
));
}
let members = members
.into_iter()
.map(|x| {
let user = User {
id: x.user.id,
username: x.user.username,
avatar_url: x.user.avatar_url,
bio: x.user.bio,
created: x.user.created,
role: x.user.role,
};
cached_entries.push(CacheValue::User(
user.clone(),
));
TeamMember {
team_id: x.team_id,
user: user.clone(),
is_owner: x.role == "Owner",
role: x.role,
ordering: x.ordering,
}
})
.collect::<Vec<_>>();
cached_entries.push(CacheValue::Team(members));
cached_entries.push(CacheValue::Version(
(*version).into(),
));
}
}
.upsert(exec)
.await?;
}
}
Profile {
path: profile.path,
install_stage: match profile.install_stage {
LegacyProfileInstallStage::Installed => {
ProfileInstallStage::Installed
}
LegacyProfileInstallStage::Installing => {
ProfileInstallStage::Installing
}
LegacyProfileInstallStage::PackInstalling => {
ProfileInstallStage::PackInstalling
}
LegacyProfileInstallStage::NotInstalled => {
ProfileInstallStage::NotInstalled
}
},
name: profile.metadata.name,
icon_path: profile.metadata.icon,
game_version: profile.metadata.game_version,
loader: profile.metadata.loader.into(),
loader_version: profile
.metadata
.loader_version
.map(|x| x.id),
groups: profile.metadata.groups,
linked_data: profile.metadata.linked_data.and_then(|x| {
if let Some(project_id) = x.project_id {
if let Some(version_id) = x.version_id {
if let Some(locked) = x.locked {
return Some(LinkedData {
project_id,
version_id,
locked,
});
}
}
}
None
}),
created: profile.metadata.date_created,
modified: profile.metadata.date_modified,
last_played: profile.metadata.last_played,
submitted_time_played: profile
.metadata
.submitted_time_played,
recent_time_played: profile.metadata.recent_time_played,
java_path: profile.java.as_ref().and_then(|x| {
x.override_version.clone().map(|x| x.path)
}),
extra_launch_args: profile
.java
.as_ref()
.and_then(|x| x.extra_arguments.clone()),
custom_env_vars: profile
.java
.and_then(|x| x.custom_env_args),
memory: profile
.memory
.map(|x| MemorySettings { maximum: x.maximum }),
force_fullscreen: profile.fullscreen,
game_resolution: profile
.resolution
.map(|x| WindowSize(x.0, x.1)),
hooks: Hooks {
pre_launch: profile
.hooks
.as_ref()
.and_then(|x| x.pre_launch.clone()),
wrapper: profile
.hooks
.as_ref()
.and_then(|x| x.wrapper.clone()),
post_exit: profile.hooks.and_then(|x| x.post_exit),
},
}
.upsert(exec)
.await?;
}
}
CachedEntry::upsert_many(
&cached_entries
.into_iter()
.map(|x| {
let mut entry = x.get_entry();
entry.expires =
Utc::now().timestamp() - entry.type_.expiry();
entry
})
.collect::<Vec<_>>(),
exec,
)
.await?;
settings.migrated = true;
settings.update(exec).await?;
}
@@ -384,6 +482,12 @@ struct LegacyJavaVersion {
#[derive(Deserialize, Clone, Debug)]
struct LegacyModrinthUser {
pub id: String,
pub username: String,
// pub name: Option<String>,
pub avatar_url: Option<String>,
pub bio: Option<String>,
pub created: DateTime<Utc>,
pub role: String,
}
#[derive(Deserialize, Clone, Debug)]
@@ -439,6 +543,195 @@ struct LegacyProfile {
pub resolution: Option<LegacyWindowSize>,
pub fullscreen: Option<bool>,
pub hooks: Option<LegacyHooks>,
pub projects: HashMap<String, LegacyProject>,
}
#[derive(Deserialize, Clone, Debug)]
struct LegacyProject {
pub sha512: String,
// pub disabled: bool,
pub metadata: LegacyProjectMetadata,
// pub file_name: String,
}
#[derive(Deserialize, Clone, Debug)]
#[serde(tag = "type", rename_all = "snake_case")]
enum LegacyProjectMetadata {
Modrinth {
// project: Box<LegacyModrinthProject>,
version: Box<LegacyModrinthVersion>,
members: Vec<LegacyModrinthTeamMember>,
update_version: Option<Box<LegacyModrinthVersion>>,
},
Inferred,
Unknown,
}
// #[derive(Deserialize, Clone, Debug)]
// struct LegacyModrinthProject {
// pub id: String,
// pub slug: Option<String>,
// pub project_type: String,
// pub team: String,
// pub title: String,
// pub description: String,
// pub body: String,
//
// pub published: DateTime<Utc>,
// pub updated: DateTime<Utc>,
//
// pub client_side: LegacySideType,
// pub server_side: LegacySideType,
//
// pub downloads: u32,
// pub followers: u32,
//
// pub categories: Vec<String>,
// pub additional_categories: Vec<String>,
// pub game_versions: Vec<String>,
// pub loaders: Vec<String>,
//
// pub versions: Vec<String>,
//
// pub icon_url: Option<String>,
// }
#[derive(Deserialize, Clone, Debug)]
struct LegacyModrinthVersion {
pub id: String,
pub project_id: String,
pub author_id: String,
pub featured: bool,
pub name: String,
pub version_number: String,
pub changelog: String,
pub changelog_url: Option<String>,
pub date_published: DateTime<Utc>,
pub downloads: u32,
pub version_type: String,
pub files: Vec<LegacyModrinthVersionFile>,
pub dependencies: Vec<LegacyDependency>,
pub game_versions: Vec<String>,
pub loaders: Vec<String>,
}
impl From<LegacyModrinthVersion> for Version {
fn from(value: LegacyModrinthVersion) -> Self {
Version {
id: value.id,
project_id: value.project_id,
author_id: value.author_id,
featured: value.featured,
name: value.name,
version_number: value.version_number,
changelog: value.changelog,
changelog_url: value.changelog_url,
date_published: value.date_published,
downloads: value.downloads,
version_type: value.version_type,
files: value
.files
.into_iter()
.map(|x| VersionFile {
hashes: x.hashes,
url: x.url,
filename: x.filename,
primary: x.primary,
size: x.size,
file_type: x.file_type.map(|x| match x {
LegacyFileType::RequiredResourcePack => {
FileType::RequiredResourcePack
}
LegacyFileType::OptionalResourcePack => {
FileType::OptionalResourcePack
}
LegacyFileType::Unknown => FileType::Unknown,
}),
})
.collect::<Vec<_>>(),
dependencies: value
.dependencies
.into_iter()
.map(|x| Dependency {
version_id: x.version_id,
project_id: x.project_id,
file_name: x.file_name,
dependency_type: match x.dependency_type {
LegacyDependencyType::Required => {
DependencyType::Required
}
LegacyDependencyType::Optional => {
DependencyType::Optional
}
LegacyDependencyType::Incompatible => {
DependencyType::Incompatible
}
LegacyDependencyType::Embedded => {
DependencyType::Embedded
}
},
})
.collect::<Vec<_>>(),
game_versions: value.game_versions,
loaders: value.loaders,
}
}
}
#[derive(Deserialize, Clone, Debug)]
struct LegacyModrinthVersionFile {
pub hashes: HashMap<String, String>,
pub url: String,
pub filename: String,
pub primary: bool,
pub size: u32,
pub file_type: Option<LegacyFileType>,
}
#[derive(Deserialize, Clone, Debug)]
struct LegacyDependency {
pub version_id: Option<String>,
pub project_id: Option<String>,
pub file_name: Option<String>,
pub dependency_type: LegacyDependencyType,
}
#[derive(Deserialize, Clone, Debug)]
struct LegacyModrinthTeamMember {
pub team_id: String,
pub user: LegacyModrinthUser,
pub role: String,
pub ordering: i64,
}
#[derive(Deserialize, Copy, Clone, Debug)]
#[serde(rename_all = "lowercase")]
enum LegacyDependencyType {
Required,
Optional,
Incompatible,
Embedded,
}
// #[derive(Deserialize, Clone, Debug, Eq, PartialEq)]
// #[serde(rename_all = "kebab-case")]
// enum LegacySideType {
// Required,
// Optional,
// Unsupported,
// Unknown,
// }
#[derive(Deserialize, Copy, Clone, Debug)]
#[serde(rename_all = "kebab-case")]
enum LegacyFileType {
RequiredResourcePack,
OptionalResourcePack,
Unknown,
}
#[derive(Deserialize, Clone, Debug)]
@@ -477,6 +770,18 @@ enum LegacyModLoader {
NeoForge,
}
impl From<LegacyModLoader> for ModLoader {
fn from(value: LegacyModLoader) -> Self {
match value {
LegacyModLoader::Vanilla => ModLoader::Vanilla,
LegacyModLoader::Forge => ModLoader::Forge,
LegacyModLoader::Fabric => ModLoader::Fabric,
LegacyModLoader::Quilt => ModLoader::Quilt,
LegacyModLoader::NeoForge => ModLoader::NeoForge,
}
}
}
#[derive(Deserialize, Clone, Debug)]
struct LegacyLinkedData {
pub project_id: Option<String>,

View File

@@ -1,8 +1,4 @@
//! Theseus state management system
use crate::event::emit::{emit_loading, init_loading_unsafe};
use crate::event::LoadingBarType;
use crate::util::fetch::{FetchSemaphore, IoSemaphore};
use std::sync::Arc;
use tokio::sync::{OnceCell, Semaphore};
@@ -35,7 +31,7 @@ pub use self::minecraft_auth::*;
mod cache;
pub use self::cache::*;
mod db;
pub mod db;
pub mod fs_watcher;
mod mr_auth;
@@ -61,6 +57,9 @@ pub struct State {
/// Discord RPC
pub discord_rpc: DiscordGuard,
/// Process manager
pub process_manager: ProcessManager,
pub(crate) pool: SqlitePool,
pub(crate) file_watcher: FileWatcher,
@@ -72,7 +71,13 @@ impl State {
.get_or_try_init(Self::initialize_state)
.await?;
Process::garbage_collect(&state.pool).await?;
tokio::task::spawn(async move {
let res = state.discord_rpc.clear_to_default(true).await;
if let Err(e) = res {
tracing::error!("Error running discord RPC: {e}");
}
});
Ok(())
}
@@ -94,13 +99,6 @@ impl State {
#[tracing::instrument]
async fn initialize_state() -> crate::Result<Arc<Self>> {
let loading_bar = init_loading_unsafe(
LoadingBarType::StateInit,
100.0,
"Initializing launcher",
)
.await?;
let pool = db::connect().await?;
legacy_converter::migrate_legacy_data(&pool).await?;
@@ -120,29 +118,20 @@ impl State {
&io_semaphore,
)
.await?;
let directories = DirectoryInfo::init(settings.custom_dir).await?;
emit_loading(&loading_bar, 10.0, None).await?;
let discord_rpc = DiscordGuard::init().await?;
if settings.discord_rpc {
// Add default Idling to discord rich presence
// Force add to avoid recursion
let _ = discord_rpc.force_set_activity("Idling...", true).await;
}
let discord_rpc = DiscordGuard::init()?;
let file_watcher = fs_watcher::init_watcher().await?;
fs_watcher::watch_profiles_init(&file_watcher, &directories).await?;
emit_loading(&loading_bar, 10.0, None).await?;
Ok(Arc::new(Self {
directories,
fetch_semaphore,
io_semaphore,
api_semaphore,
discord_rpc,
process_manager: ProcessManager::new(),
pool,
file_watcher,
}))

View File

@@ -1,136 +1,134 @@
use chrono::{DateTime, Utc};
use serde::Deserialize;
use serde::Serialize;
use tokio::process::Command;
use crate::event::emit::emit_process;
use crate::event::ProcessPayloadType;
use crate::profile;
use crate::util::io::IOError;
use crate::{profile, ErrorKind};
use chrono::{DateTime, Utc};
use dashmap::DashMap;
use serde::Deserialize;
use serde::Serialize;
use std::process::ExitStatus;
use tokio::process::{Child, Command};
use uuid::Uuid;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct Process {
pub pid: i64,
pub start_time: i64,
pub name: String,
pub executable: String,
pub profile_path: String,
pub post_exit_command: Option<String>,
pub struct ProcessManager {
processes: DashMap<Uuid, Process>,
}
macro_rules! select_process_with_predicate {
($predicate:tt, $param:ident) => {
sqlx::query_as!(
Process,
r#"
SELECT
pid, start_time, name, executable, profile_path, post_exit_command
FROM processes
"#
+ $predicate,
$param
)
};
impl Default for ProcessManager {
fn default() -> Self {
Self::new()
}
}
impl Process {
/// Runs on launcher startup. Queries all the cached processes and removes processes that no
/// longer exist. If a PID is found, they are "rescued" and passed to our process manager
pub async fn garbage_collect(
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite> + Copy,
) -> crate::Result<()> {
let processes = Self::get_all(exec).await?;
let mut system = sysinfo::System::new();
system.refresh_processes();
for cached_process in processes {
let process = system
.process(sysinfo::Pid::from_u32(cached_process.pid as u32));
if let Some(process) = process {
if cached_process.start_time as u64 == process.start_time()
&& cached_process.name == process.name()
&& cached_process.executable
== process
.exe()
.map(|x| x.to_string_lossy())
.unwrap_or_default()
{
tokio::spawn(cached_process.sequential_process_manager());
break;
}
}
Self::remove(cached_process.pid as u32, exec).await?;
impl ProcessManager {
pub fn new() -> Self {
Self {
processes: DashMap::new(),
}
Ok(())
}
pub async fn insert_new_process(
&self,
profile_path: &str,
mut mc_command: Command,
post_exit_command: Option<String>, // Command to run after minecraft.
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<Self> {
post_exit_command: Option<String>,
) -> crate::Result<ProcessMetadata> {
let mc_proc = mc_command.spawn().map_err(IOError::from)?;
let pid = mc_proc.id().ok_or_else(|| {
crate::ErrorKind::LauncherError(
"Process immediately failed, could not get PID".to_string(),
)
})?;
let mut system = sysinfo::System::new();
system.refresh_processes();
let process =
system.process(sysinfo::Pid::from_u32(pid)).ok_or_else(|| {
crate::ErrorKind::LauncherError(format!(
"Could not find process {}",
pid
))
})?;
let start_time = process.start_time();
let name = process.name().to_string();
let Some(path) = process.exe() else {
return Err(ErrorKind::LauncherError(format!(
"Cached process {} has no accessible path",
pid
))
.into());
let process = Process {
metadata: ProcessMetadata {
uuid: Uuid::new_v4(),
start_time: Utc::now(),
profile_path: profile_path.to_string(),
},
child: mc_proc,
};
let executable = path.to_string_lossy().to_string();
let metadata = process.metadata.clone();
let process = Self {
pid: pid as i64,
start_time: start_time as i64,
name,
executable,
profile_path: profile_path.to_string(),
tokio::spawn(Process::sequential_process_manager(
profile_path.to_string(),
post_exit_command,
};
process.upsert(exec).await?;
metadata.uuid,
));
tokio::spawn(process.clone().sequential_process_manager());
self.processes.insert(process.metadata.uuid, process);
emit_process(
profile_path,
pid,
metadata.uuid,
ProcessPayloadType::Launched,
"Launched Minecraft",
)
.await?;
Ok(process)
Ok(metadata)
}
/// Returns a cloned metadata snapshot for the tracked process with the
/// given UUID, or `None` if no such process exists.
pub fn get(&self, id: Uuid) -> Option<ProcessMetadata> {
    let entry = self.processes.get(&id)?;
    Some(entry.metadata.clone())
}
/// Returns cloned metadata snapshots for every currently tracked
/// process. Iteration order over the map is unspecified.
pub fn get_all(&self) -> Vec<ProcessMetadata> {
    let mut all = Vec::with_capacity(self.processes.len());
    for entry in self.processes.iter() {
        all.push(entry.value().metadata.clone());
    }
    all
}
/// Non-blocking exit check for a tracked process.
///
/// Returns `Ok(None)` if no process with `id` is tracked; otherwise
/// `Ok(Some(status))` where `status` is `Some(exit)` if the child has
/// exited and `None` if it is still running.
pub fn try_wait(
    &self,
    id: Uuid,
) -> crate::Result<Option<Option<ExitStatus>>> {
    match self.processes.get_mut(&id) {
        Some(mut entry) => Ok(Some(entry.child.try_wait()?)),
        None => Ok(None),
    }
}
/// Waits for the tracked process with `id` to exit. A no-op if the
/// process is not tracked.
pub async fn wait_for(&self, id: Uuid) -> crate::Result<()> {
    match self.processes.get_mut(&id) {
        None => Ok(()),
        Some(mut entry) => {
            // NOTE(review): the DashMap guard is held across this await,
            // matching the original behavior.
            entry.child.wait().await?;
            Ok(())
        }
    }
}
/// Forcibly terminates the tracked process with `id`. A no-op if the
/// process is not tracked.
pub async fn kill(&self, id: Uuid) -> crate::Result<()> {
    match self.processes.get_mut(&id) {
        None => Ok(()),
        Some(mut entry) => {
            entry.child.kill().await?;
            Ok(())
        }
    }
}
/// Drops the process entry for `id` from the manager, if present.
fn remove(&self, id: Uuid) {
    // Discard the removed entry; only eviction matters here.
    let _ = self.processes.remove(&id);
}
}
/// Serializable snapshot describing a tracked child process. This is
/// what `ProcessManager::get`/`get_all` hand out; the live `Child`
/// handle itself never leaves the manager.
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct ProcessMetadata {
    // Launcher-assigned identifier generated at spawn time
    // (not the OS PID).
    pub uuid: Uuid,
    // Path/key of the profile this process was launched from.
    pub profile_path: String,
    pub start_time: DateTime<Utc>,
}
/// Internal pairing of a process's metadata with its live `Child`
/// handle; private to this module and never exposed to callers.
#[derive(Debug)]
struct Process {
    metadata: ProcessMetadata,
    child: Child,
}
impl Process {
// Spawns a new child process and inserts it into the hashmap
// Also, as the process ends, it spawns the follow-up process if it exists
// By convention, ExitStatus is last command's exit status, and we exit on the first non-zero exit status
async fn sequential_process_manager(self) -> crate::Result<i32> {
async fn sequential_process_manager(
profile_path: String,
post_exit_command: Option<String>,
uuid: Uuid,
) -> crate::Result<()> {
async fn update_playtime(
last_updated_playtime: &mut DateTime<Utc>,
profile_path: &str,
@@ -160,207 +158,79 @@ impl Process {
let mc_exit_status;
let mut last_updated_playtime = Utc::now();
let state = crate::State::get().await?;
loop {
if let Some(t) = self.try_wait().await? {
mc_exit_status = t;
if let Some(process) = state.process_manager.try_wait(uuid)? {
if let Some(t) = process {
mc_exit_status = t;
break;
}
} else {
mc_exit_status = ExitStatus::default();
break;
}
// sleep for 10ms
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
// Auto-update playtime every minute
update_playtime(
&mut last_updated_playtime,
&self.profile_path,
false,
)
.await;
update_playtime(&mut last_updated_playtime, &profile_path, false)
.await;
}
state.process_manager.remove(uuid);
emit_process(
&profile_path,
uuid,
ProcessPayloadType::Finished,
"Exited process",
)
.await?;
// Now fully complete- update playtime one last time
update_playtime(&mut last_updated_playtime, &self.profile_path, true)
.await;
update_playtime(&mut last_updated_playtime, &profile_path, true).await;
// Publish play time update
// Allow failure, it will be stored locally and sent next time
// Sent in another thread as first call may take a couple seconds and hold up process ending
let profile_path = self.profile_path.clone();
let profile = profile_path.clone();
tokio::spawn(async move {
if let Err(e) =
profile::try_update_playtime(&profile_path.clone()).await
{
if let Err(e) = profile::try_update_playtime(&profile).await {
tracing::warn!(
"Failed to update playtime for profile {}: {}",
&profile_path,
profile,
e
);
}
});
let state = crate::State::get().await?;
let _ = state.discord_rpc.clear_to_default(true).await;
Self::remove(self.pid as u32, &state.pool).await?;
// If in tauri, window should show itself again after process exists if it was hidden
#[cfg(feature = "tauri")]
{
let window = crate::EventState::get_main_window().await?;
if let Some(window) = window {
window.unminimize()?;
window.set_focus()?;
}
}
if mc_exit_status == 0 {
if mc_exit_status.success() {
// We do not wait on the post exist command to finish running! We let it spawn + run on its own.
// This behaviour may be changed in the future
if let Some(hook) = self.post_exit_command {
if let Some(hook) = post_exit_command {
let mut cmd = hook.split(' ');
if let Some(command) = cmd.next() {
let mut command = Command::new(command);
command.args(&cmd.collect::<Vec<&str>>()).current_dir(
crate::api::profile::get_full_path(&self.profile_path)
.await?,
command.args(cmd.collect::<Vec<&str>>()).current_dir(
profile::get_full_path(&profile_path).await?,
);
command.spawn().map_err(IOError::from)?;
}
}
}
emit_process(
&self.profile_path,
self.pid as u32,
ProcessPayloadType::Finished,
"Exited process",
)
.await?;
Ok(mc_exit_status)
}
async fn try_wait(&self) -> crate::Result<Option<i32>> {
let mut system = sysinfo::System::new();
if !system.refresh_process(sysinfo::Pid::from_u32(self.pid as u32)) {
return Ok(Some(0));
}
let process = system.process(sysinfo::Pid::from_u32(self.pid as u32));
if let Some(process) = process {
if process.status() == sysinfo::ProcessStatus::Run {
Ok(None)
} else {
Ok(Some(0))
}
} else {
Ok(Some(0))
}
}
pub async fn wait_for(&self) -> crate::Result<()> {
loop {
if self.try_wait().await?.is_some() {
break;
}
// sleep for 10ms
tokio::time::sleep(tokio::time::Duration::from_millis(50)).await;
}
Ok(())
}
pub async fn kill(&self) -> crate::Result<()> {
let mut system = sysinfo::System::new();
if system.refresh_process(sysinfo::Pid::from_u32(self.pid as u32)) {
let process =
system.process(sysinfo::Pid::from_u32(self.pid as u32));
if let Some(process) = process {
process.kill();
}
}
Ok(())
}
pub async fn get(
pid: i32,
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<Option<Self>> {
let res = select_process_with_predicate!("WHERE pid = $1", pid)
.fetch_optional(exec)
.await?;
Ok(res)
}
pub async fn get_from_profile(
profile_path: &str,
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<Vec<Self>> {
let results = select_process_with_predicate!(
"WHERE profile_path = $1",
profile_path
)
.fetch_all(exec)
.await?;
Ok(results)
}
pub async fn get_all(
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<Vec<Self>> {
let true_val = 1;
let results = select_process_with_predicate!("WHERE 1=$1", true_val)
.fetch_all(exec)
.await?;
Ok(results)
}
pub async fn upsert(
&self,
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<()> {
sqlx::query!(
"
INSERT INTO processes (pid, start_time, name, executable, profile_path, post_exit_command)
VALUES ($1, $2, $3, $4, $5, $6)
ON CONFLICT (pid) DO UPDATE SET
start_time = $2,
name = $3,
executable = $4,
profile_path = $5,
post_exit_command = $6
",
self.pid,
self.start_time,
self.name,
self.executable,
self.profile_path,
self.post_exit_command
)
.execute(exec)
.await?;
Ok(())
}
pub async fn remove(
pid: u32,
exec: impl sqlx::Executor<'_, Database = sqlx::Sqlite>,
) -> crate::Result<()> {
let pid = pid as i32;
sqlx::query!(
"
DELETE FROM processes WHERE pid = $1
",
pid,
)
.execute(exec)
.await?;
Ok(())
}
}