Initial shared instances backend (#3800)

* Create base shared instance migration and initial routes

* Fix build

* Add version uploads

* Add permissions field for shared instance users

* Actually use permissions field

* Add "public" flag to shared instances that allows GETing them without authorization

* Add the ability to get and list shared instance versions

* Add the ability to delete shared instance versions

* Fix build after merge

* Secured file hosting (#3784)

* Remove Backblaze-specific file-hosting backend

* Added S3_USES_PATH_STYLE_BUCKETS

* Remove unused file_id parameter from delete_file_version

* Add support for separate public and private buckets in labrinth::file_hosting

* Rename delete_file_version to delete_file

* Add (untested) get_url_for_private_file

* Remove url field from shared instance routes

* Remove url field from shared instance routes

* Use private bucket for shared instance versions

* Make S3 environment variables fully separate between public and private buckets

* Change file host expiry for shared instances to 180 seconds

* Fix lint

* Merge shared instance migrations into a single migration

* Replace shared instance owners with Ghost instead of deleting the instance
This commit is contained in:
Josiah Glosson
2025-06-19 14:46:12 -05:00
committed by GitHub
parent d4864deac5
commit cc34e69524
61 changed files with 2161 additions and 491 deletions

View File

@@ -4,8 +4,9 @@ use actix_web_prom::PrometheusMetricsBuilder;
use clap::Parser;
use labrinth::background_task::BackgroundTask;
use labrinth::database::redis::RedisPool;
use labrinth::file_hosting::S3Host;
use labrinth::file_hosting::{S3BucketConfig, S3Host};
use labrinth::search;
use labrinth::util::env::parse_var;
use labrinth::util::ratelimit::rate_limit_middleware;
use labrinth::{check_env_vars, clickhouse, database, file_hosting, queue};
use std::ffi::CStr;
@@ -51,6 +52,7 @@ async fn main() -> std::io::Result<()> {
if check_env_vars() {
error!("Some environment variables are missing!");
std::process::exit(1);
}
// DSN is from SENTRY_DSN env variable.
@@ -93,24 +95,33 @@ async fn main() -> std::io::Result<()> {
let file_host: Arc<dyn file_hosting::FileHost + Send + Sync> =
match storage_backend.as_str() {
"backblaze" => Arc::new(
file_hosting::BackblazeHost::new(
&dotenvy::var("BACKBLAZE_KEY_ID").unwrap(),
&dotenvy::var("BACKBLAZE_KEY").unwrap(),
&dotenvy::var("BACKBLAZE_BUCKET_ID").unwrap(),
"s3" => {
let config_from_env = |bucket_type| S3BucketConfig {
name: parse_var(&format!("S3_{bucket_type}_BUCKET_NAME"))
.unwrap(),
uses_path_style: parse_var(&format!(
"S3_{bucket_type}_USES_PATH_STYLE_BUCKET"
))
.unwrap(),
region: parse_var(&format!("S3_{bucket_type}_REGION"))
.unwrap(),
url: parse_var(&format!("S3_{bucket_type}_URL")).unwrap(),
access_token: parse_var(&format!(
"S3_{bucket_type}_ACCESS_TOKEN"
))
.unwrap(),
secret: parse_var(&format!("S3_{bucket_type}_SECRET"))
.unwrap(),
};
Arc::new(
S3Host::new(
config_from_env("PUBLIC"),
config_from_env("PRIVATE"),
)
.unwrap(),
)
.await,
),
"s3" => Arc::new(
S3Host::new(
&dotenvy::var("S3_BUCKET_NAME").unwrap(),
&dotenvy::var("S3_REGION").unwrap(),
&dotenvy::var("S3_URL").unwrap(),
&dotenvy::var("S3_ACCESS_TOKEN").unwrap(),
&dotenvy::var("S3_SECRET").unwrap(),
)
.unwrap(),
),
}
"local" => Arc::new(file_hosting::MockHost::new()),
_ => panic!("Invalid storage backend specified. Aborting startup!"),
};