Initial shared instances backend (#3800)

* Create base shared instance migration and initial routes

* Fix build

* Add version uploads

* Add permissions field for shared instance users

* Actually use permissions field

* Add "public" flag to shared instances that allows GETing them without authorization

* Add the ability to get and list shared instance versions

* Add the ability to delete shared instance versions

* Fix build after merge

* Secured file hosting (#3784)

* Remove Backblaze-specific file-hosting backend

* Added S3_USES_PATH_STYLE_BUCKETS

* Remove unused file_id parameter from delete_file_version

* Add support for separate public and private buckets in labrinth::file_hosting

* Rename delete_file_version to delete_file

* Add (untested) get_url_for_private_file

* Remove url field from shared instance routes

* Remove url field from shared instance routes

* Use private bucket for shared instance versions

* Make S3 environment variables fully separate between public and private buckets

* Change file host expiry for shared instances to 180 seconds

* Fix lint

* Merge shared instance migrations into a single migration

* Replace shared instance owners with Ghost instead of deleting the instance
This commit is contained in:
Josiah Glosson
2025-06-19 14:46:12 -05:00
committed by GitHub
parent d4864deac5
commit cc34e69524
61 changed files with 2161 additions and 491 deletions

View File

@@ -1,5 +1,6 @@
use crate::file_hosting::{
DeleteFileData, FileHost, FileHostingError, UploadFileData,
DeleteFileData, FileHost, FileHostPublicity, FileHostingError,
UploadFileData,
};
use async_trait::async_trait;
use bytes::Bytes;
@@ -10,50 +11,70 @@ use s3::creds::Credentials;
use s3::region::Region;
use sha2::Digest;
pub struct S3BucketConfig {
pub name: String,
pub uses_path_style: bool,
pub region: String,
pub url: String,
pub access_token: String,
pub secret: String,
}
pub struct S3Host {
bucket: Bucket,
public_bucket: Bucket,
private_bucket: Bucket,
}
impl S3Host {
pub fn new(
bucket_name: &str,
bucket_region: &str,
url: &str,
access_token: &str,
secret: &str,
public_bucket: S3BucketConfig,
private_bucket: S3BucketConfig,
) -> Result<S3Host, FileHostingError> {
let bucket = Bucket::new(
bucket_name,
if bucket_region == "r2" {
Region::R2 {
account_id: url.to_string(),
}
} else {
Region::Custom {
region: bucket_region.to_string(),
endpoint: url.to_string(),
}
},
Credentials::new(
Some(access_token),
Some(secret),
None,
None,
None,
)
.map_err(|_| {
FileHostingError::S3Error(
"Error while creating credentials".to_string(),
let create_bucket =
|config: S3BucketConfig| -> Result<_, FileHostingError> {
let mut bucket = Bucket::new(
"",
if config.region == "r2" {
Region::R2 {
account_id: config.url,
}
} else {
Region::Custom {
region: config.region,
endpoint: config.url,
}
},
Credentials {
access_key: Some(config.access_token),
secret_key: Some(config.secret),
..Credentials::anonymous().unwrap()
},
)
})?,
)
.map_err(|_| {
FileHostingError::S3Error(
"Error while creating Bucket instance".to_string(),
)
})?;
.map_err(|e| {
FileHostingError::S3Error("creating Bucket instance", e)
})?;
Ok(S3Host { bucket: *bucket })
bucket.name = config.name;
if config.uses_path_style {
bucket.set_path_style();
} else {
bucket.set_subdomain_style();
}
Ok(bucket)
};
Ok(S3Host {
public_bucket: *create_bucket(public_bucket)?,
private_bucket: *create_bucket(private_bucket)?,
})
}
fn get_bucket(&self, publicity: FileHostPublicity) -> &Bucket {
match publicity {
FileHostPublicity::Public => &self.public_bucket,
FileHostPublicity::Private => &self.private_bucket,
}
}
}
@@ -63,27 +84,24 @@ impl FileHost for S3Host {
&self,
content_type: &str,
file_name: &str,
file_publicity: FileHostPublicity,
file_bytes: Bytes,
) -> Result<UploadFileData, FileHostingError> {
let content_sha1 = sha1::Sha1::digest(&file_bytes).encode_hex();
let content_sha512 = format!("{:x}", sha2::Sha512::digest(&file_bytes));
self.bucket
self.get_bucket(file_publicity)
.put_object_with_content_type(
format!("/{file_name}"),
&file_bytes,
content_type,
)
.await
.map_err(|err| {
FileHostingError::S3Error(format!(
"Error while uploading file {file_name} to S3: {err}"
))
})?;
.map_err(|e| FileHostingError::S3Error("uploading file", e))?;
Ok(UploadFileData {
file_id: file_name.to_string(),
file_name: file_name.to_string(),
file_publicity,
content_length: file_bytes.len() as u32,
content_sha512,
content_sha1,
@@ -93,22 +111,32 @@ impl FileHost for S3Host {
})
}
async fn delete_file_version(
async fn get_url_for_private_file(
&self,
file_id: &str,
file_name: &str,
expiry_secs: u32,
) -> Result<String, FileHostingError> {
let url = self
.private_bucket
.presign_get(format!("/{file_name}"), expiry_secs, None)
.await
.map_err(|e| {
FileHostingError::S3Error("generating presigned URL", e)
})?;
Ok(url)
}
async fn delete_file(
&self,
file_name: &str,
file_publicity: FileHostPublicity,
) -> Result<DeleteFileData, FileHostingError> {
self.bucket
self.get_bucket(file_publicity)
.delete_object(format!("/{file_name}"))
.await
.map_err(|err| {
FileHostingError::S3Error(format!(
"Error while deleting file {file_name} to S3: {err}"
))
})?;
.map_err(|e| FileHostingError::S3Error("deleting file", e))?;
Ok(DeleteFileData {
file_id: file_id.to_string(),
file_name: file_name.to_string(),
})
}