use log::info;
use sqlx::migrate::{Migrate, MigrateDatabase, Migrator};
use sqlx::postgres::{PgPool, PgPoolOptions};
use sqlx::{Connection, PgConnection, Postgres};
use std::path::Path;

const MIGRATION_FOLDER: &str = "migrations";

/// Builds the Postgres connection pool from `DATABASE_URL`, sizing it with
/// `DATABASE_MIN_CONNECTIONS` / `DATABASE_MAX_CONNECTIONS` (both default to 16).
pub async fn connect() -> Result<PgPool, sqlx::Error> {
    info!("Initializing database connection");

    let database_url = dotenv::var("DATABASE_URL").expect("`DATABASE_URL` not in .env");
    let pool = PgPoolOptions::new()
        .min_connections(
            dotenv::var("DATABASE_MIN_CONNECTIONS")
                .ok()
                .and_then(|x| x.parse().ok())
                .unwrap_or(16),
        )
        .max_connections(
            dotenv::var("DATABASE_MAX_CONNECTIONS")
                .ok()
                .and_then(|x| x.parse().ok())
                .unwrap_or(16),
        )
        .connect(&database_url)
        .await?;

    Ok(pool)
}

/// Creates the database if it does not exist yet, then applies any pending
/// migrations from the `migrations` folder.
pub async fn check_for_migrations() -> Result<(), sqlx::Error> {
    let uri = dotenv::var("DATABASE_URL").expect("`DATABASE_URL` not in .env");
    let uri = uri.as_str();

    if !Postgres::database_exists(uri).await? {
        info!("Creating database...");
        Postgres::create_database(uri).await?;
    }

    info!("Applying migrations...");
    run_migrations(uri).await?;

    Ok(())
}

/// Applies every migration newer than the database's current version and
/// validates the ones that have already been applied.
pub async fn run_migrations(uri: &str) -> Result<(), sqlx::Error> {
    let migrator = Migrator::new(Path::new(MIGRATION_FOLDER)).await?;
    let mut conn: PgConnection = PgConnection::connect(uri).await?;

    conn.ensure_migrations_table().await?;

    let (version, dirty) = conn.version().await?.unwrap_or((0, false));

    if dirty {
        panic!("The database is dirty! Please check your database status.");
    }

    for migration in migrator.iter() {
        if migration.version > version {
            let _elapsed = conn.apply(migration).await?;
        } else {
            conn.validate(migration).await?;
        }
    }

    Ok(())
}
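// Usage sketch (an assumption for illustration, not part of this file): both
// entry points above are meant to run once at startup, before the web server
// comes up. The `database` module path and the `tokio::main` runtime attribute
// below are illustrative assumptions; only `check_for_migrations` and `connect`
// come from the code above.
//
// #[tokio::main]
// async fn main() -> Result<(), sqlx::Error> {
//     database::check_for_migrations().await?; // create the DB if needed + apply migrations
//     let pool = database::connect().await?;   // build the PgPool for the application
//     // Share `pool` with request handlers from here on.
//     Ok(())
// }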