Tests (#719)
* computer switch
* some fixes; github action
* added pr to master
* sqlx database setup
* switched initial GHA test db
* removed sqlx database setup
* unfinished patch route
* bug fixes + tests
* more tests, more fixes, cargo fmt
* merge fixes
* more tests, full reorganization
* fmt, clippy
* sqlx-data
* revs
* removed comments
* delete revs
.github/workflows/tests.yml (vendored): 24 changes
@@ -4,6 +4,8 @@ on:
   push:
     branches: [master]
   pull_request:
+    branches:
+      - master
 
 env:
   CARGO_TERM_COLOR: always
@@ -21,23 +23,34 @@ jobs:
 
     steps:
       - uses: actions/checkout@v2
 
+      # Start Docker Compose
+      - name: Start Docker Compose
+        run: docker-compose up -d
+
       - uses: actions-rs/toolchain@v1
+        name: Install toolchain
         with:
           profile: minimal
           toolchain: ${{ matrix.rust }}
           override: true
-      - name: Cache build artifacts
-        id: cache-build
 
+      # Cache dependencies and build artifacts
+      - name: Cache build artifacts and dependencies
         uses: actions/cache@v2
         with:
-          path: target/**
-          key: ${{ runner.os }}-build-cache-${{ matrix.rust }}
+          path: |
+            ~/.cargo/registry
+            ~/.cargo/git
+            target
+          key: ${{ runner.os }}-cargo-${{ hashFiles('**/Cargo.lock') }}
 
       - uses: actions-rs/cargo@v1
         with:
           command: build
+        env:
+          SQLX_OFFLINE: true
 
       - uses: actions-rs/cargo@v1
         with:
           command: test
@@ -50,4 +63,5 @@ jobs:
           S3_URL: ${{ secrets.S3_URL }}
           S3_REGION: ${{ secrets.S3_REGION }}
           S3_BUCKET_NAME: ${{ secrets.S3_BUCKET_NAME }}
           SQLX_OFFLINE: true
+          DATABASE_URL: postgresql://labrinth:labrinth@localhost/postgres
Cargo.lock (generated): 13 changes
@@ -83,9 +83,9 @@ dependencies = [
 
 [[package]]
 name = "actix-http"
-version = "3.3.1"
+version = "3.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c2079246596c18b4a33e274ae10c0e50613f4d32a4198e09c7b93771013fed74"
+checksum = "a92ef85799cba03f76e4f7c10f533e66d87c9a7e7055f3391f09000ad8351bc9"
 dependencies = [
  "actix-codec",
  "actix-rt",
@@ -93,7 +93,7 @@ dependencies = [
  "actix-utils",
  "ahash 0.8.3",
  "base64 0.21.2",
- "bitflags 1.3.2",
+ "bitflags 2.4.0",
  "brotli",
  "bytes",
  "bytestring",
@@ -597,9 +597,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a"
 
 [[package]]
 name = "bitflags"
-version = "2.3.3"
+version = "2.4.0"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "630be753d4e58660abd17930c71b647fe46c27ea6b63cc59e1e3851406972e42"
+checksum = "b4682ae6287fcf752ecaabbfcc7b6f9b72aa33933dc23a554d853aea8eea8635"
 
 [[package]]
 name = "bitvec"
@@ -2230,6 +2230,7 @@ dependencies = [
  "actix",
  "actix-cors",
  "actix-files",
+ "actix-http",
  "actix-multipart",
  "actix-rt",
  "actix-web",
@@ -3596,7 +3597,7 @@ version = "0.38.3"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "ac5ffa1efe7548069688cd7028f32591853cd7b5b756d41bcffd2353e4fc75b4"
 dependencies = [
- "bitflags 2.3.3",
+ "bitflags 2.4.0",
  "errno",
  "libc",
  "linux-raw-sys 0.4.3",
@@ -91,4 +91,7 @@ color-thief = "0.2.2"
 
 woothee = "0.13.0"
 
 lettre = "0.10.4"
+
+[dev-dependencies]
+actix-http = "3.4.0"
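The new [dev-dependencies] entry exists so the test suite can build and inspect HTTP requests and responses directly. A minimal sketch of the style of actix-web integration test this enables; the /health route and handler here are hypothetical stand-ins, not part of this diff:

use actix_web::{test, web, App, HttpResponse};

#[actix_web::test]
async fn health_returns_success() {
    // Build an in-memory service; no TCP socket or running server is needed.
    let app = test::init_service(
        App::new().route("/health", web::get().to(|| async { HttpResponse::Ok().finish() })),
    )
    .await;

    // Drive a request through the service and assert on the response.
    let req = test::TestRequest::get().uri("/health").to_request();
    let resp = test::call_service(&app, req).await;
    assert!(resp.status().is_success());
}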
@@ -1,5 +1,26 @@
 {
   "db": "PostgreSQL",
+  "009bce5eee6ed65d9dc0899a4e24da528507a3f00b7ec997fa9ccdd7599655b1": {
+    "describe": {
+      "columns": [
+        {
+          "name": "id",
+          "ordinal": 0,
+          "type_info": "Int8"
+        }
+      ],
+      "nullable": [
+        false
+      ],
+      "parameters": {
+        "Left": [
+          "Int8",
+          "Text"
+        ]
+      }
+    },
+    "query": "\n        SELECT m.id FROM organizations o\n        INNER JOIN mods m ON m.organization_id = o.id\n        WHERE (o.id = $1 AND $1 IS NOT NULL) OR (o.title = $2 AND $2 IS NOT NULL)\n        "
+  },
   "010cafcafb6adc25b00e3c81d844736b0245e752a90334c58209d8a02536c800": {
     "describe": {
       "columns": [],
@@ -3669,6 +3690,19 @@
     },
     "query": "\n        SELECT n.id FROM notifications n\n        WHERE n.user_id = $1\n        "
   },
+  "7b6b76f383adcbe2afbd2a2e87e66fd2a0d9d05b68b27823c1395e7cc3b8c0a2": {
+    "describe": {
+      "columns": [],
+      "nullable": [],
+      "parameters": {
+        "Left": [
+          "Varchar",
+          "Int8"
+        ]
+      }
+    },
+    "query": "\n        UPDATE collections\n        SET status = $1\n        WHERE (id = $2)\n        "
+  },
   "7c0cdacf0898155c94008a96a0b918550df4475b9e3362a926d4d00e001880c1": {
     "describe": {
       "columns": [
@@ -3821,19 +3855,6 @@
     },
     "query": "\n        SELECT name FROM side_types\n        "
   },
-  "86049f204c9eda5241403d22b5f8ffe13b258ddfffb81a1a9ee8602e21c64723": {
-    "describe": {
-      "columns": [],
-      "nullable": [],
-      "parameters": {
-        "Left": [
-          "Varchar",
-          "Int8"
-        ]
-      }
-    },
-    "query": "\n        UPDATE collections\n        SET status = $1\n        WHERE (id = $2)\n        "
-  },
   "868ee76d507cc9e94cd3c2e44770faff127e2b3c5f49b8100a9a37ac4d7b1f1d": {
     "describe": {
       "columns": [],
@@ -6131,27 +6152,6 @@
     },
     "query": "\n        UPDATE versions\n        SET featured = $1\n        WHERE (id = $2)\n        "
   },
-  "e60561aeefbc2bed1f77ff4bbca763b5be84bd6bc3eff75ca57e3590be286d45": {
-    "describe": {
-      "columns": [
-        {
-          "name": "id",
-          "ordinal": 0,
-          "type_info": "Int8"
-        }
-      ],
-      "nullable": [
-        false
-      ],
-      "parameters": {
-        "Left": [
-          "Int8",
-          "Text"
-        ]
-      }
-    },
-    "query": "\n        SELECT m.id FROM organizations o\n        LEFT JOIN mods m ON m.id = o.id\n        WHERE (o.id = $1 AND $1 IS NOT NULL) OR (o.title = $2 AND $2 IS NOT NULL)\n        "
-  },
   "e60ea75112db37d3e73812e21b1907716e4762e06aa883af878e3be82e3f87d3": {
     "describe": {
       "columns": [
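These entries are the cached query metadata that SQLX_OFFLINE builds compile against. A short sketch of what that buys the CI job, using the side_types query that appears in this file; the helper function is illustrative, and it assumes side_types.name is NOT NULL so the generated field is a plain String:

use sqlx::PgPool;

// With SQLX_OFFLINE=true (as set in the workflow above), sqlx::query! is
// verified against the checked-in sqlx-data.json at compile time rather than
// against a live DATABASE_URL; regenerate the file with `cargo sqlx prepare`
// after changing queries.
async fn list_side_types(pool: &PgPool) -> Result<Vec<String>, sqlx::Error> {
    let rows = sqlx::query!("SELECT name FROM side_types")
        .fetch_all(pool)
        .await?;
    Ok(rows.into_iter().map(|r| r.name).collect())
}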
@@ -3,16 +3,17 @@ use crate::auth::session::issue_session;
 use crate::auth::validate::get_user_record_from_bearer_token;
 use crate::auth::{get_user_from_headers, AuthenticationError};
 use crate::database::models::flow_item::Flow;
+use crate::database::redis::RedisPool;
 use crate::file_hosting::FileHost;
 use crate::models::ids::base62_impl::{parse_base62, to_base62};
 use crate::models::ids::random_base62_rng;
 use crate::models::pats::Scopes;
 use crate::models::users::{Badges, Role};
-use crate::parse_strings_from_var;
 use crate::queue::session::AuthQueue;
 use crate::queue::socket::ActiveSockets;
 use crate::routes::ApiError;
 use crate::util::captcha::check_turnstile_captcha;
+use crate::util::env::parse_strings_from_var;
 use crate::util::ext::{get_image_content_type, get_image_ext};
 use crate::util::validate::{validation_errors_to_string, RE_URL_SAFE};
 use actix_web::web::{scope, Data, Payload, Query, ServiceConfig};
@@ -54,7 +55,7 @@ pub fn config(cfg: &mut ServiceConfig) {
     );
 }
 
-#[derive(Serialize, Deserialize, Default, Eq, PartialEq, Clone, Copy)]
+#[derive(Serialize, Deserialize, Default, Eq, PartialEq, Clone, Copy, Debug)]
 #[serde(rename_all = "lowercase")]
 pub enum AuthProvider {
     #[default]
@@ -84,7 +85,7 @@ impl TempUser {
         transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
         client: &PgPool,
         file_host: &Arc<dyn FileHost + Send + Sync>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<crate::database::models::UserId, AuthenticationError> {
         if let Some(email) = &self.email {
             if crate::database::models::User::get_email(email, client)
@@ -907,7 +908,7 @@ pub async fn init(
     req: HttpRequest,
     Query(info): Query<AuthorizationInit>, // callback url
     client: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, AuthenticationError> {
     let url = url::Url::parse(&info.url).map_err(|_| AuthenticationError::Url)?;
@@ -959,7 +960,7 @@ pub async fn ws_init(
     Query(info): Query<WsInit>,
     body: Payload,
     db: Data<RwLock<ActiveSockets>>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
 ) -> Result<HttpResponse, actix_web::Error> {
     let (res, session, _msg_stream) = actix_ws::handle(&req, body)?;
 
@@ -967,7 +968,7 @@ pub async fn ws_init(
     mut ws_stream: actix_ws::Session,
     info: WsInit,
     db: Data<RwLock<ActiveSockets>>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
 ) -> Result<(), Closed> {
     let flow = Flow::OAuth {
         user_id: None,
@@ -1003,7 +1004,7 @@ pub async fn auth_callback(
     active_sockets: Data<RwLock<ActiveSockets>>,
     client: Data<PgPool>,
     file_host: Data<Arc<dyn FileHost + Send + Sync>>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
 ) -> Result<HttpResponse, super::templates::ErrorPage> {
     let state_string = query
         .get("state")
@@ -1210,7 +1211,7 @@ pub struct DeleteAuthProvider {
 pub async fn delete_auth_provider(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     delete_provider: web::Json<DeleteAuthProvider>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
@@ -1297,7 +1298,7 @@ pub struct NewAccount {
 pub async fn create_account_with_password(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     new_account: web::Json<NewAccount>,
 ) -> Result<HttpResponse, ApiError> {
     new_account
@@ -1414,7 +1415,7 @@ pub struct Login {
 pub async fn login_password(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     login: web::Json<Login>,
 ) -> Result<HttpResponse, ApiError> {
     if !check_turnstile_captcha(&req, &login.challenge).await? {
@@ -1478,7 +1479,7 @@ async fn validate_2fa_code(
     secret: String,
     allow_backup: bool,
     user_id: crate::database::models::UserId,
-    redis: &deadpool_redis::Pool,
+    redis: &RedisPool,
     pool: &PgPool,
     transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
 ) -> Result<bool, AuthenticationError> {
@@ -1530,7 +1531,7 @@ async fn validate_2fa_code(
 pub async fn login_2fa(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     login: web::Json<Login2FA>,
 ) -> Result<HttpResponse, ApiError> {
     let flow = Flow::get(&login.flow, &redis)
@@ -1577,7 +1578,7 @@ pub async fn login_2fa(
 pub async fn begin_2fa_flow(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -1616,7 +1617,7 @@ pub async fn begin_2fa_flow(
 pub async fn finish_2fa_flow(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     login: web::Json<Login2FA>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
@@ -1739,7 +1740,7 @@ pub struct Remove2FA {
 pub async fn remove_2fa(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     login: web::Json<Remove2FA>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
@@ -1821,7 +1822,7 @@ pub struct ResetPassword {
 pub async fn reset_password_begin(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     reset_password: web::Json<ResetPassword>,
 ) -> Result<HttpResponse, ApiError> {
     if !check_turnstile_captcha(&req, &reset_password.challenge).await? {
@@ -1866,7 +1867,7 @@ pub struct ChangePassword {
 pub async fn change_password(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     change_password: web::Json<ChangePassword>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
@@ -2007,7 +2008,7 @@ pub struct SetEmail {
 pub async fn set_email(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     email: web::Json<SetEmail>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
@@ -2073,7 +2074,7 @@ pub async fn set_email(
 pub async fn resend_verify_email(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -2118,7 +2119,7 @@ pub struct VerifyEmail {
 #[post("email/verify")]
 pub async fn verify_email(
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     email: web::Json<VerifyEmail>,
 ) -> Result<HttpResponse, ApiError> {
     let flow = Flow::get(&email.flow, &redis).await?;
@@ -2168,7 +2169,7 @@ pub async fn verify_email(
 pub async fn subscribe_newsletter(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -4,6 +4,7 @@ use crate::database::models::generate_pat_id;
 use crate::auth::get_user_from_headers;
 use crate::routes::ApiError;
 
+use crate::database::redis::RedisPool;
 use actix_web::web::{self, Data};
 use actix_web::{delete, get, patch, post, HttpRequest, HttpResponse};
 use chrono::{DateTime, Utc};
@@ -30,7 +31,7 @@ pub fn config(cfg: &mut web::ServiceConfig) {
 pub async fn get_pats(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -73,14 +74,14 @@ pub async fn create_pat(
     req: HttpRequest,
     info: web::Json<NewPersonalAccessToken>,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     info.0
         .validate()
         .map_err(|err| ApiError::InvalidInput(validation_errors_to_string(err, None)))?;
 
-    if info.scopes.restricted() {
+    if info.scopes.is_restricted() {
         return Err(ApiError::InvalidInput(
             "Invalid scopes requested!".to_string(),
         ));
@@ -159,7 +160,7 @@ pub async fn edit_pat(
     id: web::Path<(String,)>,
     info: web::Json<ModifyPersonalAccessToken>,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -180,7 +181,7 @@ pub async fn edit_pat(
     let mut transaction = pool.begin().await?;
 
     if let Some(scopes) = &info.scopes {
-        if scopes.restricted() {
+        if scopes.is_restricted() {
             return Err(ApiError::InvalidInput(
                 "Invalid scopes requested!".to_string(),
             ));
@@ -248,7 +249,7 @@ pub async fn delete_pat(
     req: HttpRequest,
     id: web::Path<(String,)>,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let user = get_user_from_headers(
@@ -2,6 +2,7 @@ use crate::auth::{get_user_from_headers, AuthenticationError};
 use crate::database::models::session_item::Session as DBSession;
 use crate::database::models::session_item::SessionBuilder;
 use crate::database::models::UserId;
+use crate::database::redis::RedisPool;
 use crate::models::pats::Scopes;
 use crate::models::sessions::Session;
 use crate::queue::session::AuthQueue;
@@ -86,7 +87,7 @@ pub async fn issue_session(
     req: HttpRequest,
     user_id: UserId,
     transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
-    redis: &deadpool_redis::Pool,
+    redis: &RedisPool,
 ) -> Result<DBSession, AuthenticationError> {
     let metadata = get_session_metadata(&req).await?;
 
@@ -132,7 +133,7 @@ pub async fn issue_session(
 pub async fn list(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let current_user = get_user_from_headers(
@@ -167,7 +168,7 @@ pub async fn delete(
     info: web::Path<(String,)>,
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let current_user = get_user_from_headers(
@@ -206,7 +207,7 @@ pub async fn delete(
 pub async fn refresh(
     req: HttpRequest,
     pool: Data<PgPool>,
-    redis: Data<deadpool_redis::Pool>,
+    redis: Data<RedisPool>,
     session_queue: Data<AuthQueue>,
 ) -> Result<HttpResponse, ApiError> {
     let current_user = get_user_from_headers(&req, &**pool, &redis, &session_queue, None)
@@ -2,6 +2,7 @@ use crate::auth::flows::AuthProvider;
 use crate::auth::session::get_session_metadata;
 use crate::auth::AuthenticationError;
 use crate::database::models::user_item;
+use crate::database::redis::RedisPool;
 use crate::models::pats::Scopes;
 use crate::models::users::{Role, User, UserId, UserPayoutData};
 use crate::queue::session::AuthQueue;
@@ -12,7 +13,7 @@ use reqwest::header::{HeaderValue, AUTHORIZATION};
 pub async fn get_user_from_headers<'a, E>(
     req: &HttpRequest,
     executor: E,
-    redis: &deadpool_redis::Pool,
+    redis: &RedisPool,
     session_queue: &AuthQueue,
     required_scopes: Option<&[Scopes]>,
 ) -> Result<(Scopes, User), AuthenticationError>
@@ -82,7 +83,7 @@ pub async fn get_user_record_from_bearer_token<'a, 'b, E>(
     req: &HttpRequest,
     token: Option<&str>,
     executor: E,
-    redis: &deadpool_redis::Pool,
+    redis: &RedisPool,
     session_queue: &AuthQueue,
 ) -> Result<Option<(Scopes, user_item::User)>, AuthenticationError>
 where
@@ -140,7 +141,7 @@ where
                 session_queue.add_session(session.id, metadata).await;
             }
 
-            user.map(|x| (Scopes::ALL, x))
+            user.map(|x| (Scopes::all(), x))
         }
         Some(("github", _)) | Some(("gho", _)) | Some(("ghp", _)) => {
             let user = AuthProvider::GitHub.get_user(token).await?;
@@ -153,7 +154,7 @@ where
             )
             .await?;
 
-            user.map(|x| (Scopes::NOT_RESTRICTED, x))
+            user.map(|x| ((Scopes::all() ^ Scopes::restricted()), x))
         }
         _ => return Err(AuthenticationError::InvalidAuthMethod),
     };
@@ -163,13 +164,14 @@ where
 pub async fn check_is_moderator_from_headers<'a, 'b, E>(
     req: &HttpRequest,
     executor: E,
-    redis: &deadpool_redis::Pool,
+    redis: &RedisPool,
     session_queue: &AuthQueue,
+    required_scopes: Option<&[Scopes]>,
 ) -> Result<User, AuthenticationError>
 where
     E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
 {
-    let user = get_user_from_headers(req, executor, redis, session_queue, None)
+    let user = get_user_from_headers(req, executor, redis, session_queue, required_scopes)
        .await?
        .1;
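The diff also swaps the old Scopes::ALL and Scopes::NOT_RESTRICTED constants for method calls, and the PAT routes above now gate on is_restricted(). A plausible shape for that API, assuming Scopes is a bitflags type; the flag names here are placeholders, since the real labrinth Scopes has many more variants than this diff shows:

use bitflags::bitflags;

bitflags! {
    // Assumed sketch; Scopes::all() is generated by the bitflags macro,
    // which is what replaces the old Scopes::ALL constant.
    #[derive(Clone, Copy, PartialEq, Eq)]
    pub struct Scopes: u64 {
        const USER_READ      = 1 << 0;
        const PAT_WRITE      = 1 << 1;
        const SESSION_ACCESS = 1 << 2; // treated as restricted below
    }
}

impl Scopes {
    // Scopes only first-party sessions may hold; PATs must not request them.
    pub fn restricted() -> Scopes {
        Scopes::SESSION_ACCESS
    }

    // True if this scope set overlaps the restricted set, matching the
    // `info.scopes.is_restricted()` checks in the PAT routes.
    pub fn is_restricted(&self) -> bool {
        self.intersects(Self::restricted())
    }
}

Under this reading, `Scopes::all() ^ Scopes::restricted()` in the GitHub-token branch is exactly the old NOT_RESTRICTED set: every scope except the restricted ones.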
@@ -1,5 +1,6 @@
 pub mod models;
 mod postgres_database;
+pub mod redis;
 pub use models::Image;
 pub use models::Project;
 pub use models::Version;
@@ -1,13 +1,13 @@
+use crate::database::redis::RedisPool;
+
 use super::ids::*;
 use super::DatabaseError;
 use chrono::DateTime;
 use chrono::Utc;
 use futures::TryStreamExt;
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const TAGS_NAMESPACE: &str = "tags";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
 
 pub struct ProjectType {
     pub id: ProjectTypeId,
@@ -98,17 +98,12 @@ impl Category {
         Ok(result.map(|r| CategoryId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<Category>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<Category>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:category", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "category")
             .await?
             .and_then(|x| serde_json::from_str::<Vec<Category>>(&x).ok());
 
@@ -137,12 +132,13 @@ impl Category {
             .try_collect::<Vec<Category>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:category", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "category",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
@@ -167,17 +163,12 @@ impl Loader {
         Ok(result.map(|r| LoaderId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<Loader>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<Loader>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:loader", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "loader")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<Loader>>(&x).ok());
 
@@ -212,12 +203,13 @@ impl Loader {
             .try_collect::<Vec<_>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:loader", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "loader",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
@@ -256,17 +248,12 @@ impl GameVersion {
         Ok(result.map(|r| GameVersionId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<GameVersion>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<GameVersion>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:game_version", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "game_version")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<GameVersion>>(&x).ok());
 
@@ -291,14 +278,14 @@ impl GameVersion {
             .try_collect::<Vec<GameVersion>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:game_version", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "game_version",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
     }
 
@@ -306,7 +293,7 @@ impl GameVersion {
         version_type_option: Option<&str>,
         major_option: Option<bool>,
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<GameVersion>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -408,15 +395,13 @@ impl DonationPlatform {
 
     pub async fn list<'a, E>(
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<DonationPlatform>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:donation_platform", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "donation_platform")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<DonationPlatform>>(&x).ok());
 
@@ -440,12 +425,13 @@ impl DonationPlatform {
             .try_collect::<Vec<DonationPlatform>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:donation_platform", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "donation_platform",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
@@ -470,17 +456,12 @@ impl ReportType {
         Ok(result.map(|r| ReportTypeId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<String>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<String>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:report_type", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "report_type")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<String>>(&x).ok());
 
@@ -498,12 +479,13 @@ impl ReportType {
             .try_collect::<Vec<String>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:report_type", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "report_type",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
@@ -528,17 +510,12 @@ impl ProjectType {
         Ok(result.map(|r| ProjectTypeId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<String>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<String>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:project_type", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "project_type")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<String>>(&x).ok());
 
@@ -556,12 +533,13 @@ impl ProjectType {
             .try_collect::<Vec<String>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:project_type", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "project_type",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
@@ -586,17 +564,12 @@ impl SideType {
         Ok(result.map(|r| SideTypeId(r.id)))
     }
 
-    pub async fn list<'a, E>(
-        exec: E,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Vec<String>, DatabaseError>
+    pub async fn list<'a, E>(exec: E, redis: &RedisPool) -> Result<Vec<String>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:side_type", TAGS_NAMESPACE))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(TAGS_NAMESPACE, "side_type")
            .await?
            .and_then(|x| serde_json::from_str::<Vec<String>>(&x).ok());
 
@@ -614,12 +587,13 @@ impl SideType {
             .try_collect::<Vec<String>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:side_type", TAGS_NAMESPACE))
-            .arg(serde_json::to_string(&result)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                TAGS_NAMESPACE,
+                "side_type",
+                serde_json::to_string(&result)?,
+                None,
+            )
             .await?;
 
         Ok(result)
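From here on, every cache call site goes through a RedisPool wrapper instead of hand-built redis::cmd pipelines. The wrapper itself is not shown in this diff, so the following is only a sketch inferred from the call sites: it appears to own the `namespace:key` formatting, the deadpool connection checkout, and a default expiry when the caller passes None. All names and From-impl assumptions here are guesses at src/database/redis.rs, not the committed code:

use deadpool_redis::Pool;
use redis::cmd;

use crate::database::models::DatabaseError;

// Matches the DEFAULT_EXPIRY constants deleted from each model file.
const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes when the caller passes None

#[derive(Clone)]
pub struct RedisPool {
    pool: Pool,
}

impl RedisPool {
    fn format_key(namespace: &str, id: impl std::fmt::Display) -> String {
        format!("{}:{}", namespace, id)
    }

    // SET namespace:id data EX expiry; assumes DatabaseError has From impls
    // for the deadpool and redis error types.
    pub async fn set(
        &self,
        namespace: &str,
        id: impl std::fmt::Display,
        data: impl redis::ToRedisArgs,
        expiry: Option<i64>,
    ) -> Result<(), DatabaseError> {
        let mut conn = self.pool.get().await?;
        cmd("SET")
            .arg(Self::format_key(namespace, id))
            .arg(data)
            .arg("EX")
            .arg(expiry.unwrap_or(DEFAULT_EXPIRY))
            .query_async::<_, ()>(&mut conn)
            .await?;
        Ok(())
    }

    pub async fn get<R, Id>(&self, namespace: &str, id: Id) -> Result<Option<R>, DatabaseError>
    where
        R: redis::FromRedisValue,
        Id: std::fmt::Display,
    {
        let mut conn = self.pool.get().await?;
        Ok(cmd("GET")
            .arg(Self::format_key(namespace, id))
            .query_async::<_, Option<R>>(&mut conn)
            .await?)
    }

    pub async fn multi_get<R, Id>(
        &self,
        namespace: &str,
        ids: impl IntoIterator<Item = Id>,
    ) -> Result<Vec<Option<R>>, DatabaseError>
    where
        R: redis::FromRedisValue,
        Id: std::fmt::Display,
    {
        let mut conn = self.pool.get().await?;
        let keys: Vec<String> = ids
            .into_iter()
            .map(|id| Self::format_key(namespace, id))
            .collect();
        Ok(cmd("MGET")
            .arg(keys)
            .query_async::<_, Vec<Option<R>>>(&mut conn)
            .await?)
    }

    pub async fn delete(
        &self,
        namespace: &str,
        id: impl std::fmt::Display,
    ) -> Result<(), DatabaseError> {
        let mut conn = self.pool.get().await?;
        cmd("DEL")
            .arg(Self::format_key(namespace, id))
            .query_async::<_, ()>(&mut conn)
            .await?;
        Ok(())
    }
}

This shape explains why the per-file DEFAULT_EXPIRY constants and `let mut redis = redis.get().await?;` lines disappear in the hunks below: connection checkout and expiry both move behind the wrapper.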
@@ -1,13 +1,12 @@
 use super::ids::*;
 use crate::database::models;
 use crate::database::models::DatabaseError;
+use crate::database::redis::RedisPool;
 use crate::models::collections::CollectionStatus;
 use chrono::{DateTime, Utc};
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const COLLECTIONS_NAMESPACE: &str = "collections";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
 
 #[derive(Clone)]
 pub struct CollectionBuilder {
@@ -102,7 +101,7 @@ impl Collection {
     pub async fn remove(
         id: CollectionId,
         transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<()>, DatabaseError> {
         let collection = Self::get(id, &mut *transaction, redis).await?;
 
@@ -138,7 +137,7 @@ impl Collection {
     pub async fn get<'a, 'b, E>(
         id: CollectionId,
         executor: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<Collection>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -151,7 +150,7 @@ impl Collection {
     pub async fn get_many<'a, E>(
         collection_ids: &[CollectionId],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<Collection>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -162,20 +161,12 @@ impl Collection {
             return Ok(Vec::new());
         }
 
-        let mut redis = redis.get().await?;
-
         let mut found_collections = Vec::new();
         let mut remaining_collections: Vec<CollectionId> = collection_ids.to_vec();
 
         if !collection_ids.is_empty() {
-            let collections = cmd("MGET")
-                .arg(
-                    collection_ids
-                        .iter()
-                        .map(|x| format!("{}:{}", COLLECTIONS_NAMESPACE, x.0))
-                        .collect::<Vec<_>>(),
-                )
-                .query_async::<_, Vec<Option<String>>>(&mut redis)
+            let collections = redis
+                .multi_get::<String, _>(COLLECTIONS_NAMESPACE, collection_ids.iter().map(|x| x.0))
                 .await?;
 
             for collection in collections {
@@ -233,14 +224,14 @@ impl Collection {
             .await?;
 
             for collection in db_collections {
-                cmd("SET")
-                    .arg(format!("{}:{}", COLLECTIONS_NAMESPACE, collection.id.0))
-                    .arg(serde_json::to_string(&collection)?)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(
+                        COLLECTIONS_NAMESPACE,
+                        collection.id.0,
+                        serde_json::to_string(&collection)?,
+                        None,
+                    )
                     .await?;
 
                 found_collections.push(collection);
             }
         }
@@ -248,16 +239,8 @@ impl Collection {
         Ok(found_collections)
     }
 
-    pub async fn clear_cache(
-        id: CollectionId,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<(), DatabaseError> {
-        let mut redis = redis.get().await?;
-        let mut cmd = cmd("DEL");
-
-        cmd.arg(format!("{}:{}", COLLECTIONS_NAMESPACE, id.0));
-        cmd.query_async::<_, ()>(&mut redis).await?;
-
+    pub async fn clear_cache(id: CollectionId, redis: &RedisPool) -> Result<(), DatabaseError> {
+        redis.delete(COLLECTIONS_NAMESPACE, id.0).await?;
         Ok(())
     }
 }
@@ -1,12 +1,12 @@
 use super::ids::*;
 use crate::auth::flows::AuthProvider;
 use crate::database::models::DatabaseError;
+use crate::database::redis::RedisPool;
 use chrono::Duration;
 use rand::distributions::Alphanumeric;
 use rand::Rng;
 use rand_chacha::rand_core::SeedableRng;
 use rand_chacha::ChaCha20Rng;
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const FLOWS_NAMESPACE: &str = "flows";
@@ -40,50 +40,32 @@ impl Flow {
     pub async fn insert(
         &self,
         expires: Duration,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<String, DatabaseError> {
-        let mut redis = redis.get().await?;
-
         let flow = ChaCha20Rng::from_entropy()
             .sample_iter(&Alphanumeric)
             .take(32)
             .map(char::from)
             .collect::<String>();
 
-        cmd("SET")
-            .arg(format!("{}:{}", FLOWS_NAMESPACE, flow))
-            .arg(serde_json::to_string(&self)?)
-            .arg("EX")
-            .arg(expires.num_seconds())
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                FLOWS_NAMESPACE,
+                &flow,
+                serde_json::to_string(&self)?,
+                Some(expires.num_seconds()),
+            )
             .await?;
 
         Ok(flow)
     }
 
-    pub async fn get(
-        id: &str,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Option<Flow>, DatabaseError> {
-        let mut redis = redis.get().await?;
-
-        let res = cmd("GET")
-            .arg(format!("{}:{}", FLOWS_NAMESPACE, id))
-            .query_async::<_, Option<String>>(&mut redis)
-            .await?;
-
+    pub async fn get(id: &str, redis: &RedisPool) -> Result<Option<Flow>, DatabaseError> {
+        let res = redis.get::<String, _>(FLOWS_NAMESPACE, id).await?;
         Ok(res.and_then(|x| serde_json::from_str(&x).ok()))
     }
 
-    pub async fn remove(
-        id: &str,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<Option<()>, DatabaseError> {
-        let mut redis = redis.get().await?;
-        let mut cmd = cmd("DEL");
-        cmd.arg(format!("{}:{}", FLOWS_NAMESPACE, id));
-        cmd.query_async::<_, ()>(&mut redis).await?;
-
+    pub async fn remove(id: &str, redis: &RedisPool) -> Result<Option<()>, DatabaseError> {
+        redis.delete(FLOWS_NAMESPACE, id).await?;
        Ok(Some(()))
     }
 }
@@ -1,11 +1,10 @@
 use super::ids::*;
+use crate::database::redis::RedisPool;
 use crate::{database::models::DatabaseError, models::images::ImageContext};
 use chrono::{DateTime, Utc};
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const IMAGES_NAMESPACE: &str = "images";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
 
 #[derive(Clone, Debug, Serialize, Deserialize)]
 pub struct Image {
@@ -58,7 +57,7 @@ impl Image {
     pub async fn remove(
         id: ImageId,
         transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<()>, DatabaseError> {
         let image = Self::get(id, &mut *transaction, redis).await?;
 
@@ -161,7 +160,7 @@ impl Image {
     pub async fn get<'a, 'b, E>(
         id: ImageId,
         executor: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<Image>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -174,7 +173,7 @@ impl Image {
     pub async fn get_many<'a, E>(
         image_ids: &[ImageId],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<Image>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -185,24 +184,15 @@ impl Image {
             return Ok(Vec::new());
         }
 
-        let mut redis = redis.get().await?;
-
         let mut found_images = Vec::new();
         let mut remaining_ids = image_ids.to_vec();
 
         let image_ids = image_ids.iter().map(|x| x.0).collect::<Vec<_>>();
 
         if !image_ids.is_empty() {
-            let images = cmd("MGET")
-                .arg(
-                    image_ids
-                        .iter()
-                        .map(|x| format!("{}:{}", IMAGES_NAMESPACE, x))
-                        .collect::<Vec<_>>(),
-                )
-                .query_async::<_, Vec<Option<String>>>(&mut redis)
+            let images = redis
+                .multi_get::<String, _>(IMAGES_NAMESPACE, image_ids)
                 .await?;
 
             for image in images {
                 if let Some(image) = image.and_then(|x| serde_json::from_str::<Image>(&x).ok()) {
                     remaining_ids.retain(|x| image.id.0 != x.0);
@@ -245,14 +235,14 @@ impl Image {
             .await?;
 
             for image in db_images {
-                cmd("SET")
-                    .arg(format!("{}:{}", IMAGES_NAMESPACE, image.id.0))
-                    .arg(serde_json::to_string(&image)?)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(
+                        IMAGES_NAMESPACE,
+                        image.id.0,
+                        serde_json::to_string(&image)?,
+                        None,
+                    )
                     .await?;
 
                 found_images.push(image);
             }
         }
@@ -260,16 +250,8 @@ impl Image {
         Ok(found_images)
     }
 
-    pub async fn clear_cache(
-        id: ImageId,
-        redis: &deadpool_redis::Pool,
-    ) -> Result<(), DatabaseError> {
-        let mut redis = redis.get().await?;
-        let mut cmd = cmd("DEL");
-
-        cmd.arg(format!("{}:{}", IMAGES_NAMESPACE, id.0));
-        cmd.query_async::<_, ()>(&mut redis).await?;
-
+    pub async fn clear_cache(id: ImageId, redis: &RedisPool) -> Result<(), DatabaseError> {
+        redis.delete(IMAGES_NAMESPACE, id.0).await?;
         Ok(())
     }
 }
@@ -1,14 +1,14 @@
-use crate::models::ids::base62_impl::{parse_base62, to_base62};
+use crate::{
+    database::redis::RedisPool,
+    models::ids::base62_impl::{parse_base62, to_base62},
+};
 
 use super::{ids::*, TeamMember};
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const ORGANIZATIONS_NAMESPACE: &str = "organizations";
 const ORGANIZATIONS_TITLES_NAMESPACE: &str = "organizations_titles";
-
-const DEFAULT_EXPIRY: i64 = 1800;
 
 #[derive(Deserialize, Serialize, Clone, Debug)]
 /// An organization of users who together control one or more projects and organizations.
 pub struct Organization {
@@ -55,7 +55,7 @@ impl Organization {
     pub async fn get<'a, E>(
         string: &str,
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<Self>, super::DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -68,7 +68,7 @@ impl Organization {
     pub async fn get_id<'a, 'b, E>(
         id: OrganizationId,
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<Self>, super::DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -81,7 +81,7 @@ impl Organization {
     pub async fn get_many_ids<'a, 'b, E>(
         organization_ids: &[OrganizationId],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<Self>, super::DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -96,7 +96,7 @@ impl Organization {
     pub async fn get_many<'a, E, T: ToString>(
         organization_strings: &[T],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<Self>, super::DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -107,8 +107,6 @@ impl Organization {
             return Ok(Vec::new());
         }
 
-        let mut redis = redis.get().await?;
-
         let mut found_organizations = Vec::new();
         let mut remaining_strings = organization_strings
             .iter()
@@ -121,20 +119,13 @@ impl Organization {
             .collect::<Vec<_>>();
 
         organization_ids.append(
-            &mut cmd("MGET")
-                .arg(
-                    organization_strings
-                        .iter()
-                        .map(|x| {
-                            format!(
-                                "{}:{}",
-                                ORGANIZATIONS_TITLES_NAMESPACE,
-                                x.to_string().to_lowercase()
-                            )
-                        })
-                        .collect::<Vec<_>>(),
+            &mut redis
+                .multi_get::<i64, _>(
+                    ORGANIZATIONS_TITLES_NAMESPACE,
+                    organization_strings
+                        .iter()
+                        .map(|x| x.to_string().to_lowercase()),
                 )
-                .query_async::<_, Vec<Option<i64>>>(&mut redis)
                 .await?
                 .into_iter()
                 .flatten()
@@ -142,14 +133,8 @@ impl Organization {
         );
 
         if !organization_ids.is_empty() {
-            let organizations = cmd("MGET")
-                .arg(
-                    organization_ids
-                        .iter()
-                        .map(|x| format!("{}:{}", ORGANIZATIONS_NAMESPACE, x))
-                        .collect::<Vec<_>>(),
-                )
-                .query_async::<_, Vec<Option<String>>>(&mut redis)
+            let organizations = redis
+                .multi_get::<String, _>(ORGANIZATIONS_NAMESPACE, organization_ids)
                 .await?;
 
             for organization in organizations {
@@ -201,25 +186,23 @@ impl Organization {
             .await?;
 
             for organization in organizations {
-                cmd("SET")
-                    .arg(format!("{}:{}", ORGANIZATIONS_NAMESPACE, organization.id.0))
-                    .arg(serde_json::to_string(&organization)?)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(
+                        ORGANIZATIONS_NAMESPACE,
+                        organization.id.0,
+                        serde_json::to_string(&organization)?,
+                        None,
+                    )
                     .await?;
-
-                cmd("SET")
-                    .arg(format!(
-                        "{}:{}",
-                        ORGANIZATIONS_TITLES_NAMESPACE,
-                        organization.title.to_lowercase()
-                    ))
-                    .arg(organization.id.0)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(
+                        ORGANIZATIONS_TITLES_NAMESPACE,
+                        organization.title.to_lowercase(),
+                        organization.id.0,
+                        None,
+                    )
                     .await?;
                 found_organizations.push(organization);
             }
         }
@@ -265,7 +248,7 @@ impl Organization {
     pub async fn remove(
         id: OrganizationId,
         transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<()>, super::DatabaseError> {
         use futures::TryStreamExt;
 
@@ -333,20 +316,17 @@ impl Organization {
     pub async fn clear_cache(
         id: OrganizationId,
         title: Option<String>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<(), super::DatabaseError> {
-        let mut redis = redis.get().await?;
-        let mut cmd = cmd("DEL");
-        cmd.arg(format!("{}:{}", ORGANIZATIONS_NAMESPACE, id.0));
-        if let Some(title) = title {
-            cmd.arg(format!(
-                "{}:{}",
-                ORGANIZATIONS_TITLES_NAMESPACE,
-                title.to_lowercase()
-            ));
-        }
-        cmd.query_async::<_, ()>(&mut redis).await?;
-
+        redis
+            .delete_many([
+                (ORGANIZATIONS_NAMESPACE, Some(id.0.to_string())),
+                (
+                    ORGANIZATIONS_TITLES_NAMESPACE,
+                    title.map(|x| x.to_lowercase()),
+                ),
+            ])
+            .await?;
         Ok(())
     }
 }
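clear_cache here collapses a conditionally-built DEL into a single delete_many call that takes (namespace, Option<key>) pairs and silently skips the None entries. Continuing the assumed RedisPool sketch from above (again a guess at the unshown implementation, not the committed code):

impl RedisPool {
    pub async fn delete_many(
        &self,
        iter: impl IntoIterator<Item = (&'static str, Option<String>)>,
    ) -> Result<(), DatabaseError> {
        let mut conn = self.pool.get().await?;
        let mut del = cmd("DEL");
        let mut any = false;
        for (namespace, id) in iter {
            if let Some(id) = id {
                del.arg(Self::format_key(namespace, id));
                any = true;
            }
        }
        // DEL with zero keys is a protocol error, so skip the round trip
        // when every entry was None.
        if any {
            del.query_async::<_, ()>(&mut conn).await?;
        }
        Ok(())
    }
}

The Option-per-namespace shape is what lets call sites like this one (and the PAT clear_cache below) pass optional slugs, titles, and tokens without branching at every caller.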
@@ -1,17 +1,16 @@
 use super::ids::*;
 use crate::database::models::DatabaseError;
+use crate::database::redis::RedisPool;
 use crate::models::ids::base62_impl::{parse_base62, to_base62};
 use crate::models::pats::Scopes;
 use chrono::{DateTime, Utc};
-use redis::cmd;
 use serde::{Deserialize, Serialize};
 
 const PATS_NAMESPACE: &str = "pats";
 const PATS_TOKENS_NAMESPACE: &str = "pats_tokens";
 const PATS_USERS_NAMESPACE: &str = "pats_users";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
 
-#[derive(Deserialize, Serialize)]
+#[derive(Deserialize, Serialize, Clone, Debug)]
 pub struct PersonalAccessToken {
     pub id: PatId,
     pub name: String,
@@ -55,7 +54,7 @@ impl PersonalAccessToken {
     pub async fn get<'a, E, T: ToString>(
         id: T,
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Option<PersonalAccessToken>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -68,7 +67,7 @@ impl PersonalAccessToken {
     pub async fn get_many_ids<'a, E>(
         pat_ids: &[PatId],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<PersonalAccessToken>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -83,7 +82,7 @@ impl PersonalAccessToken {
     pub async fn get_many<'a, E, T: ToString>(
         pat_strings: &[T],
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<PersonalAccessToken>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -94,8 +93,6 @@ impl PersonalAccessToken {
             return Ok(Vec::new());
         }
 
-        let mut redis = redis.get().await?;
-
         let mut found_pats = Vec::new();
         let mut remaining_strings = pat_strings
             .iter()
@@ -108,14 +105,11 @@ impl PersonalAccessToken {
             .collect::<Vec<_>>();
 
         pat_ids.append(
-            &mut cmd("MGET")
-                .arg(
-                    pat_strings
-                        .iter()
-                        .map(|x| format!("{}:{}", PATS_TOKENS_NAMESPACE, x.to_string()))
-                        .collect::<Vec<_>>(),
+            &mut redis
+                .multi_get::<i64, _>(
+                    PATS_TOKENS_NAMESPACE,
+                    pat_strings.iter().map(|x| x.to_string()),
                 )
-                .query_async::<_, Vec<Option<i64>>>(&mut redis)
                 .await?
                 .into_iter()
                 .flatten()
@@ -123,16 +117,9 @@ impl PersonalAccessToken {
         );
 
         if !pat_ids.is_empty() {
-            let pats = cmd("MGET")
-                .arg(
-                    pat_ids
-                        .iter()
-                        .map(|x| format!("{}:{}", PATS_NAMESPACE, x))
-                        .collect::<Vec<_>>(),
-                )
-                .query_async::<_, Vec<Option<String>>>(&mut redis)
+            let pats = redis
+                .multi_get::<String, _>(PATS_NAMESPACE, pat_ids)
                 .await?;
 
             for pat in pats {
                 if let Some(pat) =
                     pat.and_then(|x| serde_json::from_str::<PersonalAccessToken>(&x).ok())
@@ -181,20 +168,16 @@ impl PersonalAccessToken {
             .await?;
 
             for pat in db_pats {
-                cmd("SET")
-                    .arg(format!("{}:{}", PATS_NAMESPACE, pat.id.0))
-                    .arg(serde_json::to_string(&pat)?)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(PATS_NAMESPACE, pat.id.0, serde_json::to_string(&pat)?, None)
                     .await?;
 
-                cmd("SET")
-                    .arg(format!("{}:{}", PATS_TOKENS_NAMESPACE, pat.access_token))
-                    .arg(pat.id.0)
-                    .arg("EX")
-                    .arg(DEFAULT_EXPIRY)
-                    .query_async::<_, ()>(&mut redis)
+                redis
+                    .set(
+                        PATS_TOKENS_NAMESPACE,
+                        pat.access_token.clone(),
+                        pat.id.0,
+                        None,
+                    )
                     .await?;
                 found_pats.push(pat);
             }
@@ -206,15 +189,13 @@ impl PersonalAccessToken {
     pub async fn get_user_pats<'a, E>(
         user_id: UserId,
         exec: E,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<Vec<PatId>, DatabaseError>
     where
         E: sqlx::Executor<'a, Database = sqlx::Postgres>,
     {
-        let mut redis = redis.get().await?;
-        let res = cmd("GET")
-            .arg(format!("{}:{}", PATS_USERS_NAMESPACE, user_id.0))
-            .query_async::<_, Option<String>>(&mut redis)
+        let res = redis
+            .get::<String, _>(PATS_USERS_NAMESPACE, user_id.0)
            .await?
            .and_then(|x| serde_json::from_str::<Vec<i64>>(&x).ok());
 
@@ -237,41 +218,34 @@ impl PersonalAccessToken {
             .try_collect::<Vec<PatId>>()
             .await?;
 
-        cmd("SET")
-            .arg(format!("{}:{}", PATS_USERS_NAMESPACE, user_id.0))
-            .arg(serde_json::to_string(&db_pats)?)
-            .arg("EX")
-            .arg(DEFAULT_EXPIRY)
-            .query_async::<_, ()>(&mut redis)
+        redis
+            .set(
+                PATS_USERS_NAMESPACE,
+                user_id.0,
+                serde_json::to_string(&db_pats)?,
+                None,
+            )
             .await?;
 
         Ok(db_pats)
     }
 
     pub async fn clear_cache(
         clear_pats: Vec<(Option<PatId>, Option<String>, Option<UserId>)>,
-        redis: &deadpool_redis::Pool,
+        redis: &RedisPool,
     ) -> Result<(), DatabaseError> {
         if clear_pats.is_empty() {
             return Ok(());
         }
 
-        let mut redis = redis.get().await?;
-        let mut cmd = cmd("DEL");
-
-        for (id, token, user_id) in clear_pats {
-            if let Some(id) = id {
-                cmd.arg(format!("{}:{}", PATS_NAMESPACE, id.0));
-            }
-            if let Some(token) = token {
-                cmd.arg(format!("{}:{}", PATS_TOKENS_NAMESPACE, token));
-            }
-            if let Some(user_id) = user_id {
-                cmd.arg(format!("{}:{}", PATS_USERS_NAMESPACE, user_id.0));
-            }
-        }
-
-        cmd.query_async::<_, ()>(&mut redis).await?;
+        redis
+            .delete_many(clear_pats.into_iter().flat_map(|(id, token, user_id)| {
+                [
+                    (PATS_NAMESPACE, id.map(|i| i.0.to_string())),
+                    (PATS_TOKENS_NAMESPACE, token),
+                    (PATS_USERS_NAMESPACE, user_id.map(|i| i.0.to_string())),
+                ]
+            }))
+            .await?;
 
         Ok(())
     }
@@ -1,16 +1,15 @@
|
||||
use super::ids::*;
|
||||
use crate::database::models;
|
||||
use crate::database::models::DatabaseError;
|
||||
use crate::database::redis::RedisPool;
|
||||
use crate::models::ids::base62_impl::{parse_base62, to_base62};
|
||||
use crate::models::projects::{MonetizationStatus, ProjectStatus};
|
||||
use chrono::{DateTime, Utc};
|
||||
use redis::cmd;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
const PROJECTS_NAMESPACE: &str = "projects";
|
||||
const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs";
|
||||
pub const PROJECTS_NAMESPACE: &str = "projects";
|
||||
pub const PROJECTS_SLUGS_NAMESPACE: &str = "projects_slugs";
|
||||
const PROJECTS_DEPENDENCIES_NAMESPACE: &str = "projects_dependencies";
|
||||
const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes
|
||||
|
||||
#[derive(Clone, Debug, Serialize, Deserialize)]
|
||||
pub struct DonationUrl {
|
||||
@@ -299,7 +298,7 @@ impl Project {
|
||||
pub async fn remove(
|
||||
id: ProjectId,
|
||||
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
|
||||
redis: &deadpool_redis::Pool,
|
||||
redis: &RedisPool,
|
||||
) -> Result<Option<()>, DatabaseError> {
|
||||
        let project = Self::get_id(id, &mut *transaction, redis).await?;

@@ -433,7 +432,7 @@ impl Project {
    pub async fn get<'a, 'b, E>(
        string: &str,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<QueryProject>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -446,7 +445,7 @@ impl Project {
    pub async fn get_id<'a, 'b, E>(
        id: ProjectId,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<QueryProject>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -459,7 +458,7 @@ impl Project {
    pub async fn get_many_ids<'a, E>(
        project_ids: &[ProjectId],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<QueryProject>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -474,7 +473,7 @@ impl Project {
    pub async fn get_many<'a, E, T: ToString>(
        project_strings: &[T],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<QueryProject>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -485,8 +484,6 @@ impl Project {
            return Ok(Vec::new());
        }

-       let mut redis = redis.get().await?;
-
        let mut found_projects = Vec::new();
        let mut remaining_strings = project_strings
            .iter()
@@ -499,20 +496,11 @@ impl Project {
            .collect::<Vec<_>>();

        project_ids.append(
-           &mut cmd("MGET")
-               .arg(
-                   project_strings
-                       .iter()
-                       .map(|x| {
-                           format!(
-                               "{}:{}",
-                               PROJECTS_SLUGS_NAMESPACE,
-                               x.to_string().to_lowercase()
-                           )
-                       })
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<i64>>>(&mut redis)
+           &mut redis
+               .multi_get::<i64, _>(
+                   PROJECTS_SLUGS_NAMESPACE,
+                   project_strings.iter().map(|x| x.to_string().to_lowercase()),
+               )
                .await?
                .into_iter()
                .flatten()
@@ -520,16 +508,9 @@ impl Project {
        );

        if !project_ids.is_empty() {
-           let projects = cmd("MGET")
-               .arg(
-                   project_ids
-                       .iter()
-                       .map(|x| format!("{}:{}", PROJECTS_NAMESPACE, x))
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<String>>>(&mut redis)
+           let projects = redis
+               .multi_get::<String, _>(PROJECTS_NAMESPACE, project_ids)
                .await?;

            for project in projects {
                if let Some(project) =
                    project.and_then(|x| serde_json::from_str::<QueryProject>(&x).ok())
@@ -551,7 +532,6 @@ impl Project {
                .flat_map(|x| parse_base62(&x.to_string()).ok())
                .map(|x| x as i64)
                .collect();

            let db_projects: Vec<QueryProject> = sqlx::query!(
                "
                SELECT m.id id, m.project_type project_type, m.title title, m.description description, m.downloads downloads, m.follows follows,
@@ -672,25 +652,22 @@ impl Project {
            .await?;

            for project in db_projects {
-               cmd("SET")
-                   .arg(format!("{}:{}", PROJECTS_NAMESPACE, project.inner.id.0))
-                   .arg(serde_json::to_string(&project)?)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       PROJECTS_NAMESPACE,
+                       project.inner.id.0,
+                       serde_json::to_string(&project)?,
+                       None,
+                   )
                    .await?;

                if let Some(slug) = &project.inner.slug {
-                   cmd("SET")
-                       .arg(format!(
-                           "{}:{}",
-                           PROJECTS_SLUGS_NAMESPACE,
-                           slug.to_lowercase()
-                       ))
-                       .arg(project.inner.id.0)
-                       .arg("EX")
-                       .arg(DEFAULT_EXPIRY)
-                       .query_async::<_, ()>(&mut redis)
+                   redis
+                       .set(
+                           PROJECTS_SLUGS_NAMESPACE,
+                           slug.to_lowercase(),
+                           project.inner.id.0,
+                           None,
+                       )
                        .await?;
                }
                found_projects.push(project);
@@ -703,7 +680,7 @@ impl Project {
    pub async fn get_dependencies<'a, E>(
        id: ProjectId,
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<(Option<VersionId>, Option<ProjectId>, Option<ProjectId>)>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -712,13 +689,9 @@ impl Project {

        use futures::stream::TryStreamExt;

-       let mut redis = redis.get().await?;
-
-       let dependencies = cmd("GET")
-           .arg(format!("{}:{}", PROJECTS_DEPENDENCIES_NAMESPACE, id.0))
-           .query_async::<_, Option<String>>(&mut redis)
+       let dependencies = redis
+           .get::<String, _>(PROJECTS_DEPENDENCIES_NAMESPACE, id.0)
            .await?;

        if let Some(dependencies) =
            dependencies.and_then(|x| serde_json::from_str::<Dependencies>(&x).ok())
        {
@@ -752,14 +725,14 @@ impl Project {
            .try_collect::<Dependencies>()
            .await?;

-       cmd("SET")
-           .arg(format!("{}:{}", PROJECTS_DEPENDENCIES_NAMESPACE, id.0))
-           .arg(serde_json::to_string(&dependencies)?)
-           .arg("EX")
-           .arg(DEFAULT_EXPIRY)
-           .query_async::<_, ()>(&mut redis)
+       redis
+           .set(
+               PROJECTS_DEPENDENCIES_NAMESPACE,
+               id.0,
+               serde_json::to_string(&dependencies)?,
+               None,
+           )
            .await?;

        Ok(dependencies)
    }

@@ -817,25 +790,22 @@ impl Project {
        id: ProjectId,
        slug: Option<String>,
        clear_dependencies: Option<bool>,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<(), DatabaseError> {
-       let mut redis = redis.get().await?;
-       let mut cmd = cmd("DEL");
-
-       cmd.arg(format!("{}:{}", PROJECTS_NAMESPACE, id.0));
-       if let Some(slug) = slug {
-           cmd.arg(format!(
-               "{}:{}",
-               PROJECTS_SLUGS_NAMESPACE,
-               slug.to_lowercase()
-           ));
-       }
-       if clear_dependencies.unwrap_or(false) {
-           cmd.arg(format!("{}:{}", PROJECTS_DEPENDENCIES_NAMESPACE, id.0));
-       }
-
-       cmd.query_async::<_, ()>(&mut redis).await?;
-
+       redis
+           .delete_many([
+               (PROJECTS_NAMESPACE, Some(id.0.to_string())),
+               (PROJECTS_SLUGS_NAMESPACE, slug.map(|x| x.to_lowercase())),
+               (
+                   PROJECTS_DEPENDENCIES_NAMESPACE,
+                   if clear_dependencies.unwrap_or(false) {
+                       Some(id.0.to_string())
+                   } else {
+                       None
+                   },
+               ),
+           ])
+           .await?;
        Ok(())
    }
}

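Every cached model in this commit gets the same mechanical rewrite (sessions, teams, users, and versions follow below): the hand-rolled `redis::cmd` pipelines, each repeating `{namespace}:{id}` key formatting and an `EX` expiry argument, collapse into calls on the `RedisPool` wrapper added in `src/database/redis.rs` further down. A minimal sketch of the resulting call-site shape; `EXAMPLE_NAMESPACE`, `id`, and `json` are placeholders for illustration, not names from this commit:

    const EXAMPLE_NAMESPACE: &str = "example";

    async fn cache_json(
        redis: &crate::database::redis::RedisPool,
        id: i64,
        json: String,
    ) -> Result<(), crate::database::models::DatabaseError> {
        // One call now covers connection checkout, meta-namespaced key
        // construction, and expiry; None falls back to the 30-minute default.
        redis.set(EXAMPLE_NAMESPACE, id, json, None).await?;

        // Reads go through the same namespaced helper; a cache miss is None.
        let cached: Option<String> = redis.get(EXAMPLE_NAMESPACE, id).await?;
        debug_assert!(cached.is_some());
        Ok(())
    }
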
@@ -1,14 +1,13 @@
use super::ids::*;
use crate::database::models::DatabaseError;
+use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use chrono::{DateTime, Utc};
-use redis::cmd;
use serde::{Deserialize, Serialize};

const SESSIONS_NAMESPACE: &str = "sessions";
const SESSIONS_IDS_NAMESPACE: &str = "sessions_ids";
const SESSIONS_USERS_NAMESPACE: &str = "sessions_users";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes

pub struct SessionBuilder {
    pub session: String,
@@ -83,7 +82,7 @@ impl Session {
    pub async fn get<'a, E, T: ToString>(
        id: T,
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<Session>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -96,7 +95,7 @@ impl Session {
    pub async fn get_id<'a, 'b, E>(
        id: SessionId,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<Session>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -109,7 +108,7 @@ impl Session {
    pub async fn get_many_ids<'a, E>(
        session_ids: &[SessionId],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<Session>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -124,7 +123,7 @@ impl Session {
    pub async fn get_many<'a, E, T: ToString>(
        session_strings: &[T],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<Session>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -135,8 +134,6 @@ impl Session {
            return Ok(Vec::new());
        }

-       let mut redis = redis.get().await?;
-
        let mut found_sessions = Vec::new();
        let mut remaining_strings = session_strings
            .iter()
@@ -149,14 +146,11 @@ impl Session {
            .collect::<Vec<_>>();

        session_ids.append(
-           &mut cmd("MGET")
-               .arg(
-                   session_strings
-                       .iter()
-                       .map(|x| format!("{}:{}", SESSIONS_IDS_NAMESPACE, x.to_string()))
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<i64>>>(&mut redis)
+           &mut redis
+               .multi_get::<i64, _>(
+                   SESSIONS_IDS_NAMESPACE,
+                   session_strings.iter().map(|x| x.to_string()),
+               )
                .await?
                .into_iter()
                .flatten()
@@ -164,16 +158,9 @@ impl Session {
        );

        if !session_ids.is_empty() {
-           let sessions = cmd("MGET")
-               .arg(
-                   session_ids
-                       .iter()
-                       .map(|x| format!("{}:{}", SESSIONS_NAMESPACE, x))
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<String>>>(&mut redis)
+           let sessions = redis
+               .multi_get::<String, _>(SESSIONS_NAMESPACE, session_ids)
                .await?;

            for session in sessions {
                if let Some(session) =
                    session.and_then(|x| serde_json::from_str::<Session>(&x).ok())
@@ -225,20 +212,21 @@ impl Session {
            .await?;

            for session in db_sessions {
-               cmd("SET")
-                   .arg(format!("{}:{}", SESSIONS_NAMESPACE, session.id.0))
-                   .arg(serde_json::to_string(&session)?)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       SESSIONS_NAMESPACE,
+                       session.id.0,
+                       serde_json::to_string(&session)?,
+                       None,
+                   )
                    .await?;

-               cmd("SET")
-                   .arg(format!("{}:{}", SESSIONS_IDS_NAMESPACE, session.session))
-                   .arg(session.id.0)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       SESSIONS_IDS_NAMESPACE,
+                       session.session.clone(),
+                       session.id.0,
+                       None,
+                   )
                    .await?;
                found_sessions.push(session);
            }
@@ -250,15 +238,13 @@ impl Session {
    pub async fn get_user_sessions<'a, E>(
        user_id: UserId,
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<SessionId>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
    {
-       let mut redis = redis.get().await?;
-       let res = cmd("GET")
-           .arg(format!("{}:{}", SESSIONS_USERS_NAMESPACE, user_id.0))
-           .query_async::<_, Option<String>>(&mut redis)
+       let res = redis
+           .get::<String, _>(SESSIONS_USERS_NAMESPACE, user_id.0)
            .await?
            .and_then(|x| serde_json::from_str::<Vec<i64>>(&x).ok());

@@ -281,12 +267,13 @@ impl Session {
            .try_collect::<Vec<SessionId>>()
            .await?;

-       cmd("SET")
-           .arg(format!("{}:{}", SESSIONS_USERS_NAMESPACE, user_id.0))
-           .arg(serde_json::to_string(&db_sessions)?)
-           .arg("EX")
-           .arg(DEFAULT_EXPIRY)
-           .query_async::<_, ()>(&mut redis)
+       redis
+           .set(
+               SESSIONS_USERS_NAMESPACE,
+               user_id.0,
+               serde_json::to_string(&db_sessions)?,
+               None,
+           )
            .await?;

        Ok(db_sessions)
@@ -294,29 +281,25 @@ impl Session {

    pub async fn clear_cache(
        clear_sessions: Vec<(Option<SessionId>, Option<String>, Option<UserId>)>,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<(), DatabaseError> {
        if clear_sessions.is_empty() {
            return Ok(());
        }

-       let mut redis = redis.get().await?;
-       let mut cmd = cmd("DEL");
-
-       for (id, session, user_id) in clear_sessions {
-           if let Some(id) = id {
-               cmd.arg(format!("{}:{}", SESSIONS_NAMESPACE, id.0));
-           }
-           if let Some(session) = session {
-               cmd.arg(format!("{}:{}", SESSIONS_IDS_NAMESPACE, session));
-           }
-           if let Some(user_id) = user_id {
-               cmd.arg(format!("{}:{}", SESSIONS_USERS_NAMESPACE, user_id.0));
-           }
-       }
-
-       cmd.query_async::<_, ()>(&mut redis).await?;
-
+       redis
+           .delete_many(
+               clear_sessions
+                   .into_iter()
+                   .flat_map(|(id, session, user_id)| {
+                       [
+                           (SESSIONS_NAMESPACE, id.map(|i| i.0.to_string())),
+                           (SESSIONS_IDS_NAMESPACE, session),
+                           (SESSIONS_USERS_NAMESPACE, user_id.map(|i| i.0.to_string())),
+                       ]
+                   }),
+           )
+           .await?;
        Ok(())
    }

@@ -1,12 +1,13 @@
use super::{ids::*, Organization, Project};
-use crate::models::teams::{OrganizationPermissions, ProjectPermissions};
+use crate::{
+    database::redis::RedisPool,
+    models::teams::{OrganizationPermissions, ProjectPermissions},
+};
use itertools::Itertools;
-use redis::cmd;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};

const TEAMS_NAMESPACE: &str = "teams";
-const DEFAULT_EXPIRY: i64 = 1800;

pub struct TeamBuilder {
    pub members: Vec<TeamMemberBuilder>,
@@ -145,7 +146,7 @@ impl TeamMember {
    pub async fn get_from_team_full<'a, 'b, E>(
        id: TeamId,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<TeamMember>, super::DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
@@ -156,7 +157,7 @@ impl TeamMember {
    pub async fn get_from_team_full_many<'a, E>(
        team_ids: &[TeamId],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<TeamMember>, super::DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
@@ -169,18 +170,10 @@ impl TeamMember {

        let mut team_ids_parsed: Vec<i64> = team_ids.iter().map(|x| x.0).collect();

-       let mut redis = redis.get().await?;
-
        let mut found_teams = Vec::new();

-       let teams = cmd("MGET")
-           .arg(
-               team_ids_parsed
-                   .iter()
-                   .map(|x| format!("{}:{}", TEAMS_NAMESPACE, x))
-                   .collect::<Vec<_>>(),
-           )
-           .query_async::<_, Vec<Option<String>>>(&mut redis)
+       let teams = redis
+           .multi_get::<String, _>(TEAMS_NAMESPACE, team_ids_parsed.clone())
            .await?;

        for team_raw in teams {
@@ -232,14 +225,14 @@ impl TeamMember {
        for (id, members) in &teams.into_iter().group_by(|x| x.team_id) {
            let mut members = members.collect::<Vec<_>>();

-           cmd("SET")
-               .arg(format!("{}:{}", TEAMS_NAMESPACE, id.0))
-               .arg(serde_json::to_string(&members)?)
-               .arg("EX")
-               .arg(DEFAULT_EXPIRY)
-               .query_async::<_, ()>(&mut redis)
+           redis
+               .set(
+                   TEAMS_NAMESPACE,
+                   id.0,
+                   serde_json::to_string(&members)?,
+                   None,
+               )
                .await?;

            found_teams.append(&mut members);
        }
    }
@@ -247,16 +240,8 @@ impl TeamMember {
        Ok(found_teams)
    }

-   pub async fn clear_cache(
-       id: TeamId,
-       redis: &deadpool_redis::Pool,
-   ) -> Result<(), super::DatabaseError> {
-       let mut redis = redis.get().await?;
-       cmd("DEL")
-           .arg(format!("{}:{}", TEAMS_NAMESPACE, id.0))
-           .query_async::<_, ()>(&mut redis)
-           .await?;
-
+   pub async fn clear_cache(id: TeamId, redis: &RedisPool) -> Result<(), super::DatabaseError> {
+       redis.delete(TEAMS_NAMESPACE, id.0).await?;
        Ok(())
    }

@@ -2,7 +2,7 @@ use super::ids::*;
use crate::database::models::DatabaseError;
use crate::models::threads::{MessageBody, ThreadType};
use chrono::{DateTime, Utc};
-use serde::Deserialize;
+use serde::{Deserialize, Serialize};

pub struct ThreadBuilder {
    pub type_: ThreadType,
@@ -11,7 +11,7 @@ pub struct ThreadBuilder {
    pub report_id: Option<ReportId>,
}

-#[derive(Clone)]
+#[derive(Clone, Serialize)]
pub struct Thread {
    pub id: ThreadId,

@@ -30,7 +30,7 @@ pub struct ThreadMessageBuilder {
    pub thread_id: ThreadId,
}

-#[derive(Deserialize, Clone)]
+#[derive(Serialize, Deserialize, Clone)]
pub struct ThreadMessage {
    pub id: ThreadMessageId,
    pub thread_id: ThreadId,

@@ -1,17 +1,16 @@
use super::ids::{ProjectId, UserId};
use super::CollectionId;
use crate::database::models::DatabaseError;
+use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::users::{Badges, RecipientType, RecipientWallet};
use chrono::{DateTime, Utc};
-use redis::cmd;
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};

const USERS_NAMESPACE: &str = "users";
const USER_USERNAMES_NAMESPACE: &str = "users_usernames";
// const USERS_PROJECTS_NAMESPACE: &str = "users_projects";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes

#[derive(Deserialize, Serialize, Clone, Debug)]
pub struct User {
@@ -87,7 +86,7 @@ impl User {
    pub async fn get<'a, 'b, E>(
        string: &str,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<User>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -100,7 +99,7 @@ impl User {
    pub async fn get_id<'a, 'b, E>(
        id: UserId,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<User>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -113,7 +112,7 @@ impl User {
    pub async fn get_many_ids<'a, E>(
        user_ids: &[UserId],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<User>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -128,7 +127,7 @@ impl User {
    pub async fn get_many<'a, E, T: ToString>(
        users_strings: &[T],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<User>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -139,8 +138,6 @@ impl User {
            return Ok(Vec::new());
        }

-       let mut redis = redis.get().await?;
-
        let mut found_users = Vec::new();
        let mut remaining_strings = users_strings
            .iter()
@@ -153,20 +150,11 @@ impl User {
            .collect::<Vec<_>>();

        user_ids.append(
-           &mut cmd("MGET")
-               .arg(
-                   users_strings
-                       .iter()
-                       .map(|x| {
-                           format!(
-                               "{}:{}",
-                               USER_USERNAMES_NAMESPACE,
-                               x.to_string().to_lowercase()
-                           )
-                       })
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<i64>>>(&mut redis)
+           &mut redis
+               .multi_get::<i64, _>(
+                   USER_USERNAMES_NAMESPACE,
+                   users_strings.iter().map(|x| x.to_string().to_lowercase()),
+               )
                .await?
                .into_iter()
                .flatten()
@@ -174,16 +162,9 @@ impl User {
        );

        if !user_ids.is_empty() {
-           let users = cmd("MGET")
-               .arg(
-                   user_ids
-                       .iter()
-                       .map(|x| format!("{}:{}", USERS_NAMESPACE, x))
-                       .collect::<Vec<_>>(),
-               )
-               .query_async::<_, Vec<Option<String>>>(&mut redis)
+           let users = redis
+               .multi_get::<String, _>(USERS_NAMESPACE, user_ids)
                .await?;

            for user in users {
                if let Some(user) = user.and_then(|x| serde_json::from_str::<User>(&x).ok()) {
                    remaining_strings.retain(|x| {
@@ -252,24 +233,21 @@ impl User {
            .await?;

            for user in db_users {
-               cmd("SET")
-                   .arg(format!("{}:{}", USERS_NAMESPACE, user.id.0))
-                   .arg(serde_json::to_string(&user)?)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       USERS_NAMESPACE,
+                       user.id.0,
+                       serde_json::to_string(&user)?,
+                       None,
+                   )
                    .await?;

-               cmd("SET")
-                   .arg(format!(
-                       "{}:{}",
-                       USER_USERNAMES_NAMESPACE,
-                       user.username.to_lowercase()
-                   ))
-                   .arg(user.id.0)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       USER_USERNAMES_NAMESPACE,
+                       user.username.to_lowercase(),
+                       user.id.0,
+                       None,
+                   )
                    .await?;
                found_users.push(user);
            }
@@ -371,24 +349,19 @@ impl User {

    pub async fn clear_caches(
        user_ids: &[(UserId, Option<String>)],
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<(), DatabaseError> {
-       let mut redis = redis.get().await?;
-       let mut cmd = cmd("DEL");
-
-       for (id, username) in user_ids {
-           cmd.arg(format!("{}:{}", USERS_NAMESPACE, id.0));
-           if let Some(username) = username {
-               cmd.arg(format!(
-                   "{}:{}",
-                   USER_USERNAMES_NAMESPACE,
-                   username.to_lowercase()
-               ));
-           }
-       }
-
-       cmd.query_async::<_, ()>(&mut redis).await?;
-
+       redis
+           .delete_many(user_ids.into_iter().flat_map(|(id, username)| {
+               [
+                   (USERS_NAMESPACE, Some(id.0.to_string())),
+                   (
+                       USER_USERNAMES_NAMESPACE,
+                       username.clone().map(|i| i.to_lowercase()),
+                   ),
+               ]
+           }))
+           .await?;
        Ok(())
    }

@@ -396,7 +369,7 @@ impl User {
        id: UserId,
        full: bool,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<()>, DatabaseError> {
        let user = Self::get_id(id, &mut *transaction, redis).await?;

@@ -1,16 +1,16 @@
use super::ids::*;
use super::DatabaseError;
+use crate::database::redis::RedisPool;
use crate::models::projects::{FileType, VersionStatus};
use chrono::{DateTime, Utc};
use itertools::Itertools;
-use redis::cmd;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
use std::collections::HashMap;
+use std::iter;

const VERSIONS_NAMESPACE: &str = "versions";
const VERSION_FILES_NAMESPACE: &str = "versions_files";
-const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes

#[derive(Clone)]
pub struct VersionBuilder {
@@ -78,7 +78,7 @@ impl DependencyBuilder {
    }
}

-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct VersionFileBuilder {
    pub url: String,
    pub filename: String,
@@ -130,7 +130,7 @@ impl VersionFileBuilder {
    }
}

-#[derive(Clone)]
+#[derive(Clone, Debug)]
pub struct HashBuilder {
    pub algorithm: String,
    pub hash: Vec<u8>,
@@ -263,7 +263,7 @@ impl Version {

    pub async fn remove_full(
        id: VersionId,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
        transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    ) -> Result<Option<()>, DatabaseError> {
        let result = Self::get(id, &mut *transaction, redis).await?;
@@ -398,7 +398,7 @@ impl Version {
    pub async fn get<'a, 'b, E>(
        id: VersionId,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<QueryVersion>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -411,7 +411,7 @@ impl Version {
    pub async fn get_many<'a, E>(
        version_ids: &[VersionId],
        exec: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<QueryVersion>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres>,
@@ -424,18 +424,10 @@ impl Version {

        let mut version_ids_parsed: Vec<i64> = version_ids.iter().map(|x| x.0).collect();

-       let mut redis = redis.get().await?;
-
        let mut found_versions = Vec::new();

-       let versions = cmd("MGET")
-           .arg(
-               version_ids_parsed
-                   .iter()
-                   .map(|x| format!("{}:{}", VERSIONS_NAMESPACE, x))
-                   .collect::<Vec<_>>(),
-           )
-           .query_async::<_, Vec<Option<String>>>(&mut redis)
+       let versions = redis
+           .multi_get::<String, _>(VERSIONS_NAMESPACE, version_ids_parsed.clone())
            .await?;

        for version in versions {
@@ -588,12 +580,13 @@ impl Version {
            .await?;

            for version in db_versions {
-               cmd("SET")
-                   .arg(format!("{}:{}", VERSIONS_NAMESPACE, version.inner.id.0))
-                   .arg(serde_json::to_string(&version)?)
-                   .arg("EX")
-                   .arg(DEFAULT_EXPIRY)
-                   .query_async::<_, ()>(&mut redis)
+               redis
+                   .set(
+                       VERSIONS_NAMESPACE,
+                       version.inner.id.0,
+                       serde_json::to_string(&version)?,
+                       None,
+                   )
                    .await?;

                found_versions.push(version);
@@ -608,7 +601,7 @@ impl Version {
        hash: String,
        version_id: Option<VersionId>,
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Option<SingleFile>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
@@ -625,7 +618,7 @@ impl Version {
        algorithm: String,
        hashes: &[String],
        executor: E,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<Vec<SingleFile>, DatabaseError>
    where
        E: sqlx::Executor<'a, Database = sqlx::Postgres> + Copy,
@@ -638,18 +631,16 @@ impl Version {

        let mut file_ids_parsed = hashes.to_vec();

-       let mut redis = redis.get().await?;
-
        let mut found_files = Vec::new();

-       let files = cmd("MGET")
-           .arg(
-               file_ids_parsed
-                   .iter()
-                   .map(|hash| format!("{}:{}_{}", VERSION_FILES_NAMESPACE, algorithm, hash))
-                   .collect::<Vec<_>>(),
-           )
-           .query_async::<_, Vec<Option<String>>>(&mut redis)
+       let files = redis
+           .multi_get::<String, _>(
+               VERSION_FILES_NAMESPACE,
+               file_ids_parsed
+                   .iter()
+                   .map(|hash| format!("{}_{}", algorithm, hash))
+                   .collect::<Vec<_>>(),
+           )
            .await?;

        for file in files {
@@ -726,12 +717,13 @@ impl Version {
        }

        for (key, mut files) in save_files {
-           cmd("SET")
-               .arg(format!("{}:{}", VERSION_FILES_NAMESPACE, key))
-               .arg(serde_json::to_string(&files)?)
-               .arg("EX")
-               .arg(DEFAULT_EXPIRY)
-               .query_async::<_, ()>(&mut redis)
+           redis
+               .set(
+                   VERSION_FILES_NAMESPACE,
+                   key,
+                   serde_json::to_string(&files)?,
+                   None,
+               )
                .await?;

            found_files.append(&mut files);
@@ -743,22 +735,19 @@ impl Version {

    pub async fn clear_cache(
        version: &QueryVersion,
-       redis: &deadpool_redis::Pool,
+       redis: &RedisPool,
    ) -> Result<(), DatabaseError> {
-       let mut redis = redis.get().await?;
-
-       let mut cmd = cmd("DEL");
-
-       cmd.arg(format!("{}:{}", VERSIONS_NAMESPACE, version.inner.id.0));
-
-       for file in &version.files {
-           for (algo, hash) in &file.hashes {
-               cmd.arg(format!("{}:{}_{}", VERSION_FILES_NAMESPACE, algo, hash));
-           }
-       }
-
-       cmd.query_async::<_, ()>(&mut redis).await?;
-
+       redis
+           .delete_many(
+               iter::once((VERSIONS_NAMESPACE, Some(version.inner.id.0.to_string()))).chain(
+                   version.files.iter().flat_map(|file| {
+                       file.hashes.iter().map(|(algo, hash)| {
+                           (VERSION_FILES_NAMESPACE, Some(format!("{}_{}", algo, hash)))
+                       })
+                   }),
+               ),
+           )
+           .await?;
        Ok(())
    }
}

128 src/database/redis.rs Normal file
@@ -0,0 +1,128 @@
use super::models::DatabaseError;
use deadpool_redis::{Config, Runtime};
use redis::{cmd, FromRedisValue, ToRedisArgs};
use std::fmt::Display;

const DEFAULT_EXPIRY: i64 = 1800; // 30 minutes

#[derive(Clone)]
pub struct RedisPool {
    pool: deadpool_redis::Pool,
    meta_namespace: String,
}

impl RedisPool {
    // Initialize a new redis pool.
    // A testing pool uses a hashmap to mimic redis behaviour for very small data sizes (ie: tests).
    // PANICS: the production pool will panic if the redis url is not set.
    pub fn new(meta_namespace: Option<String>) -> Self {
        let redis_pool = Config::from_url(dotenvy::var("REDIS_URL").expect("Redis URL not set"))
            .builder()
            .expect("Error building Redis pool")
            .max_size(
                dotenvy::var("DATABASE_MAX_CONNECTIONS")
                    .ok()
                    .and_then(|x| x.parse().ok())
                    .unwrap_or(10000),
            )
            .runtime(Runtime::Tokio1)
            .build()
            .expect("Redis connection failed");

        RedisPool {
            pool: redis_pool,
            meta_namespace: meta_namespace.unwrap_or("".to_string()),
        }
    }

    pub async fn set<T1, T2>(
        &self,
        namespace: &str,
        id: T1,
        data: T2,
        expiry: Option<i64>,
    ) -> Result<(), DatabaseError>
    where
        T1: Display,
        T2: ToRedisArgs,
    {
        let mut redis_connection = self.pool.get().await?;

        cmd("SET")
            .arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
            .arg(data)
            .arg("EX")
            .arg(expiry.unwrap_or(DEFAULT_EXPIRY))
            .query_async::<_, ()>(&mut redis_connection)
            .await?;

        Ok(())
    }

    pub async fn get<R, T1>(&self, namespace: &str, id: T1) -> Result<Option<R>, DatabaseError>
    where
        T1: Display,
        R: FromRedisValue,
    {
        let mut redis_connection = self.pool.get().await?;

        let res = cmd("GET")
            .arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
            .query_async::<_, Option<R>>(&mut redis_connection)
            .await?;
        Ok(res)
    }

    pub async fn multi_get<R, T1>(
        &self,
        namespace: &str,
        ids: impl IntoIterator<Item = T1>,
    ) -> Result<Vec<Option<R>>, DatabaseError>
    where
        T1: Display,
        R: FromRedisValue,
    {
        let mut redis_connection = self.pool.get().await?;
        let res = cmd("MGET")
            .arg(
                ids.into_iter()
                    .map(|x| format!("{}_{}:{}", self.meta_namespace, namespace, x))
                    .collect::<Vec<_>>(),
            )
            .query_async::<_, Vec<Option<R>>>(&mut redis_connection)
            .await?;
        Ok(res)
    }

    pub async fn delete<T1>(&self, namespace: &str, id: T1) -> Result<(), DatabaseError>
    where
        T1: Display,
    {
        let mut redis_connection = self.pool.get().await?;

        cmd("DEL")
            .arg(format!("{}_{}:{}", self.meta_namespace, namespace, id))
            .query_async::<_, ()>(&mut redis_connection)
            .await?;

        Ok(())
    }

    pub async fn delete_many(
        &self,
        iter: impl IntoIterator<Item = (&str, Option<String>)>,
    ) -> Result<(), DatabaseError> {
        let mut redis_connection = self.pool.get().await?;

        let mut cmd = cmd("DEL");
        for (namespace, id) in iter {
            if let Some(id) = id {
                cmd.arg(format!("{}_{}:{}", self.meta_namespace, namespace, id));
            }
        }
        cmd.query_async::<_, ()>(&mut redis_connection).await?;

        Ok(())
    }
}

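The wrapper above centralizes the three things every call site used to repeat: key construction (`{meta_namespace}_{namespace}:{id}`, so a pool built with `RedisPool::new(Some("test".to_string()))` cannot collide with production keys), expiry (the `EX` argument, defaulting to `DEFAULT_EXPIRY` when callers pass `None`), and connection checkout from the deadpool. A brief usage sketch against this API; the `"example"` namespace and the ids are made up for illustration:

    async fn demo(redis: &RedisPool) -> Result<(), DatabaseError> {
        // Writes "<meta>_example:42" with the default 30-minute expiry.
        redis.set("example", 42, "payload", None).await?;

        // A cache miss decodes to None rather than an error.
        let hit: Option<String> = redis.get("example", 42).await?;
        assert_eq!(hit.as_deref(), Some("payload"));

        // multi_get runs every id through the same namespaced key scheme.
        let hits: Vec<Option<String>> = redis.multi_get("example", [42, 43]).await?;
        assert_eq!(hits.len(), 2);

        // delete_many silently skips None keys, which is what lets the model
        // code above pass optional slugs/usernames without branching.
        redis
            .delete_many([("example", Some("42".to_string())), ("example", None)])
            .await?;
        Ok(())
    }
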
413 src/lib.rs Normal file
@@ -0,0 +1,413 @@
use std::sync::Arc;

use actix_web::web;
use database::redis::RedisPool;
use log::{info, warn};
use queue::{
    analytics::AnalyticsQueue, download::DownloadQueue, payouts::PayoutsQueue, session::AuthQueue,
    socket::ActiveSockets,
};
use scheduler::Scheduler;
use sqlx::Postgres;
use tokio::sync::{Mutex, RwLock};

extern crate clickhouse as clickhouse_crate;
use clickhouse_crate::Client;
use util::cors::default_cors;

use crate::{
    queue::payouts::process_payout,
    search::indexing::index_projects,
    util::env::{parse_strings_from_var, parse_var},
};

pub mod auth;
pub mod clickhouse;
pub mod database;
pub mod file_hosting;
pub mod models;
pub mod queue;
pub mod ratelimit;
pub mod routes;
pub mod scheduler;
pub mod search;
pub mod util;
pub mod validate;

#[derive(Clone)]
pub struct Pepper {
    pub pepper: String,
}

#[derive(Clone)]
pub struct LabrinthConfig {
    pub pool: sqlx::Pool<Postgres>,
    pub redis_pool: RedisPool,
    pub clickhouse: Client,
    pub file_host: Arc<dyn file_hosting::FileHost + Send + Sync>,
    pub maxmind: Arc<queue::maxmind::MaxMindIndexer>,
    pub scheduler: Arc<Scheduler>,
    pub ip_salt: Pepper,
    pub search_config: search::SearchConfig,
    pub download_queue: web::Data<DownloadQueue>,
    pub session_queue: web::Data<AuthQueue>,
    pub payouts_queue: web::Data<Mutex<PayoutsQueue>>,
    pub analytics_queue: Arc<AnalyticsQueue>,
    pub active_sockets: web::Data<RwLock<ActiveSockets>>,
}

pub fn app_setup(
    pool: sqlx::Pool<Postgres>,
    redis_pool: RedisPool,
    clickhouse: &mut Client,
    file_host: Arc<dyn file_hosting::FileHost + Send + Sync>,
    maxmind: Arc<queue::maxmind::MaxMindIndexer>,
) -> LabrinthConfig {
    info!(
        "Starting Labrinth on {}",
        dotenvy::var("BIND_ADDR").unwrap()
    );

    let search_config = search::SearchConfig {
        address: dotenvy::var("MEILISEARCH_ADDR").unwrap(),
        key: dotenvy::var("MEILISEARCH_KEY").unwrap(),
    };

    let mut scheduler = scheduler::Scheduler::new();

    // The interval in seconds at which the local database is indexed
    // for searching. Defaults to 1 hour if unset.
    let local_index_interval =
        std::time::Duration::from_secs(parse_var("LOCAL_INDEX_INTERVAL").unwrap_or(3600));

    let pool_ref = pool.clone();
    let search_config_ref = search_config.clone();
    scheduler.run(local_index_interval, move || {
        let pool_ref = pool_ref.clone();
        let search_config_ref = search_config_ref.clone();
        async move {
            info!("Indexing local database");
            let result = index_projects(pool_ref, &search_config_ref).await;
            if let Err(e) = result {
                warn!("Local project indexing failed: {:?}", e);
            }
            info!("Done indexing local database");
        }
    });

    // Changes statuses of scheduled projects/versions
    let pool_ref = pool.clone();
    // TODO: Clear cache when these are run
    scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
        let pool_ref = pool_ref.clone();
        info!("Releasing scheduled versions/projects!");

        async move {
            let projects_results = sqlx::query!(
                "
                UPDATE mods
                SET status = requested_status
                WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL
                ",
                crate::models::projects::ProjectStatus::Scheduled.as_str(),
            )
            .execute(&pool_ref)
            .await;

            if let Err(e) = projects_results {
                warn!("Syncing scheduled releases for projects failed: {:?}", e);
            }

            let versions_results = sqlx::query!(
                "
                UPDATE versions
                SET status = requested_status
                WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL
                ",
                crate::models::projects::VersionStatus::Scheduled.as_str(),
            )
            .execute(&pool_ref)
            .await;

            if let Err(e) = versions_results {
                warn!("Syncing scheduled releases for versions failed: {:?}", e);
            }

            info!("Finished releasing scheduled versions/projects");
        }
    });

    scheduler::schedule_versions(&mut scheduler, pool.clone());

    let download_queue = web::Data::new(DownloadQueue::new());

    let pool_ref = pool.clone();
    let download_queue_ref = download_queue.clone();
    scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
        let pool_ref = pool_ref.clone();
        let download_queue_ref = download_queue_ref.clone();

        async move {
            info!("Indexing download queue");
            let result = download_queue_ref.index(&pool_ref).await;
            if let Err(e) = result {
                warn!("Indexing download queue failed: {:?}", e);
            }
            info!("Done indexing download queue");
        }
    });

    let session_queue = web::Data::new(AuthQueue::new());

    let pool_ref = pool.clone();
    let redis_ref = redis_pool.clone();
    let session_queue_ref = session_queue.clone();
    scheduler.run(std::time::Duration::from_secs(60 * 30), move || {
        let pool_ref = pool_ref.clone();
        let redis_ref = redis_ref.clone();
        let session_queue_ref = session_queue_ref.clone();

        async move {
            info!("Indexing sessions queue");
            let result = session_queue_ref.index(&pool_ref, &redis_ref).await;
            if let Err(e) = result {
                warn!("Indexing sessions queue failed: {:?}", e);
            }
            info!("Done indexing sessions queue");
        }
    });

    let reader = maxmind.clone();
    {
        let reader_ref = reader.clone();
        scheduler.run(std::time::Duration::from_secs(60 * 60 * 24), move || {
            let reader_ref = reader_ref.clone();

            async move {
                info!("Downloading MaxMind GeoLite2 country database");
                let result = reader_ref.index().await;
                if let Err(e) = result {
                    warn!(
                        "Downloading MaxMind GeoLite2 country database failed: {:?}",
                        e
                    );
                }
                info!("Done downloading MaxMind GeoLite2 country database");
            }
        });
    }
    info!("Downloading MaxMind GeoLite2 country database");

    let analytics_queue = Arc::new(AnalyticsQueue::new());
    {
        let client_ref = clickhouse.clone();
        let analytics_queue_ref = analytics_queue.clone();
        scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
            let client_ref = client_ref.clone();
            let analytics_queue_ref = analytics_queue_ref.clone();

            async move {
                info!("Indexing analytics queue");
                let result = analytics_queue_ref.index(client_ref).await;
                if let Err(e) = result {
                    warn!("Indexing analytics queue failed: {:?}", e);
                }
                info!("Done indexing analytics queue");
            }
        });
    }

    {
        let pool_ref = pool.clone();
        let redis_ref = redis_pool.clone();
        let client_ref = clickhouse.clone();
        scheduler.run(std::time::Duration::from_secs(60 * 60 * 6), move || {
            let pool_ref = pool_ref.clone();
            let redis_ref = redis_ref.clone();
            let client_ref = client_ref.clone();

            async move {
                info!("Started running payouts");
                let result = process_payout(&pool_ref, &redis_ref, &client_ref).await;
                if let Err(e) = result {
                    warn!("Payouts run failed: {:?}", e);
                }
                info!("Done running payouts");
            }
        });
    }

    let ip_salt = Pepper {
        pepper: models::ids::Base62Id(models::ids::random_base62(11)).to_string(),
    };

    let payouts_queue = web::Data::new(Mutex::new(PayoutsQueue::new()));
    let active_sockets = web::Data::new(RwLock::new(ActiveSockets::default()));

    LabrinthConfig {
        pool,
        redis_pool,
        clickhouse: clickhouse.clone(),
        file_host,
        maxmind,
        scheduler: Arc::new(scheduler),
        ip_salt,
        download_queue,
        search_config,
        session_queue,
        payouts_queue,
        analytics_queue,
        active_sockets,
    }
}

pub fn app_config(cfg: &mut web::ServiceConfig, labrinth_config: LabrinthConfig) {
    cfg.app_data(
        web::FormConfig::default()
            .error_handler(|err, _req| routes::ApiError::Validation(err.to_string()).into()),
    )
    .app_data(
        web::PathConfig::default()
            .error_handler(|err, _req| routes::ApiError::Validation(err.to_string()).into()),
    )
    .app_data(
        web::QueryConfig::default()
            .error_handler(|err, _req| routes::ApiError::Validation(err.to_string()).into()),
    )
    .app_data(
        web::JsonConfig::default()
            .error_handler(|err, _req| routes::ApiError::Validation(err.to_string()).into()),
    )
    .app_data(web::Data::new(labrinth_config.redis_pool.clone()))
    .app_data(web::Data::new(labrinth_config.pool.clone()))
    .app_data(web::Data::new(labrinth_config.file_host.clone()))
    .app_data(web::Data::new(labrinth_config.search_config.clone()))
    .app_data(labrinth_config.download_queue.clone())
    .app_data(labrinth_config.session_queue.clone())
    .app_data(labrinth_config.payouts_queue.clone())
    .app_data(web::Data::new(labrinth_config.ip_salt.clone()))
    .app_data(web::Data::new(labrinth_config.analytics_queue.clone()))
    .app_data(web::Data::new(labrinth_config.clickhouse.clone()))
    .app_data(web::Data::new(labrinth_config.maxmind.clone()))
    .app_data(labrinth_config.active_sockets.clone())
    .configure(routes::v2::config)
    .configure(routes::v3::config)
    .configure(routes::root_config)
    .default_service(web::get().wrap(default_cors()).to(routes::not_found));
}

// This is so that env vars not used immediately don't panic at runtime
pub fn check_env_vars() -> bool {
    let mut failed = false;

    fn check_var<T: std::str::FromStr>(var: &'static str) -> bool {
        let check = parse_var::<T>(var).is_none();
        if check {
            warn!(
                "Variable `{}` missing in dotenv or not of type `{}`",
                var,
                std::any::type_name::<T>()
            );
        }
        check
    }

    failed |= check_var::<String>("SITE_URL");
    failed |= check_var::<String>("CDN_URL");
    failed |= check_var::<String>("LABRINTH_ADMIN_KEY");
    failed |= check_var::<String>("RATE_LIMIT_IGNORE_KEY");
    failed |= check_var::<String>("DATABASE_URL");
    failed |= check_var::<String>("MEILISEARCH_ADDR");
    failed |= check_var::<String>("MEILISEARCH_KEY");
    failed |= check_var::<String>("REDIS_URL");
    failed |= check_var::<String>("BIND_ADDR");
    failed |= check_var::<String>("SELF_ADDR");

    failed |= check_var::<String>("STORAGE_BACKEND");

    let storage_backend = dotenvy::var("STORAGE_BACKEND").ok();
    match storage_backend.as_deref() {
        Some("backblaze") => {
            failed |= check_var::<String>("BACKBLAZE_KEY_ID");
            failed |= check_var::<String>("BACKBLAZE_KEY");
            failed |= check_var::<String>("BACKBLAZE_BUCKET_ID");
        }
        Some("s3") => {
            failed |= check_var::<String>("S3_ACCESS_TOKEN");
            failed |= check_var::<String>("S3_SECRET");
            failed |= check_var::<String>("S3_URL");
            failed |= check_var::<String>("S3_REGION");
            failed |= check_var::<String>("S3_BUCKET_NAME");
        }
        Some("local") => {
            failed |= check_var::<String>("MOCK_FILE_PATH");
        }
        Some(backend) => {
            warn!("Variable `STORAGE_BACKEND` contains an invalid value: {}. Expected \"backblaze\", \"s3\", or \"local\".", backend);
            failed |= true;
        }
        _ => {
            warn!("Variable `STORAGE_BACKEND` is not set!");
            failed |= true;
        }
    }

    failed |= check_var::<usize>("LOCAL_INDEX_INTERVAL");
    failed |= check_var::<usize>("VERSION_INDEX_INTERVAL");

    if parse_strings_from_var("WHITELISTED_MODPACK_DOMAINS").is_none() {
        warn!("Variable `WHITELISTED_MODPACK_DOMAINS` missing in dotenv or not a json array of strings");
        failed |= true;
    }

    if parse_strings_from_var("ALLOWED_CALLBACK_URLS").is_none() {
        warn!("Variable `ALLOWED_CALLBACK_URLS` missing in dotenv or not a json array of strings");
        failed |= true;
    }

    failed |= check_var::<String>("PAYPAL_API_URL");
    failed |= check_var::<String>("PAYPAL_CLIENT_ID");
    failed |= check_var::<String>("PAYPAL_CLIENT_SECRET");

    failed |= check_var::<String>("GITHUB_CLIENT_ID");
    failed |= check_var::<String>("GITHUB_CLIENT_SECRET");
    failed |= check_var::<String>("GITLAB_CLIENT_ID");
    failed |= check_var::<String>("GITLAB_CLIENT_SECRET");
    failed |= check_var::<String>("DISCORD_CLIENT_ID");
    failed |= check_var::<String>("DISCORD_CLIENT_SECRET");
    failed |= check_var::<String>("MICROSOFT_CLIENT_ID");
    failed |= check_var::<String>("MICROSOFT_CLIENT_SECRET");
    failed |= check_var::<String>("GOOGLE_CLIENT_ID");
    failed |= check_var::<String>("GOOGLE_CLIENT_SECRET");
    failed |= check_var::<String>("STEAM_API_KEY");

    failed |= check_var::<String>("TURNSTILE_SECRET");

    failed |= check_var::<String>("SMTP_USERNAME");
    failed |= check_var::<String>("SMTP_PASSWORD");
    failed |= check_var::<String>("SMTP_HOST");

    failed |= check_var::<String>("SITE_VERIFY_EMAIL_PATH");
    failed |= check_var::<String>("SITE_RESET_PASSWORD_PATH");

    failed |= check_var::<String>("BEEHIIV_PUBLICATION_ID");
    failed |= check_var::<String>("BEEHIIV_API_KEY");

    if parse_strings_from_var("ANALYTICS_ALLOWED_ORIGINS").is_none() {
        warn!(
            "Variable `ANALYTICS_ALLOWED_ORIGINS` missing in dotenv or not a json array of strings"
        );
        failed |= true;
    }

    failed |= check_var::<String>("CLICKHOUSE_URL");
    failed |= check_var::<String>("CLICKHOUSE_USER");
    failed |= check_var::<String>("CLICKHOUSE_PASSWORD");
    failed |= check_var::<String>("CLICKHOUSE_DATABASE");

    failed |= check_var::<String>("MAXMIND_LICENSE_KEY");

    failed |= check_var::<u64>("PAYOUTS_BUDGET");

    failed
}

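The payoff of this extraction is that `main.rs` (below) shrinks to wiring, while integration tests can build the exact app the binary serves by calling `app_setup` and `app_config` themselves. A hedged sketch of what such a harness might look like; the fixture arguments (`pool`, `clickhouse`, `file_host`, `maxmind`) are assumed to be prepared elsewhere, e.g. against the docker-compose Postgres from the CI workflow, and are not part of this commit:

    use std::sync::Arc;

    use actix_web::{test, App};
    use labrinth::database::redis::RedisPool;

    async fn smoke_test_app(
        pool: sqlx::Pool<sqlx::Postgres>,
        mut clickhouse: clickhouse::Client, // the external clickhouse crate's client
        file_host: Arc<dyn labrinth::file_hosting::FileHost + Send + Sync>,
        maxmind: Arc<labrinth::queue::maxmind::MaxMindIndexer>,
    ) {
        // A distinct meta namespace keeps test keys away from any shared Redis.
        let config = labrinth::app_setup(
            pool,
            RedisPool::new(Some("test".to_string())),
            &mut clickhouse,
            file_host,
            maxmind,
        );

        // Build the same service tree main.rs serves, in-process.
        let app = test::init_service(
            App::new().configure(|cfg| labrinth::app_config(cfg, config.clone())),
        )
        .await;

        let req = test::TestRequest::get().uri("/").to_request();
        let resp = test::call_service(&app, req).await;
        assert!(resp.status().is_success());
    }
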
391
src/main.rs
391
src/main.rs
@@ -1,34 +1,15 @@
|
||||
use crate::file_hosting::S3Host;
|
||||
use crate::queue::analytics::AnalyticsQueue;
|
||||
use crate::queue::download::DownloadQueue;
|
||||
use crate::queue::payouts::{process_payout, PayoutsQueue};
|
||||
use crate::queue::session::AuthQueue;
|
||||
use crate::queue::socket::ActiveSockets;
|
||||
use crate::ratelimit::errors::ARError;
|
||||
use crate::ratelimit::memory::{MemoryStore, MemoryStoreActor};
|
||||
use crate::ratelimit::middleware::RateLimiter;
|
||||
use crate::util::cors::default_cors;
|
||||
use crate::util::env::{parse_strings_from_var, parse_var};
|
||||
use actix_web::{web, App, HttpServer};
|
||||
use deadpool_redis::{Config, Runtime};
|
||||
use actix_web::{App, HttpServer};
|
||||
use env_logger::Env;
|
||||
use log::{error, info, warn};
|
||||
use search::indexing::index_projects;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use labrinth::database::redis::RedisPool;
|
||||
use labrinth::file_hosting::S3Host;
|
||||
use labrinth::ratelimit::errors::ARError;
|
||||
use labrinth::ratelimit::memory::{MemoryStore, MemoryStoreActor};
|
||||
use labrinth::ratelimit::middleware::RateLimiter;
|
||||
use labrinth::util::env::parse_var;
|
||||
use labrinth::{check_env_vars, clickhouse, database, file_hosting, queue};
|
||||
use log::{error, info};
|
||||
|
||||
mod auth;
|
||||
mod clickhouse;
|
||||
mod database;
|
||||
mod file_hosting;
|
||||
mod models;
|
||||
mod queue;
|
||||
mod ratelimit;
|
||||
mod routes;
|
||||
mod scheduler;
|
||||
mod search;
|
||||
mod util;
|
||||
mod validate;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct Pepper {
|
||||
@@ -63,11 +44,6 @@ async fn main() -> std::io::Result<()> {
|
||||
dotenvy::var("BIND_ADDR").unwrap()
|
||||
);
|
||||
|
||||
let search_config = search::SearchConfig {
|
||||
address: dotenvy::var("MEILISEARCH_ADDR").unwrap(),
|
||||
key: dotenvy::var("MEILISEARCH_KEY").unwrap(),
|
||||
};
|
||||
|
||||
database::check_for_migrations()
|
||||
.await
|
||||
.expect("An error occurred while running migrations.");
|
||||
@@ -78,18 +54,7 @@ async fn main() -> std::io::Result<()> {
|
||||
.expect("Database connection failed");
|
||||
|
||||
// Redis connector
|
||||
let redis_pool = Config::from_url(dotenvy::var("REDIS_URL").expect("Redis URL not set"))
|
||||
.builder()
|
||||
.expect("Error building Redis pool")
|
||||
.max_size(
|
||||
dotenvy::var("DATABASE_MAX_CONNECTIONS")
|
||||
.ok()
|
||||
.and_then(|x| x.parse().ok())
|
||||
.unwrap_or(10000),
|
||||
)
|
||||
.runtime(Runtime::Tokio1)
|
||||
.build()
|
||||
.expect("Redis connection failed");
|
||||
let redis_pool = RedisPool::new(None);
|
||||
|
||||
let storage_backend = dotenvy::var("STORAGE_BACKEND").unwrap_or_else(|_| "local".to_string());
|
||||
|
||||
@@ -116,184 +81,23 @@ async fn main() -> std::io::Result<()> {
|
||||
_ => panic!("Invalid storage backend specified. Aborting startup!"),
|
||||
};
|
||||
|
||||
let mut scheduler = scheduler::Scheduler::new();
|
||||
|
||||
// The interval in seconds at which the local database is indexed
|
||||
// for searching. Defaults to 1 hour if unset.
|
||||
let local_index_interval =
|
||||
std::time::Duration::from_secs(parse_var("LOCAL_INDEX_INTERVAL").unwrap_or(3600));
|
||||
|
||||
let pool_ref = pool.clone();
|
||||
let search_config_ref = search_config.clone();
|
||||
scheduler.run(local_index_interval, move || {
|
||||
let pool_ref = pool_ref.clone();
|
||||
let search_config_ref = search_config_ref.clone();
|
||||
async move {
|
||||
info!("Indexing local database");
|
||||
let result = index_projects(pool_ref, &search_config_ref).await;
|
||||
if let Err(e) = result {
|
||||
warn!("Local project indexing failed: {:?}", e);
|
||||
}
|
||||
info!("Done indexing local database");
|
||||
}
|
||||
});
|
||||
|
||||
// Changes statuses of scheduled projects/versions
|
||||
let pool_ref = pool.clone();
|
||||
// TODO: Clear cache when these are run
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
|
||||
let pool_ref = pool_ref.clone();
|
||||
info!("Releasing scheduled versions/projects!");
|
||||
|
||||
async move {
|
||||
let projects_results = sqlx::query!(
|
||||
"
|
||||
UPDATE mods
|
||||
SET status = requested_status
|
||||
WHERE status = $1 AND approved < CURRENT_DATE AND requested_status IS NOT NULL
|
||||
",
|
||||
crate::models::projects::ProjectStatus::Scheduled.as_str(),
|
||||
)
|
||||
.execute(&pool_ref)
|
||||
.await;
|
||||
|
||||
if let Err(e) = projects_results {
|
||||
warn!("Syncing scheduled releases for projects failed: {:?}", e);
|
||||
}
|
||||
|
||||
let versions_results = sqlx::query!(
|
||||
"
|
||||
UPDATE versions
|
||||
SET status = requested_status
|
||||
WHERE status = $1 AND date_published < CURRENT_DATE AND requested_status IS NOT NULL
|
||||
",
|
||||
crate::models::projects::VersionStatus::Scheduled.as_str(),
|
||||
)
|
||||
.execute(&pool_ref)
|
||||
.await;
|
||||
|
||||
if let Err(e) = versions_results {
|
||||
warn!("Syncing scheduled releases for versions failed: {:?}", e);
|
||||
}
|
||||
|
||||
info!("Finished releasing scheduled versions/projects");
|
||||
}
|
||||
});
|
||||
|
||||
scheduler::schedule_versions(&mut scheduler, pool.clone());
|
||||
|
||||
let download_queue = web::Data::new(DownloadQueue::new());
|
||||
|
||||
let pool_ref = pool.clone();
|
||||
let download_queue_ref = download_queue.clone();
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
|
||||
let pool_ref = pool_ref.clone();
|
||||
let download_queue_ref = download_queue_ref.clone();
|
||||
|
||||
async move {
|
||||
info!("Indexing download queue");
|
||||
let result = download_queue_ref.index(&pool_ref).await;
|
||||
if let Err(e) = result {
|
||||
warn!("Indexing download queue failed: {:?}", e);
|
||||
}
|
||||
info!("Done indexing download queue");
|
||||
}
|
||||
});
|
||||
|
||||
let session_queue = web::Data::new(AuthQueue::new());
|
||||
|
||||
let pool_ref = pool.clone();
|
||||
let redis_ref = redis_pool.clone();
|
||||
let session_queue_ref = session_queue.clone();
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 30), move || {
|
||||
let pool_ref = pool_ref.clone();
|
||||
let redis_ref = redis_ref.clone();
|
||||
let session_queue_ref = session_queue_ref.clone();
|
||||
|
||||
async move {
|
||||
info!("Indexing sessions queue");
|
||||
let result = session_queue_ref.index(&pool_ref, &redis_ref).await;
|
||||
if let Err(e) = result {
|
||||
warn!("Indexing sessions queue failed: {:?}", e);
|
||||
}
|
||||
info!("Done indexing sessions queue");
|
||||
}
|
||||
});
|
||||
|
||||
info!("Initializing clickhouse connection");
|
||||
let clickhouse = clickhouse::init_client().await.unwrap();
|
||||
let mut clickhouse = clickhouse::init_client().await.unwrap();
|
||||
|
||||
let reader = Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
||||
{
|
||||
let reader_ref = reader.clone();
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 60 * 24), move || {
|
||||
let reader_ref = reader_ref.clone();
|
||||
|
||||
async move {
|
||||
info!("Downloading MaxMind GeoLite2 country database");
|
||||
let result = reader_ref.index().await;
|
||||
if let Err(e) = result {
|
||||
warn!(
|
||||
"Downloading MaxMind GeoLite2 country database failed: {:?}",
|
||||
e
|
||||
);
|
||||
}
|
||||
info!("Done downloading MaxMind GeoLite2 country database");
|
||||
}
|
||||
});
|
||||
}
|
||||
info!("Downloading MaxMind GeoLite2 country database");
|
||||
|
||||
let analytics_queue = Arc::new(AnalyticsQueue::new());
|
||||
{
|
||||
let client_ref = clickhouse.clone();
|
||||
let analytics_queue_ref = analytics_queue.clone();
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 5), move || {
|
||||
let client_ref = client_ref.clone();
|
||||
let analytics_queue_ref = analytics_queue_ref.clone();
|
||||
|
||||
async move {
|
||||
info!("Indexing analytics queue");
|
||||
let result = analytics_queue_ref.index(client_ref).await;
|
||||
if let Err(e) = result {
|
||||
warn!("Indexing analytics queue failed: {:?}", e);
|
||||
}
|
||||
info!("Done indexing analytics queue");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let pool_ref = pool.clone();
|
||||
let redis_ref = redis_pool.clone();
|
||||
let client_ref = clickhouse.clone();
|
||||
scheduler.run(std::time::Duration::from_secs(60 * 60 * 6), move || {
|
||||
let pool_ref = pool_ref.clone();
|
||||
let redis_ref = redis_ref.clone();
|
||||
let client_ref = client_ref.clone();
|
||||
|
||||
async move {
|
||||
info!("Started running payouts");
|
||||
let result = process_payout(&pool_ref, &redis_ref, &client_ref).await;
|
||||
if let Err(e) = result {
|
||||
warn!("Payouts run failed: {:?}", e);
|
||||
}
|
||||
info!("Done running payouts");
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let ip_salt = Pepper {
|
||||
pepper: models::ids::Base62Id(models::ids::random_base62(11)).to_string(),
|
||||
};
|
||||
|
||||
let payouts_queue = web::Data::new(Mutex::new(PayoutsQueue::new()));
|
||||
let active_sockets = web::Data::new(RwLock::new(ActiveSockets::default()));
|
||||
let maxmind_reader = Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());
|
||||
|
||||
let store = MemoryStore::new();
|
||||
|
||||
info!("Starting Actix HTTP server!");
|
||||
|
||||
let labrinth_config = labrinth::app_setup(
|
||||
pool.clone(),
|
||||
redis_pool.clone(),
|
||||
&mut clickhouse,
|
||||
file_host.clone(),
|
||||
maxmind_reader.clone(),
|
||||
);
|
||||
|
||||
// Init App
|
||||
HttpServer::new(move || {
|
||||
App::new()
|
||||
@@ -320,160 +124,9 @@ async fn main() -> std::io::Result<()> {
|
||||
.with_ignore_key(dotenvy::var("RATE_LIMIT_IGNORE_KEY").ok()),
|
||||
)
|
||||
.wrap(sentry_actix::Sentry::new())
|
||||
.app_data(
|
||||
web::FormConfig::default().error_handler(|err, _req| {
|
||||
routes::ApiError::Validation(err.to_string()).into()
|
||||
}),
|
||||
)
|
||||
.app_data(
|
||||
web::PathConfig::default().error_handler(|err, _req| {
|
||||
routes::ApiError::Validation(err.to_string()).into()
|
||||
}),
|
||||
)
|
||||
.app_data(
|
||||
web::QueryConfig::default().error_handler(|err, _req| {
|
||||
routes::ApiError::Validation(err.to_string()).into()
|
||||
}),
|
||||
)
|
||||
.app_data(
|
||||
web::JsonConfig::default().error_handler(|err, _req| {
|
||||
routes::ApiError::Validation(err.to_string()).into()
|
||||
}),
|
||||
)
|
||||
.app_data(web::Data::new(redis_pool.clone()))
|
||||
.app_data(web::Data::new(pool.clone()))
|
||||
.app_data(web::Data::new(file_host.clone()))
|
||||
.app_data(web::Data::new(search_config.clone()))
|
||||
.app_data(download_queue.clone())
|
||||
.app_data(session_queue.clone())
|
||||
.app_data(payouts_queue.clone())
|
||||
.app_data(web::Data::new(ip_salt.clone()))
|
||||
.app_data(web::Data::new(analytics_queue.clone()))
|
||||
.app_data(web::Data::new(clickhouse.clone()))
|
||||
.app_data(web::Data::new(reader.clone()))
|
||||
.app_data(active_sockets.clone())
|
||||
.configure(routes::v2::config)
|
||||
.configure(routes::v3::config)
|
||||
.configure(routes::root_config)
|
||||
.default_service(web::get().wrap(default_cors()).to(routes::not_found))
|
||||
.configure(|cfg| labrinth::app_config(cfg, labrinth_config.clone()))
|
||||
})
|
||||
.bind(dotenvy::var("BIND_ADDR").unwrap())?
|
||||
.run()
|
||||
.await
|
||||
}

// This is so that env vars not used immediately don't panic at runtime
fn check_env_vars() -> bool {
    let mut failed = false;

    fn check_var<T: std::str::FromStr>(var: &'static str) -> bool {
        let check = parse_var::<T>(var).is_none();
        if check {
            warn!(
                "Variable `{}` missing in dotenv or not of type `{}`",
                var,
                std::any::type_name::<T>()
            );
        }
        check
    }

    failed |= check_var::<String>("SITE_URL");
    failed |= check_var::<String>("CDN_URL");
    failed |= check_var::<String>("LABRINTH_ADMIN_KEY");
    failed |= check_var::<String>("RATE_LIMIT_IGNORE_KEY");
    failed |= check_var::<String>("DATABASE_URL");
    failed |= check_var::<String>("MEILISEARCH_ADDR");
    failed |= check_var::<String>("MEILISEARCH_KEY");
    failed |= check_var::<String>("REDIS_URL");
    failed |= check_var::<String>("BIND_ADDR");
    failed |= check_var::<String>("SELF_ADDR");

    failed |= check_var::<String>("STORAGE_BACKEND");

    let storage_backend = dotenvy::var("STORAGE_BACKEND").ok();
    match storage_backend.as_deref() {
        Some("backblaze") => {
            failed |= check_var::<String>("BACKBLAZE_KEY_ID");
            failed |= check_var::<String>("BACKBLAZE_KEY");
            failed |= check_var::<String>("BACKBLAZE_BUCKET_ID");
        }
        Some("s3") => {
            failed |= check_var::<String>("S3_ACCESS_TOKEN");
            failed |= check_var::<String>("S3_SECRET");
            failed |= check_var::<String>("S3_URL");
            failed |= check_var::<String>("S3_REGION");
            failed |= check_var::<String>("S3_BUCKET_NAME");
        }
        Some("local") => {
            failed |= check_var::<String>("MOCK_FILE_PATH");
        }
        Some(backend) => {
            warn!("Variable `STORAGE_BACKEND` contains an invalid value: {}. Expected \"backblaze\", \"s3\", or \"local\".", backend);
            failed |= true;
        }
        _ => {
            warn!("Variable `STORAGE_BACKEND` is not set!");
            failed |= true;
        }
    }

    failed |= check_var::<usize>("LOCAL_INDEX_INTERVAL");
    failed |= check_var::<usize>("VERSION_INDEX_INTERVAL");

    if parse_strings_from_var("WHITELISTED_MODPACK_DOMAINS").is_none() {
        warn!("Variable `WHITELISTED_MODPACK_DOMAINS` missing in dotenv or not a json array of strings");
        failed |= true;
    }

    if parse_strings_from_var("ALLOWED_CALLBACK_URLS").is_none() {
        warn!("Variable `ALLOWED_CALLBACK_URLS` missing in dotenv or not a json array of strings");
        failed |= true;
    }

    failed |= check_var::<String>("PAYPAL_API_URL");
    failed |= check_var::<String>("PAYPAL_CLIENT_ID");
    failed |= check_var::<String>("PAYPAL_CLIENT_SECRET");

    failed |= check_var::<String>("GITHUB_CLIENT_ID");
    failed |= check_var::<String>("GITHUB_CLIENT_SECRET");
    failed |= check_var::<String>("GITLAB_CLIENT_ID");
    failed |= check_var::<String>("GITLAB_CLIENT_SECRET");
    failed |= check_var::<String>("DISCORD_CLIENT_ID");
    failed |= check_var::<String>("DISCORD_CLIENT_SECRET");
    failed |= check_var::<String>("MICROSOFT_CLIENT_ID");
    failed |= check_var::<String>("MICROSOFT_CLIENT_SECRET");
    failed |= check_var::<String>("GOOGLE_CLIENT_ID");
    failed |= check_var::<String>("GOOGLE_CLIENT_SECRET");
    failed |= check_var::<String>("STEAM_API_KEY");

    failed |= check_var::<String>("TURNSTILE_SECRET");

    failed |= check_var::<String>("SMTP_USERNAME");
    failed |= check_var::<String>("SMTP_PASSWORD");
    failed |= check_var::<String>("SMTP_HOST");

    failed |= check_var::<String>("SITE_VERIFY_EMAIL_PATH");
    failed |= check_var::<String>("SITE_RESET_PASSWORD_PATH");

    failed |= check_var::<String>("BEEHIIV_PUBLICATION_ID");
    failed |= check_var::<String>("BEEHIIV_API_KEY");

    if parse_strings_from_var("ANALYTICS_ALLOWED_ORIGINS").is_none() {
        warn!(
            "Variable `ANALYTICS_ALLOWED_ORIGINS` missing in dotenv or not a json array of strings"
        );
        failed |= true;
    }

    failed |= check_var::<String>("CLICKHOUSE_URL");
    failed |= check_var::<String>("CLICKHOUSE_USER");
    failed |= check_var::<String>("CLICKHOUSE_PASSWORD");
    failed |= check_var::<String>("CLICKHOUSE_DATABASE");

    failed |= check_var::<String>("MAXMIND_LICENSE_KEY");

    failed |= check_var::<u64>("PAYOUTS_BUDGET");

    failed
}
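parse_var and parse_strings_from_var come from the crate's util::env module and their bodies are not shown in this diff. A plausible sketch of their shape, purely for reading the checks above (implementations are assumptions):

// Assumed helpers from util::env (not part of this diff):
pub fn parse_var<T: std::str::FromStr>(var: &str) -> Option<T> {
    dotenvy::var(var).ok().and_then(|s| s.parse::<T>().ok())
}

pub fn parse_strings_from_var(var: &str) -> Option<Vec<String>> {
    dotenvy::var(var)
        .ok()
        .and_then(|s| serde_json::from_str::<Vec<String>>(&s).ok())
}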

@@ -1,5 +1,4 @@
use crate::models::projects::SideType;
use crate::parse_strings_from_var;
use crate::{models::projects::SideType, util::env::parse_strings_from_var};
use serde::{Deserialize, Serialize};
use validator::Validate;

@@ -51,7 +51,7 @@ bitflags::bitflags! {
        const VERSION_READ = 1 << 15;
        // write to a version's data (metadata, files, etc)
        const VERSION_WRITE = 1 << 16;
        // delete a project
        // delete a version
        const VERSION_DELETE = 1 << 17;

        // create a report
@@ -103,26 +103,26 @@ bitflags::bitflags! {
        // delete an organization
        const ORGANIZATION_DELETE = 1 << 38;

        const ALL = 0b111111111111111111111111111111111111111;
        const NOT_RESTRICTED = 0b1111111100000011111111111111100111;
        const NONE = 0b0;
    }
}

impl Scopes {
    // these scopes cannot be specified in a personal access token
    pub fn restricted(&self) -> bool {
        self.contains(
            Scopes::PAT_CREATE
                | Scopes::PAT_READ
                | Scopes::PAT_WRITE
                | Scopes::PAT_DELETE
                | Scopes::SESSION_READ
                | Scopes::SESSION_DELETE
                | Scopes::USER_AUTH_WRITE
                | Scopes::USER_DELETE
                | Scopes::PERFORM_ANALYTICS,
        )
    pub fn restricted() -> Scopes {
        Scopes::PAT_CREATE
            | Scopes::PAT_READ
            | Scopes::PAT_WRITE
            | Scopes::PAT_DELETE
            | Scopes::SESSION_READ
            | Scopes::SESSION_DELETE
            | Scopes::USER_AUTH_WRITE
            | Scopes::USER_DELETE
            | Scopes::PERFORM_ANALYTICS
    }

    pub fn is_restricted(&self) -> bool {
        self.intersects(Self::restricted())
    }
}
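Note that this refactor also changes the semantics of the check: the old restricted(&self) used contains, which only fired when a token held every restricted scope at once, while the new is_restricted uses intersects, rejecting a token that holds any of them. A short usage sketch, with scope values taken from this diff:

let requested = Scopes::PAT_READ | Scopes::PROJECT_READ;
if requested.is_restricted() {
    // reject: personal access tokens may not carry restricted scopes
}
let everything_restricted = Scopes::restricted();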

@@ -4,7 +4,7 @@ use chrono::{DateTime, Utc};
use rust_decimal::Decimal;
use serde::{Deserialize, Serialize};

#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize)]
#[derive(Copy, Clone, PartialEq, Eq, Serialize, Deserialize, Debug)]
#[serde(from = "Base62Id")]
#[serde(into = "Base62Id")]
pub struct UserId(pub u64);
@@ -35,7 +35,7 @@ impl Default for Badges {
    }
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct User {
    pub id: UserId,
    pub username: String,
@@ -57,7 +57,7 @@ pub struct User {
    pub github_id: Option<u64>,
}

#[derive(Serialize, Deserialize, Clone)]
#[derive(Serialize, Deserialize, Clone, Debug)]
pub struct UserPayoutData {
    pub balance: Decimal,
    pub payout_wallet: Option<RecipientWallet>,
@@ -156,7 +156,7 @@ impl From<DBUser> for User {
    }
}

#[derive(Serialize, Deserialize, PartialEq, Eq, Clone)]
#[derive(Serialize, Deserialize, PartialEq, Eq, Clone, Debug)]
#[serde(rename_all = "lowercase")]
pub enum Role {
    Developer,

@@ -1,6 +1,6 @@
use crate::models::projects::MonetizationStatus;
use crate::routes::ApiError;
use crate::util::env::parse_var;
use crate::{database::redis::RedisPool, models::projects::MonetizationStatus};
use base64::Engine;
use chrono::{DateTime, Datelike, Duration, Utc, Weekday};
use rust_decimal::Decimal;
@@ -203,7 +203,7 @@ impl PayoutsQueue {

pub async fn process_payout(
    pool: &PgPool,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
    client: &clickhouse::Client,
) -> Result<(), ApiError> {
    let start: DateTime<Utc> = DateTime::from_utc(

@@ -2,6 +2,7 @@ use crate::auth::session::SessionMetadata;
use crate::database::models::pat_item::PersonalAccessToken;
use crate::database::models::session_item::Session;
use crate::database::models::{DatabaseError, PatId, SessionId, UserId};
use crate::database::redis::RedisPool;
use chrono::Utc;
use sqlx::PgPool;
use std::collections::{HashMap, HashSet};
@@ -42,11 +43,7 @@ impl AuthQueue {
        std::mem::replace(&mut queue, HashSet::with_capacity(len))
    }

    pub async fn index(
        &self,
        pool: &PgPool,
        redis: &deadpool_redis::Pool,
    ) -> Result<(), DatabaseError> {
    pub async fn index(&self, pool: &PgPool, redis: &RedisPool) -> Result<(), DatabaseError> {
        let session_queue = self.take_sessions().await;
        let pat_queue = self.take_pats().await;
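Most of the remaining hunks repeat one mechanical change: handlers and helpers now take the crate's RedisPool wrapper from database::redis instead of a raw deadpool_redis::Pool. The wrapper's definition is not part of this diff; a plausible sketch of its shape, purely as an assumption:

// Assumed shape of database::redis::RedisPool (illustration only):
#[derive(Clone)]
pub struct RedisPool {
    pool: deadpool_redis::Pool,
}

impl RedisPool {
    // Hand out a pooled connection; callers pass &RedisPool around instead
    // of the underlying deadpool handle.
    pub async fn connect(&self) -> Result<deadpool_redis::Connection, deadpool_redis::PoolError> {
        self.pool.get().await
    }
}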

@@ -20,7 +20,7 @@ impl MemoryStore {
    ///
    /// # Example
    /// ```rust
    /// use actix_ratelimit::MemoryStore;
    /// use labrinth::ratelimit::memory::MemoryStore;
    ///
    /// let store = MemoryStore::new();
    /// ```

@@ -1,11 +1,12 @@
use crate::auth::get_user_from_headers;
use crate::database::redis::RedisPool;
use crate::models::analytics::{PageView, Playtime};
use crate::models::pats::Scopes;
use crate::queue::analytics::AnalyticsQueue;
use crate::queue::maxmind::MaxMindIndexer;
use crate::queue::session::AuthQueue;
use crate::routes::ApiError;
use crate::util::env::parse_strings_from_var;
use crate::AnalyticsQueue;
use actix_web::{post, web};
use actix_web::{HttpRequest, HttpResponse};
use chrono::Utc;
@@ -63,7 +64,7 @@ pub async fn page_view_ingest(
    session_queue: web::Data<AuthQueue>,
    url_input: web::Json<UrlInput>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(&req, &**pool, &redis, &session_queue, None)
        .await
@@ -169,7 +170,7 @@ pub async fn playtime_ingest(
    session_queue: web::Data<AuthQueue>,
    playtime_input: web::Json<HashMap<crate::models::ids::VersionId, PlaytimeInput>>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let (_, user) = get_user_from_headers(
        &req,

@@ -1,6 +1,7 @@
use crate::database::models::categories::Loader;
use crate::database::models::project_item::QueryProject;
use crate::database::models::version_item::{QueryFile, QueryVersion};
use crate::database::redis::RedisPool;
use crate::models::pats::Scopes;
use crate::models::projects::{ProjectId, VersionId};
use crate::queue::session::AuthQueue;
@@ -71,7 +72,7 @@ pub async fn maven_metadata(
    req: HttpRequest,
    params: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let project_id = params.into_inner().0;
@@ -156,7 +157,7 @@ async fn find_version(
    project: &QueryProject,
    vcoords: &String,
    pool: &PgPool,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
) -> Result<Option<QueryVersion>, ApiError> {
    let id_option = crate::models::ids::base62_impl::parse_base62(vcoords)
        .ok()
@@ -245,7 +246,7 @@ pub async fn version_file(
    req: HttpRequest,
    params: web::Path<(String, String, String)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (project_id, vnum, file) = params.into_inner();
@@ -306,7 +307,7 @@ pub async fn version_file_sha1(
    req: HttpRequest,
    params: web::Path<(String, String, String)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (project_id, vnum, file) = params.into_inner();
@@ -348,7 +349,7 @@ pub async fn version_file_sha512(
    req: HttpRequest,
    params: web::Path<(String, String, String)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (project_id, vnum, file) = params.into_inner();

@@ -6,6 +6,7 @@ use sqlx::PgPool;

use crate::auth::{filter_authorized_versions, get_user_from_headers, is_authorized};
use crate::database;
use crate::database::redis::RedisPool;
use crate::models::pats::Scopes;
use crate::models::projects::VersionType;
use crate::queue::session::AuthQueue;
@@ -32,7 +33,7 @@ pub async fn forge_updates(
    web::Query(neo): web::Query<NeoForge>,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    const ERROR: &str = "The specified project does not exist!";

@@ -1,13 +1,14 @@
use crate::auth::validate::get_user_record_from_bearer_token;
use crate::database::redis::RedisPool;
use crate::models::analytics::Download;
use crate::models::ids::ProjectId;
use crate::models::pats::Scopes;
use crate::queue::analytics::AnalyticsQueue;
use crate::queue::download::DownloadQueue;
use crate::queue::maxmind::MaxMindIndexer;
use crate::queue::session::AuthQueue;
use crate::routes::ApiError;
use crate::util::guards::admin_key_guard;
use crate::DownloadQueue;
use actix_web::{patch, web, HttpRequest, HttpResponse};
use chrono::Utc;
use serde::Deserialize;
@@ -37,7 +38,7 @@ pub struct DownloadBody {
pub async fn count_download(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    maxmind: web::Data<Arc<MaxMindIndexer>>,
    analytics_queue: web::Data<Arc<AnalyticsQueue>>,
    session_queue: web::Data<AuthQueue>,

@@ -1,10 +1,5 @@
use super::ApiError;
use actix_web::{get, web, HttpRequest, HttpResponse};
use chrono::{Duration, NaiveDate, Utc};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::collections::HashMap;

use crate::database::redis::RedisPool;
use crate::{
    auth::{filter_authorized_projects, filter_authorized_versions, get_user_from_headers},
    database::models::{project_item, user_item, version_item},
@@ -17,6 +12,11 @@ use crate::{
    },
    queue::session::AuthQueue,
};
use actix_web::{get, web, HttpRequest, HttpResponse};
use chrono::{Duration, NaiveDate, Utc};
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::collections::HashMap;

pub fn config(cfg: &mut web::ServiceConfig) {
    cfg.service(
@@ -70,7 +70,7 @@ pub async fn playtimes_get(
    data: web::Query<GetData>,
    session_queue: web::Data<AuthQueue>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
        &req,
@@ -153,7 +153,7 @@ pub async fn views_get(
    data: web::Query<GetData>,
    session_queue: web::Data<AuthQueue>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
        &req,
@@ -236,7 +236,7 @@ pub async fn downloads_get(
    data: web::Query<GetData>,
    session_queue: web::Data<AuthQueue>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
        &req,
@@ -322,7 +322,7 @@ pub async fn countries_downloads_get(
    data: web::Query<GetData>,
    session_queue: web::Data<AuthQueue>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
        &req,
@@ -406,7 +406,7 @@ pub async fn countries_views_get(
    data: web::Query<GetData>,
    session_queue: web::Data<AuthQueue>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
        &req,
@@ -476,7 +476,7 @@ async fn filter_allowed_ids(
    version_ids: Option<Vec<String>>,
    user_option: Option<crate::models::users::User>,
    pool: &web::Data<PgPool>,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
) -> Result<(Option<Vec<ProjectId>>, Option<Vec<VersionId>>), ApiError> {
    if project_ids.is_some() && version_ids.is_some() {
        return Err(ApiError::InvalidInput(

@@ -1,7 +1,7 @@
use crate::auth::checks::{filter_authorized_collections, is_authorized_collection};
use crate::auth::get_user_from_headers;
use crate::database;
use crate::database::models::{collection_item, generate_collection_id, project_item};
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::collections::{Collection, CollectionStatus};
use crate::models::ids::base62_impl::parse_base62;
@@ -11,6 +11,7 @@ use crate::queue::session::AuthQueue;
use crate::routes::ApiError;
use crate::util::routes::read_from_payload;
use crate::util::validate::validation_errors_to_string;
use crate::{database, models};
use actix_web::web::Data;
use actix_web::{delete, get, patch, post, web, HttpRequest, HttpResponse};
use chrono::Utc;
@@ -56,7 +57,7 @@ pub async fn collection_create(
    req: HttpRequest,
    collection_create_data: web::Json<CollectionCreateData>,
    client: Data<PgPool>,
    redis: Data<deadpool_redis::Pool>,
    redis: Data<RedisPool>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, CreateError> {
    let collection_create_data = collection_create_data.into_inner();
@@ -130,7 +131,7 @@ pub async fn collections_get(
    req: HttpRequest,
    web::Query(ids): web::Query<CollectionIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let ids = serde_json::from_str::<Vec<&str>>(&ids.ids)?;
@@ -162,7 +163,7 @@ pub async fn collection_get(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;
@@ -208,19 +209,18 @@ pub async fn collection_edit(
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    new_collection: web::Json<EditCollection>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::COLLECTION_WRITE]),
    )
    .await
    .map(|x| x.1)
    .ok();
    .await?
    .1;

    new_collection
        .validate()
@@ -231,7 +231,7 @@ pub async fn collection_edit(
    let result = database::models::Collection::get(id, &**pool, &redis).await?;

    if let Some(collection_item) = result {
        if !is_authorized_collection(&collection_item, &user_option).await? {
        if !can_modify_collection(&collection_item, &user) {
            return Ok(HttpResponse::Unauthorized().body(""));
        }

@@ -268,27 +268,25 @@ pub async fn collection_edit(
        }

        if let Some(status) = &new_collection.status {
            if let Some(user) = user_option {
                if !(user.role.is_mod()
                    || collection_item.status.is_approved() && status.can_be_requested())
                {
                    return Err(ApiError::CustomAuthentication(
                        "You don't have permission to set this status!".to_string(),
                    ));
                }

                sqlx::query!(
                    "
                    UPDATE collections
                    SET status = $1
                    WHERE (id = $2)
                    ",
                    status.to_string(),
                    id as database::models::ids::CollectionId,
                )
                .execute(&mut *transaction)
                .await?;
            if !(user.role.is_mod()
                || collection_item.status.is_approved() && status.can_be_requested())
            {
                return Err(ApiError::CustomAuthentication(
                    "You don't have permission to set this status!".to_string(),
                ));
            }

            sqlx::query!(
                "
                UPDATE collections
                SET status = $1
                WHERE (id = $2)
                ",
                status.to_string(),
                id as database::models::ids::CollectionId,
            )
            .execute(&mut *transaction)
            .await?;
        }

        if let Some(new_project_ids) = &new_collection.new_projects {
@@ -348,23 +346,22 @@ pub async fn collection_icon_edit(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    if let Some(content_type) = crate::util::ext::get_image_content_type(&ext.ext) {
        let cdn_url = dotenvy::var("CDN_URL")?;
        let user_option = get_user_from_headers(
        let user = get_user_from_headers(
            &req,
            &**pool,
            &redis,
            &session_queue,
            Some(&[Scopes::COLLECTION_WRITE]),
        )
        .await
        .map(|x| x.1)
        .ok();
        .await?
        .1;

        let string = info.into_inner().0;
        let id = database::models::CollectionId(parse_base62(&string)? as i64);
@@ -374,7 +371,7 @@ pub async fn collection_icon_edit(
            ApiError::InvalidInput("The specified collection does not exist!".to_string())
        })?;

        if !is_authorized_collection(&collection_item, &user_option).await? {
        if !can_modify_collection(&collection_item, &user) {
            return Ok(HttpResponse::Unauthorized().body(""));
        }

@@ -434,20 +431,20 @@ pub async fn delete_collection_icon(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::COLLECTION_WRITE]),
    )
    .await
    .map(|x| x.1)
    .ok();
    .await?
    .1;

    let string = info.into_inner().0;
    let id = database::models::CollectionId(parse_base62(&string)? as i64);
    let collection_item = database::models::Collection::get(id, &**pool, &redis)
@@ -455,7 +452,7 @@ pub async fn delete_collection_icon(
        .ok_or_else(|| {
            ApiError::InvalidInput("The specified collection does not exist!".to_string())
        })?;
    if !is_authorized_collection(&collection_item, &user_option).await? {
    if !can_modify_collection(&collection_item, &user) {
        return Ok(HttpResponse::Unauthorized().body(""));
    }

@@ -493,19 +490,18 @@ pub async fn collection_delete(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user_option = get_user_from_headers(
    let user = get_user_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::COLLECTION_DELETE]),
    )
    .await
    .map(|x| x.1)
    .ok();
    .await?
    .1;

    let string = info.into_inner().0;
    let id = database::models::CollectionId(parse_base62(&string)? as i64);
@@ -514,7 +510,7 @@ pub async fn collection_delete(
        .ok_or_else(|| {
            ApiError::InvalidInput("The specified collection does not exist!".to_string())
        })?;
    if !is_authorized_collection(&collection, &user_option).await? {
    if !can_modify_collection(&collection, &user) {
        return Ok(HttpResponse::Unauthorized().body(""));
    }
    let mut transaction = pool.begin().await?;
@@ -531,3 +527,10 @@ pub async fn collection_delete(
        Ok(HttpResponse::NotFound().body(""))
    }
}

fn can_modify_collection(
    collection: &database::models::Collection,
    user: &models::users::User,
) -> bool {
    collection.user_id == user.id.into() || user.role.is_mod()
}
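Note the authentication change running through the collection handlers above: get_user_from_headers previously degraded to user_option = None via .ok() and the async is_authorized_collection decided access later; now a missing or invalid token short-circuits with ?, and the synchronous can_modify_collection ownership-or-moderator check replaces it. The resulting call pattern, condensed from the hunks above:

let user = get_user_from_headers(
    &req,
    &**pool,
    &redis,
    &session_queue,
    Some(&[Scopes::COLLECTION_WRITE]),
)
.await?  // authentication failure now returns an error here
.1;      // (scopes, user) -> keep the user

if !can_modify_collection(&collection_item, &user) {
    return Ok(HttpResponse::Unauthorized().body(""));
}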

@@ -3,6 +3,7 @@ use std::sync::Arc;
use crate::auth::{get_user_from_headers, is_authorized, is_authorized_version};
use crate::database;
use crate::database::models::{project_item, report_item, thread_item, version_item};
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::ids::{ThreadMessageId, VersionId};
use crate::models::images::{Image, ImageContext};
@@ -41,7 +42,7 @@ pub async fn images_add(
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    if let Some(content_type) = crate::util::ext::get_image_content_type(&data.ext) {

@@ -1,8 +1,9 @@
use super::ApiError;
use crate::auth::check_is_moderator_from_headers;
use crate::database;
use crate::database::redis::RedisPool;
use crate::models::projects::ProjectStatus;
use crate::queue::session::AuthQueue;
use crate::{auth::check_is_moderator_from_headers, models::pats::Scopes};
use actix_web::{get, web, HttpRequest, HttpResponse};
use serde::Deserialize;
use sqlx::PgPool;
@@ -25,11 +26,18 @@ fn default_count() -> i16 {
pub async fn get_projects(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    count: web::Query<ResultCount>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    check_is_moderator_from_headers(&req, &**pool, &redis, &session_queue).await?;
    check_is_moderator_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::PROJECT_READ]),
    )
    .await?;

    use futures::stream::TryStreamExt;
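check_is_moderator_from_headers now takes the required scopes as an extra argument, mirroring get_user_from_headers. Its updated definition is not shown in this diff; a sketch of what the signature presumably looks like (parameter, return, and error types are assumptions):

// Assumed signature (illustration only, not part of this diff):
pub async fn check_is_moderator_from_headers(
    req: &HttpRequest,
    pool: &PgPool,
    redis: &RedisPool,
    session_queue: &AuthQueue,
    required_scopes: Option<&[Scopes]>,
) -> Result<crate::models::users::User, AuthenticationError> {
    // Validate the token, check the requested scopes, then require
    // user.role.is_mod() before returning the user.
    unimplemented!()
}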

@@ -1,5 +1,6 @@
use crate::auth::get_user_from_headers;
use crate::database;
use crate::database::redis::RedisPool;
use crate::models::ids::NotificationId;
use crate::models::notifications::Notification;
use crate::models::pats::Scopes;
@@ -17,7 +18,7 @@ pub fn config(cfg: &mut web::ServiceConfig) {
    cfg.service(
        web::scope("notification")
            .service(notification_get)
            .service(notifications_read)
            .service(notification_read)
            .service(notification_delete),
    );
}
@@ -32,7 +33,7 @@ pub async fn notifications_get(
    req: HttpRequest,
    web::Query(ids): web::Query<NotificationIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -72,7 +73,7 @@ pub async fn notification_get(
    req: HttpRequest,
    info: web::Path<(NotificationId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -106,7 +107,7 @@ pub async fn notification_read(
    req: HttpRequest,
    info: web::Path<(NotificationId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -149,7 +150,7 @@ pub async fn notification_delete(
    req: HttpRequest,
    info: web::Path<(NotificationId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -192,7 +193,7 @@ pub async fn notifications_read(
    req: HttpRequest,
    web::Query(ids): web::Query<NotificationIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -237,7 +238,7 @@ pub async fn notifications_delete(
    req: HttpRequest,
    web::Query(ids): web::Query<NotificationIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -4,6 +4,7 @@ use std::sync::Arc;
use crate::auth::{filter_authorized_projects, get_user_from_headers};
use crate::database::models::team_item::TeamMember;
use crate::database::models::{generate_organization_id, team_item, Organization};
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::ids::base62_impl::parse_base62;
use crate::models::organizations::OrganizationId;
@@ -39,16 +40,14 @@ pub fn config(cfg: &mut web::ServiceConfig) {

#[derive(Deserialize, Validate)]
pub struct NewOrganization {
    #[validate(length(min = 3, max = 256))]
    pub description: String,
    #[validate(
        length(min = 3, max = 64),
        regex = "crate::util::validate::RE_URL_SAFE"
    )]
    // Title of the organization, also used as slug
    pub title: String,
    #[serde(default = "crate::models::teams::ProjectPermissions::default")]
    pub default_project_permissions: ProjectPermissions,
    #[validate(length(min = 3, max = 256))]
    pub description: String,
}

#[post("organization")]
@@ -56,7 +55,7 @@ pub async fn organization_create(
    req: HttpRequest,
    new_organization: web::Json<NewOrganization>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, CreateError> {
    let current_user = get_user_from_headers(
@@ -143,7 +142,7 @@ pub async fn organization_get(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0;
@@ -208,7 +207,7 @@ pub async fn organizations_get(
    req: HttpRequest,
    web::Query(ids): web::Query<OrganizationIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let ids = serde_json::from_str::<Vec<&str>>(&ids.ids)?;
@@ -289,7 +288,6 @@ pub struct OrganizationEdit {
    )]
    // Title of the organization, also used as slug
    pub title: Option<String>,
    pub default_project_permissions: Option<ProjectPermissions>,
}

#[patch("{id}")]
@@ -298,7 +296,7 @@ pub async fn organizations_edit(
    info: web::Path<(String,)>,
    new_organization: web::Json<OrganizationEdit>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -434,7 +432,7 @@ pub async fn organization_delete(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -498,7 +496,7 @@ pub async fn organization_projects_get(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let info = info.into_inner().0;
@@ -507,7 +505,7 @@ pub async fn organization_projects_get(
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::ORGANIZATION_READ]),
        Some(&[Scopes::ORGANIZATION_READ, Scopes::PROJECT_READ]),
    )
    .await
    .map(|x| x.1)
@@ -519,7 +517,7 @@ pub async fn organization_projects_get(
    let project_ids = sqlx::query!(
        "
        SELECT m.id FROM organizations o
        LEFT JOIN mods m ON m.id = o.id
        INNER JOIN mods m ON m.organization_id = o.id
        WHERE (o.id = $1 AND $1 IS NOT NULL) OR (o.title = $2 AND $2 IS NOT NULL)
        ",
        possible_organization_id.map(|x| x as i64),
@@ -547,7 +545,7 @@ pub async fn organization_projects_add(
    info: web::Path<(String,)>,
    project_info: web::Json<OrganizationProjectAdd>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let info = info.into_inner().0;
@@ -649,7 +647,7 @@ pub async fn organization_projects_remove(
    req: HttpRequest,
    info: web::Path<(String, String)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (organization_id, project_id) = info.into_inner();
@@ -743,7 +741,7 @@ pub async fn organization_icon_edit(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    session_queue: web::Data<AuthQueue>,
@@ -848,7 +846,7 @@ pub async fn delete_organization_icon(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -2,6 +2,7 @@ use super::version_creation::InitialVersionData;
use crate::auth::{get_user_from_headers, AuthenticationError};
use crate::database::models::thread_item::ThreadBuilder;
use crate::database::models::{self, image_item};
use crate::database::redis::RedisPool;
use crate::file_hosting::{FileHost, FileHostingError};
use crate::models::error::ApiError;
use crate::models::ids::ImageId;
@@ -283,7 +284,7 @@ pub async fn project_create(
    req: HttpRequest,
    mut payload: Multipart,
    client: Data<PgPool>,
    redis: Data<deadpool_redis::Pool>,
    redis: Data<RedisPool>,
    file_host: Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, CreateError> {
@@ -354,7 +355,7 @@ async fn project_create_inner(
    file_host: &dyn FileHost,
    uploaded_files: &mut Vec<UploadedFile>,
    pool: &PgPool,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
    session_queue: &AuthQueue,
) -> Result<HttpResponse, CreateError> {
    // The base URL for files uploaded to backblaze
@@ -405,7 +406,6 @@ async fn project_create_inner(
                "`data` field must come before file fields",
            )));
        }

        let mut data = Vec::new();
        while let Some(chunk) = field.next().await {
            data.extend_from_slice(&chunk.map_err(CreateError::MultipartError)?);

@@ -3,6 +3,7 @@ use crate::database;
use crate::database::models::image_item;
use crate::database::models::notification_item::NotificationBuilder;
use crate::database::models::thread_item::ThreadMessageBuilder;
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models;
use crate::models::ids::base62_impl::parse_base62;
@@ -79,7 +80,7 @@ pub struct RandomProjects {
pub async fn random_projects_get(
    web::Query(count): web::Query<RandomProjects>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    count
        .validate()
@@ -119,7 +120,7 @@ pub async fn projects_get(
    req: HttpRequest,
    web::Query(ids): web::Query<ProjectIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let ids = serde_json::from_str::<Vec<&str>>(&ids.ids)?;
@@ -146,13 +147,12 @@ pub async fn project_get(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;

    let project_data = database::models::Project::get(&string, &**pool, &redis).await?;

    let user_option = get_user_from_headers(
        &req,
        &**pool,
@@ -177,7 +177,7 @@ pub async fn project_get(
pub async fn project_get_check(
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let slug = info.into_inner().0;

@@ -203,7 +203,7 @@ pub async fn dependency_list(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;
@@ -275,7 +275,7 @@ pub async fn dependency_list(
    }
}

#[derive(Deserialize, Validate)]
#[derive(Serialize, Deserialize, Validate)]
pub struct EditProject {
    #[validate(
        length(min = 3, max = 64),
@@ -381,7 +381,7 @@ pub async fn project_edit(
    pool: web::Data<PgPool>,
    config: web::Data<SearchConfig>,
    new_project: web::Json<EditProject>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -997,7 +997,6 @@ pub async fn project_edit(
            .execute(&mut *transaction)
            .await?;
        }

        if let Some(donations) = &new_project.donation_urls {
            if !perms.contains(ProjectPermissions::EDIT_DETAILS) {
                return Err(ApiError::CustomAuthentication(
@@ -1244,7 +1243,7 @@ pub async fn projects_edit(
    web::Query(ids): web::Query<ProjectIds>,
    pool: web::Data<PgPool>,
    bulk_edit_project: web::Json<BulkEditProject>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -1622,7 +1621,7 @@ pub async fn project_schedule(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
    scheduling_data: web::Json<SchedulingData>,
) -> Result<HttpResponse, ApiError> {
@@ -1724,7 +1723,7 @@ pub async fn project_icon_edit(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    session_queue: web::Data<AuthQueue>,
@@ -1840,7 +1839,7 @@ pub async fn delete_project_icon(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
@@ -1943,7 +1942,7 @@ pub async fn add_gallery_item(
    web::Query(item): web::Query<GalleryCreateQuery>,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    session_queue: web::Data<AuthQueue>,
@@ -2106,7 +2105,7 @@ pub async fn edit_gallery_item(
    web::Query(item): web::Query<GalleryEditQuery>,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -2269,7 +2268,7 @@ pub async fn delete_gallery_item(
    web::Query(item): web::Query<GalleryDeleteQuery>,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
@@ -2375,7 +2374,7 @@ pub async fn project_delete(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    config: web::Data<SearchConfig>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
@@ -2465,7 +2464,7 @@ pub async fn project_follow(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -2544,7 +2543,7 @@ pub async fn project_unfollow(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -2,6 +2,7 @@ use crate::auth::{check_is_moderator_from_headers, get_user_from_headers};
use crate::database;
use crate::database::models::image_item;
use crate::database::models::thread_item::{ThreadBuilder, ThreadMessageBuilder};
use crate::database::redis::RedisPool;
use crate::models::ids::ImageId;
use crate::models::ids::{base62_impl::parse_base62, ProjectId, UserId, VersionId};
use crate::models::images::{Image, ImageContext};
@@ -44,7 +45,7 @@ pub async fn report_create(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    mut body: web::Payload,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let mut transaction = pool.begin().await?;
@@ -235,7 +236,7 @@ fn default_all() -> bool {
pub async fn reports(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    count: web::Query<ReportsRequestOptions>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
@@ -310,7 +311,7 @@ pub async fn reports_get(
    req: HttpRequest,
    web::Query(ids): web::Query<ReportIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let report_ids: Vec<crate::database::models::ids::ReportId> =
@@ -345,7 +346,7 @@ pub async fn reports_get(
pub async fn report_get(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    info: web::Path<(crate::models::reports::ReportId,)>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
@@ -385,7 +386,7 @@ pub struct EditReport {
pub async fn report_edit(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    info: web::Path<(crate::models::reports::ReportId,)>,
    session_queue: web::Data<AuthQueue>,
    edit_report: web::Json<EditReport>,
@@ -404,7 +405,7 @@ pub async fn report_edit(
    let report = crate::database::models::report_item::Report::get(id, &**pool).await?;

    if let Some(report) = report {
        if !user.role.is_mod() && report.user_id != Some(user.id.into()) {
        if !user.role.is_mod() && report.reporter != user.id.into() {
            return Ok(HttpResponse::NotFound().body(""));
        }

@@ -492,10 +493,17 @@ pub async fn report_delete(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    info: web::Path<(crate::models::reports::ReportId,)>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    check_is_moderator_from_headers(&req, &**pool, &redis, &session_queue).await?;
    check_is_moderator_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::REPORT_DELETE]),
    )
    .await?;

    let mut transaction = pool.begin().await?;

@@ -1,6 +1,7 @@
use super::ApiError;
use crate::database::models;
use crate::database::models::categories::{DonationPlatform, ProjectType, ReportType, SideType};
use crate::database::redis::RedisPool;
use actix_web::{get, web, HttpResponse};
use chrono::{DateTime, Utc};
use models::categories::{Category, GameVersion, Loader};
@@ -32,7 +33,7 @@ pub struct CategoryData {
#[get("category")]
pub async fn category_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results = Category::list(&**pool, &redis)
        .await?
@@ -58,7 +59,7 @@ pub struct LoaderData {
#[get("loader")]
pub async fn loader_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let mut results = Loader::list(&**pool, &redis)
        .await?
@@ -94,7 +95,7 @@ pub struct GameVersionQuery {
pub async fn game_version_list(
    pool: web::Data<PgPool>,
    query: web::Query<GameVersionQuery>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results: Vec<GameVersionQueryData> = if query.type_.is_some() || query.major.is_some() {
        GameVersion::list_filter(query.type_.as_deref(), query.major, &**pool, &redis).await?
@@ -172,7 +173,7 @@ pub struct DonationPlatformQueryData {
#[get("donation_platform")]
pub async fn donation_platform_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results: Vec<DonationPlatformQueryData> = DonationPlatform::list(&**pool, &redis)
        .await?
@@ -188,7 +189,7 @@ pub async fn donation_platform_list(
#[get("report_type")]
pub async fn report_type_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results = ReportType::list(&**pool, &redis).await?;
    Ok(HttpResponse::Ok().json(results))
@@ -197,7 +198,7 @@ pub async fn report_type_list(
#[get("project_type")]
pub async fn project_type_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results = ProjectType::list(&**pool, &redis).await?;
    Ok(HttpResponse::Ok().json(results))
@@ -206,7 +207,7 @@ pub async fn project_type_list(
#[get("side_type")]
pub async fn side_type_list(
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let results = SideType::list(&**pool, &redis).await?;
    Ok(HttpResponse::Ok().json(results))

@@ -2,6 +2,7 @@ use crate::auth::{get_user_from_headers, is_authorized};
use crate::database::models::notification_item::NotificationBuilder;
use crate::database::models::team_item::TeamAssociationId;
use crate::database::models::{Organization, Team, TeamMember};
use crate::database::redis::RedisPool;
use crate::database::Project;
use crate::models::notifications::NotificationBody;
use crate::models::pats::Scopes;
@@ -37,7 +38,7 @@ pub async fn team_members_get_project(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;
@@ -116,7 +117,7 @@ pub async fn team_members_get_organization(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;
@@ -182,7 +183,7 @@ pub async fn team_members_get(
    req: HttpRequest,
    info: web::Path<(TeamId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0;
@@ -244,7 +245,7 @@ pub async fn teams_get(
    req: HttpRequest,
    web::Query(ids): web::Query<TeamIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    use itertools::Itertools;
@@ -309,7 +310,7 @@ pub async fn join_team(
    req: HttpRequest,
    info: web::Path<(TeamId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let team_id = info.into_inner().0.into();
@@ -389,7 +390,7 @@ pub async fn add_team_member(
    info: web::Path<(TeamId,)>,
    pool: web::Data<PgPool>,
    new_member: web::Json<NewTeamMember>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let team_id = info.into_inner().0.into();
@@ -452,7 +453,6 @@ pub async fn add_team_member(
            let organization_permissions =
                OrganizationPermissions::get_permissions_by_role(&current_user.role, &member)
                    .unwrap_or_default();
            println!("{:?}", organization_permissions);
            if !organization_permissions.contains(OrganizationPermissions::MANAGE_INVITES) {
                return Err(ApiError::CustomAuthentication(
                    "You don't have permission to invite users to this organization".to_string(),
@@ -571,7 +571,7 @@ pub async fn edit_team_member(
    info: web::Path<(TeamId, UserId)>,
    pool: web::Data<PgPool>,
    edit_member: web::Json<EditTeamMember>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let ids = info.into_inner();
@@ -724,7 +724,7 @@ pub async fn transfer_ownership(
    info: web::Path<(TeamId,)>,
    pool: web::Data<PgPool>,
    new_owner: web::Json<TransferOwnership>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0;
@@ -822,7 +822,7 @@ pub async fn remove_team_member(
    req: HttpRequest,
    info: web::Path<(TeamId, UserId)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let ids = info.into_inner();

@@ -5,6 +5,7 @@ use crate::database;
use crate::database::models::image_item;
use crate::database::models::notification_item::NotificationBuilder;
use crate::database::models::thread_item::ThreadMessageBuilder;
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::ids::ThreadMessageId;
use crate::models::images::{Image, ImageContext};

@@ -83,7 +84,7 @@ pub async fn filter_authorized_threads(
    threads: Vec<database::models::Thread>,
    user: &User,
    pool: &web::Data<PgPool>,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
) -> Result<Vec<Thread>, ApiError> {
    let user_id: database::models::UserId = user.id.into();

@@ -225,7 +226,7 @@ pub async fn thread_get(
    req: HttpRequest,
    info: web::Path<(ThreadId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0.into();

@@ -276,7 +277,7 @@ pub async fn threads_get(
    req: HttpRequest,
    web::Query(ids): web::Query<ThreadIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -313,7 +314,7 @@ pub async fn thread_send_message(
    info: web::Path<(ThreadId,)>,
    pool: web::Data<PgPool>,
    new_message: web::Json<NewThreadMessage>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -508,10 +509,17 @@ pub async fn thread_send_message(
pub async fn moderation_inbox(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = check_is_moderator_from_headers(&req, &**pool, &redis, &session_queue).await?;
    let user = check_is_moderator_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::THREAD_READ]),
    )
    .await?;

    let ids = sqlx::query!(
        "

@@ -527,7 +535,6 @@ pub async fn moderation_inbox(

    let threads_data = database::models::Thread::get_many(&ids, &**pool).await?;
    let threads = filter_authorized_threads(threads_data, &user, &pool, &redis).await?;

    Ok(HttpResponse::Ok().json(threads))
}

@@ -536,10 +543,17 @@ pub async fn thread_read(
    req: HttpRequest,
    info: web::Path<(ThreadId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    check_is_moderator_from_headers(&req, &**pool, &redis, &session_queue).await?;
    check_is_moderator_from_headers(
        &req,
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::THREAD_READ]),
    )
    .await?;

    let id = info.into_inner().0;
    let mut transaction = pool.begin().await?;

@@ -565,7 +579,7 @@ pub async fn message_delete(
    req: HttpRequest,
    info: web::Path<(ThreadMessageId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
) -> Result<HttpResponse, ApiError> {
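Beyond the pool swap, the moderator checks above gain an explicit scope list, so a moderator PAT without THREAD_READ can no longer reach these routes. A sketch of what the updated helper plausibly does under the hood, assuming it forwards to get_user_from_headers and then gates on role (this is not the crate's actual body; `is_mod` is an assumed helper):

    // Sketch, not labrinth's actual implementation.
    pub async fn check_is_moderator_from_headers(
        req: &HttpRequest,
        pool: &PgPool,
        redis: &RedisPool,
        session_queue: &AuthQueue,
        required_scopes: Option<&[Scopes]>,
    ) -> Result<User, ApiError> {
        // Resolve the caller and enforce the requested scopes in one step...
        let (_scopes, user) =
            get_user_from_headers(req, pool, redis, session_queue, required_scopes).await?;
        // ...then gate on the moderator role (assumed helper; the real check may differ).
        if user.role.is_mod() {
            Ok(user)
        } else {
            Err(ApiError::CustomAuthentication(
                "You do not have permission to perform this action".to_string(),
            ))
        }
    }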
@@ -1,5 +1,6 @@
use crate::auth::{get_user_from_headers, AuthenticationError};
use crate::database::models::User;
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::collections::{Collection, CollectionStatus};
use crate::models::notifications::Notification;

@@ -46,7 +47,7 @@ pub fn config(cfg: &mut web::ServiceConfig) {
pub async fn user_auth_get(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (scopes, mut user) = get_user_from_headers(

@@ -66,17 +67,7 @@ pub async fn user_auth_get(
        user.payout_data = None;
    }

    Ok(HttpResponse::Ok().json(
        get_user_from_headers(
            &req,
            &**pool,
            &redis,
            &session_queue,
            Some(&[Scopes::USER_READ]),
        )
        .await?
        .1,
    ))
    Ok(HttpResponse::Ok().json(user))
}

#[derive(Serialize, Deserialize)]
@@ -88,7 +79,7 @@ pub struct UserIds {
pub async fn users_get(
    web::Query(ids): web::Query<UserIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_ids = serde_json::from_str::<Vec<String>>(&ids.ids)?;

@@ -103,7 +94,7 @@ pub async fn users_get(
pub async fn user_get(
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
) -> Result<HttpResponse, ApiError> {
    let user_data = User::get(&info.into_inner().0, &**pool, &redis).await?;

@@ -120,7 +111,7 @@ pub async fn projects_list(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -164,7 +155,7 @@ pub async fn collections_list(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -250,7 +241,7 @@ pub async fn user_edit(
    info: web::Path<(String,)>,
    new_user: web::Json<EditUser>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let (scopes, user) = get_user_from_headers(

@@ -471,7 +462,7 @@ pub async fn user_icon_edit(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_host: web::Data<Arc<dyn FileHost + Send + Sync>>,
    mut payload: web::Payload,
    session_queue: web::Data<AuthQueue>,

@@ -560,7 +551,7 @@ pub async fn user_delete(
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    removal_type: web::Query<RemovalType>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -608,7 +599,7 @@ pub async fn user_follows(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -664,7 +655,7 @@ pub async fn user_notifications(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -712,7 +703,7 @@ pub async fn user_payouts(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(

@@ -797,7 +788,7 @@ pub async fn user_payouts_request(
    pool: web::Data<PgPool>,
    data: web::Json<PayoutData>,
    payouts_queue: web::Data<Mutex<PayoutsQueue>>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let mut payouts_queue = payouts_queue.lock().await;
@@ -5,6 +5,7 @@ use crate::database::models::version_item::{
    DependencyBuilder, VersionBuilder, VersionFileBuilder,
};
use crate::database::models::{self, image_item, Organization};
use crate::database::redis::RedisPool;
use crate::file_hosting::FileHost;
use crate::models::images::{Image, ImageContext, ImageId};
use crate::models::notifications::NotificationBody;

@@ -89,7 +90,7 @@ pub async fn version_create(
    req: HttpRequest,
    mut payload: Multipart,
    client: Data<PgPool>,
    redis: Data<deadpool_redis::Pool>,
    redis: Data<RedisPool>,
    file_host: Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: Data<AuthQueue>,
) -> Result<HttpResponse, CreateError> {

@@ -129,7 +130,7 @@ async fn version_create_inner(
    req: HttpRequest,
    payload: &mut Multipart,
    transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
    file_host: &dyn FileHost,
    uploaded_files: &mut Vec<UploadedFile>,
    pool: &PgPool,

@@ -507,7 +508,7 @@ pub async fn upload_file_to_version(
    url_data: web::Path<(VersionId,)>,
    mut payload: Multipart,
    client: Data<PgPool>,
    redis: Data<deadpool_redis::Pool>,
    redis: Data<RedisPool>,
    file_host: Data<Arc<dyn FileHost + Send + Sync>>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, CreateError> {

@@ -551,7 +552,7 @@ async fn upload_file_to_version_inner(
    payload: &mut Multipart,
    client: Data<PgPool>,
    transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    redis: Data<deadpool_redis::Pool>,
    redis: Data<RedisPool>,
    file_host: &dyn FileHost,
    uploaded_files: &mut Vec<UploadedFile>,
    version_id: models::VersionId,

@@ -729,6 +730,9 @@ async fn upload_file_to_version_inner(
        }
    }

    // Clear version cache
    models::Version::clear_cache(&version, &redis).await?;

    Ok(HttpResponse::NoContent().body(""))
}
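The new `clear_cache` call above is the invalidation half of the RedisPool migration: after a write that changes a version, its cached representation must be dropped so the next read refetches from Postgres. A minimal sketch of the pattern, with hypothetical helper names (the real method is `models::Version::clear_cache`, and `RedisPool::delete` here is an assumption, not a confirmed API):

    // Hypothetical names for illustration only.
    async fn clear_version_cache(
        version_id: u64,
        file_hashes: &[String],
        redis: &RedisPool,
    ) -> Result<(), ApiError> {
        // Drop the cached version record itself...
        redis.delete("versions", &version_id.to_string()).await?;
        // ...and every hash -> version lookup that could now serve stale data.
        for hash in file_hashes {
            redis.delete("version_files", hash).await?;
        }
        Ok(())
    }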
@@ -3,6 +3,7 @@ use crate::auth::{
    filter_authorized_projects, filter_authorized_versions, get_user_from_headers,
    is_authorized_version,
};
use crate::database::redis::RedisPool;
use crate::models::ids::VersionId;
use crate::models::pats::Scopes;
use crate::models::projects::VersionType;

@@ -21,7 +22,8 @@ pub fn config(cfg: &mut web::ServiceConfig) {
            .service(delete_file)
            .service(get_version_from_hash)
            .service(download_version)
            .service(get_update_from_hash),
            .service(get_update_from_hash)
            .service(get_projects_from_hashes),
    );

    cfg.service(

@@ -32,7 +34,7 @@ pub fn config(cfg: &mut web::ServiceConfig) {
    );
}

#[derive(Deserialize)]
#[derive(Serialize, Deserialize)]
pub struct HashQuery {
    #[serde(default = "default_algorithm")]
    pub algorithm: String,

@@ -49,7 +51,7 @@ pub async fn get_version_from_hash(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    hash_query: web::Query<HashQuery>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -63,7 +65,6 @@ pub async fn get_version_from_hash(
    .await
    .map(|x| x.1)
    .ok();

    let hash = info.into_inner().0.to_lowercase();
    let file = database::models::Version::get_file_from_hash(
        hash_query.algorithm.clone(),

@@ -73,10 +74,8 @@ pub async fn get_version_from_hash(
        &redis,
    )
    .await?;

    if let Some(file) = file {
        let version = database::models::Version::get(file.version_id, &**pool, &redis).await?;

        if let Some(version) = version {
            if !is_authorized_version(&version.inner, &user_option, &pool).await? {
                return Ok(HttpResponse::NotFound().body(""));

@@ -102,7 +101,7 @@ pub async fn download_version(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    hash_query: web::Query<HashQuery>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -152,7 +151,7 @@ pub async fn delete_file(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    hash_query: web::Query<HashQuery>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -274,7 +273,7 @@ pub async fn get_update_from_hash(
    req: HttpRequest,
    info: web::Path<(String,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    hash_query: web::Query<HashQuery>,
    update_data: web::Json<UpdateData>,
    session_queue: web::Data<AuthQueue>,

@@ -343,6 +342,7 @@ pub async fn get_update_from_hash(
// Requests above with multiple versions below
#[derive(Deserialize)]
pub struct FileHashes {
    #[serde(default = "default_algorithm")]
    pub algorithm: String,
    pub hashes: Vec<String>,
}

@@ -352,7 +352,7 @@ pub struct FileHashes {
pub async fn get_versions_from_hashes(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_data: web::Json<FileHashes>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -400,7 +400,7 @@ pub async fn get_versions_from_hashes(
pub async fn get_projects_from_hashes(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    file_data: web::Json<FileHashes>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -409,7 +409,7 @@ pub async fn get_projects_from_hashes(
        &**pool,
        &redis,
        &session_queue,
        Some(&[Scopes::VERSION_READ]),
        Some(&[Scopes::PROJECT_READ, Scopes::VERSION_READ]),
    )
    .await
    .map(|x| x.1)

@@ -447,6 +447,7 @@ pub async fn get_projects_from_hashes(

#[derive(Deserialize)]
pub struct ManyUpdateData {
    #[serde(default = "default_algorithm")]
    pub algorithm: String,
    pub hashes: Vec<String>,
    pub loaders: Option<Vec<String>>,

@@ -458,7 +459,7 @@ pub struct ManyUpdateData {
pub async fn update_files(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    update_data: web::Json<ManyUpdateData>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -550,6 +551,7 @@ pub struct FileUpdateData {

#[derive(Deserialize)]
pub struct ManyFileUpdateData {
    #[serde(default = "default_algorithm")]
    pub algorithm: String,
    pub hashes: Vec<FileUpdateData>,
}

@@ -558,7 +560,7 @@ pub struct ManyFileUpdateData {
pub async fn update_individual_files(
    req: HttpRequest,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    update_data: web::Json<ManyFileUpdateData>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
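The `#[serde(default = "default_algorithm")]` attribute added to the multi-hash payloads above lets clients omit `algorithm` and fall back to whatever `default_algorithm` returns. A self-contained illustration of the mechanism (the "sha1" constant here is an assumption for the example, not a quote of labrinth's actual default):

    use serde::Deserialize;

    fn default_algorithm() -> String {
        "sha1".to_string() // assumed default, for illustration
    }

    #[derive(Deserialize)]
    struct FileHashes {
        #[serde(default = "default_algorithm")]
        algorithm: String,
        hashes: Vec<String>,
    }

    fn main() {
        // "algorithm" omitted: serde fills it in from default_algorithm().
        let parsed: FileHashes = serde_json::from_str(r#"{"hashes": ["abc123"]}"#).unwrap();
        assert_eq!(parsed.algorithm, "sha1");
    }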
@@ -4,6 +4,7 @@ use crate::auth::{
};
use crate::database;
use crate::database::models::{image_item, Organization};
use crate::database::redis::RedisPool;
use crate::models;
use crate::models::ids::base62_impl::parse_base62;
use crate::models::images::ImageContext;

@@ -49,7 +50,7 @@ pub async fn version_list(
    info: web::Path<(String,)>,
    web::Query(filters): web::Query<VersionListFilters>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let string = info.into_inner().0;

@@ -170,7 +171,7 @@ pub async fn version_project_get(
    req: HttpRequest,
    info: web::Path<(String, String)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner();

@@ -221,7 +222,7 @@ pub async fn versions_get(
    req: HttpRequest,
    web::Query(ids): web::Query<VersionIds>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let version_ids = serde_json::from_str::<Vec<models::ids::VersionId>>(&ids.ids)?

@@ -251,7 +252,7 @@ pub async fn version_get(
    req: HttpRequest,
    info: web::Path<(models::ids::VersionId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let id = info.into_inner().0;

@@ -318,7 +319,7 @@ pub async fn version_edit(
    req: HttpRequest,
    info: web::Path<(models::ids::VersionId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    new_version: web::Json<EditVersion>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -738,7 +739,7 @@ pub async fn version_schedule(
    req: HttpRequest,
    info: web::Path<(models::ids::VersionId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    scheduling_data: web::Json<SchedulingData>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {

@@ -835,7 +836,7 @@ pub async fn version_delete(
    req: HttpRequest,
    info: web::Path<(models::ids::VersionId,)>,
    pool: web::Data<PgPool>,
    redis: web::Data<deadpool_redis::Pool>,
    redis: web::Data<RedisPool>,
    session_queue: web::Data<AuthQueue>,
) -> Result<HttpResponse, ApiError> {
    let user = get_user_from_headers(
@@ -1,12 +1,12 @@
use crate::database;
use crate::database::models::image_item;
use crate::database::redis::RedisPool;
use crate::models::images::ImageContext;
use crate::routes::ApiError;
use color_thief::ColorFormat;
use image::imageops::FilterType;
use image::{EncodableLayout, ImageError};

use crate::database;
use crate::database::models::image_item;
use crate::models::images::ImageContext;
use crate::routes::ApiError;

pub fn get_color_from_img(data: &[u8]) -> Result<Option<u32>, ImageError> {
    let image = image::load_from_memory(data)?
        .resize(256, 256, FilterType::Nearest)

@@ -26,7 +26,7 @@ pub async fn delete_unused_images(
    context: ImageContext,
    reference_strings: Vec<&str>,
    transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
) -> Result<(), ApiError> {
    let uploaded_images = database::models::Image::get_many_contexted(context, transaction).await?;
@@ -1,4 +1,5 @@
use crate::database::models::categories::GameVersion;
use crate::database::redis::RedisPool;
use crate::models::projects::ProjectId;
use crate::routes::ApiError;
use chrono::{DateTime, Utc};

@@ -72,7 +73,7 @@ const PLUGIN_LOADERS: &[&str] = &[
pub async fn send_discord_webhook(
    project_id: ProjectId,
    pool: &PgPool,
    redis: &deadpool_redis::Pool,
    redis: &RedisPool,
    webhook_url: String,
    message: Option<String>,
) -> Result<(), ApiError> {
82
tests/common/actix.rs
Normal file
@@ -0,0 +1,82 @@
use actix_web::test::TestRequest;
use bytes::{Bytes, BytesMut};

// Multipart functionality (actix-test does not innately support multipart)
#[derive(Debug, Clone)]
pub struct MultipartSegment {
    pub name: String,
    pub filename: Option<String>,
    pub content_type: Option<String>,
    pub data: MultipartSegmentData,
}

#[derive(Debug, Clone)]
#[allow(dead_code)]
pub enum MultipartSegmentData {
    Text(String),
    Binary(Vec<u8>),
}

pub trait AppendsMultipart {
    fn set_multipart(self, data: Vec<MultipartSegment>) -> Self;
}

impl AppendsMultipart for TestRequest {
    fn set_multipart(self, data: Vec<MultipartSegment>) -> Self {
        let (boundary, payload) = generate_multipart(data);
        self.append_header((
            "Content-Type",
            format!("multipart/form-data; boundary={}", boundary),
        ))
        .set_payload(payload)
    }
}

fn generate_multipart(data: Vec<MultipartSegment>) -> (String, Bytes) {
    let mut boundary = String::from("----WebKitFormBoundary");
    boundary.push_str(&rand::random::<u64>().to_string());
    boundary.push_str(&rand::random::<u64>().to_string());
    boundary.push_str(&rand::random::<u64>().to_string());

    let mut payload = BytesMut::new();

    for segment in data {
        payload.extend_from_slice(
            format!(
                "--{boundary}\r\nContent-Disposition: form-data; name=\"{name}\"",
                boundary = boundary,
                name = segment.name
            )
            .as_bytes(),
        );

        if let Some(filename) = &segment.filename {
            payload.extend_from_slice(
                format!("; filename=\"{filename}\"", filename = filename).as_bytes(),
            );
        }
        if let Some(content_type) = &segment.content_type {
            payload.extend_from_slice(
                format!(
                    "\r\nContent-Type: {content_type}",
                    content_type = content_type
                )
                .as_bytes(),
            );
        }
        payload.extend_from_slice(b"\r\n\r\n");

        match &segment.data {
            MultipartSegmentData::Text(text) => {
                payload.extend_from_slice(text.as_bytes());
            }
            MultipartSegmentData::Binary(binary) => {
                payload.extend_from_slice(binary);
            }
        }
        payload.extend_from_slice(b"\r\n");
    }
    payload.extend_from_slice(format!("--{boundary}--\r\n", boundary = boundary).as_bytes());

    (boundary, Bytes::from(payload))
}
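For reference, the body generate_multipart builds follows the standard multipart/form-data wire format. A sketch of what it emits for a single JSON segment, written out as literal bytes (CRLF endings; the real boundary is "----WebKitFormBoundary" plus three random u64s, shortened here for readability):

    // Sketch: the wire shape produced by the helper above for one "data" segment.
    fn example_multipart_body() -> &'static str {
        "--BOUNDARY\r\n\
         Content-Disposition: form-data; name=\"data\"\r\n\
         Content-Type: application/json\r\n\
         \r\n\
         {\"title\": \"Example\"}\r\n\
         --BOUNDARY--\r\n"
    }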
134
tests/common/database.rs
Normal file
@@ -0,0 +1,134 @@
#![allow(dead_code)]

use labrinth::database::redis::RedisPool;
use sqlx::{postgres::PgPoolOptions, PgPool};
use std::time::Duration;
use url::Url;

// The dummy test database adds a fair bit of 'dummy' data to test with.
// Some constants are used to refer to that data, and are described here.
// The rest can be accessed in the TestEnvironment 'dummy' field.

// The user IDs are as follows:
pub const ADMIN_USER_ID: &str = "1";
pub const MOD_USER_ID: &str = "2";
pub const USER_USER_ID: &str = "3"; // This is the 'main' user ID, and is used for most tests.
pub const FRIEND_USER_ID: &str = "4"; // Same permissions as USER_USER_ID; simulates another user who should be granted access (ie: teams, etc)
pub const ENEMY_USER_ID: &str = "5"; // Same permissions as USER_USER_ID; simulates another user who should NOT be granted access

pub const ADMIN_USER_ID_PARSED: i64 = 1;
pub const MOD_USER_ID_PARSED: i64 = 2;
pub const USER_USER_ID_PARSED: i64 = 3;
pub const FRIEND_USER_ID_PARSED: i64 = 4;
pub const ENEMY_USER_ID_PARSED: i64 = 5;

// These are full-scoped PATs, as if the user was logged in (including illegal scopes).
pub const ADMIN_USER_PAT: &str = "mrp_patadmin";
pub const MOD_USER_PAT: &str = "mrp_patmoderator";
pub const USER_USER_PAT: &str = "mrp_patuser";
pub const FRIEND_USER_PAT: &str = "mrp_patfriend";
pub const ENEMY_USER_PAT: &str = "mrp_patenemy";

pub struct TemporaryDatabase {
    pub pool: PgPool,
    pub redis_pool: RedisPool,
    pub database_name: String,
}

impl TemporaryDatabase {
    // Creates a temporary database like sqlx::test does:
    // 1. Logs into the main database
    // 2. Creates a new randomly generated database
    // 3. Runs migrations on the new database
    // 4. (Optionally, by using create_with_dummy) adds dummy data to the database
    // A db created this way must be cleaned up with cleanup.
    // This means that dbs will only 'remain' if a test fails (for examination of the db), and will be cleaned up otherwise.
    pub async fn create() -> Self {
        let temp_database_name = generate_random_database_name();
        println!("Creating temporary database: {}", &temp_database_name);

        let database_url = dotenvy::var("DATABASE_URL").expect("No database URL");
        let mut url = Url::parse(&database_url).expect("Invalid database URL");
        let pool = PgPool::connect(&database_url)
            .await
            .expect("Connection to database failed");

        // Create the temporary database
        let create_db_query = format!("CREATE DATABASE {}", &temp_database_name);

        sqlx::query(&create_db_query)
            .execute(&pool)
            .await
            .expect("Database creation failed");

        pool.close().await;

        // Modify the URL to switch to the temporary database
        url.set_path(&format!("/{}", &temp_database_name));
        let temp_db_url = url.to_string();

        let pool = PgPoolOptions::new()
            .min_connections(0)
            .max_connections(4)
            .max_lifetime(Some(Duration::from_secs(60 * 60)))
            .connect(&temp_db_url)
            .await
            .expect("Connection to temporary database failed");

        // Perform migrations
        let migrations = sqlx::migrate!("./migrations");
        migrations.run(&pool).await.expect("Migrations failed");

        // Get a new Redis pool, namespaced to this database
        let redis_pool = RedisPool::new(Some(temp_database_name.clone()));

        Self {
            pool,
            database_name: temp_database_name,
            redis_pool,
        }
    }

    // Deletes the temporary database.
    // Must be called on every db created with create/create_with_dummy;
    // dbs only 'remain' if a test fails (for examination of the db), and are cleaned up otherwise.
    pub async fn cleanup(mut self) {
        let database_url = dotenvy::var("DATABASE_URL").expect("No database URL");
        self.pool.close().await;

        self.pool = PgPool::connect(&database_url)
            .await
            .expect("Connection to main database failed");

        // Forcibly terminate all existing connections to this version of the temporary database.
        // We are done and deleting it, so we don't need them anymore.
        let terminate_query = format!(
            "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE datname = '{}' AND pid <> pg_backend_pid()",
            &self.database_name
        );
        sqlx::query(&terminate_query)
            .execute(&self.pool)
            .await
            .unwrap();

        // Execute the deletion query
        let drop_db_query = format!("DROP DATABASE IF EXISTS {}", &self.database_name);
        sqlx::query(&drop_db_query)
            .execute(&self.pool)
            .await
            .expect("Database deletion failed");
    }
}

fn generate_random_database_name() -> String {
    // Generate a unique name: "labrinth_tests_db_" followed by
    // the first 6 digits of a random u64.
    let mut database_name = String::from("labrinth_tests_db_");
    database_name.push_str(&rand::random::<u64>().to_string()[..6]);
    database_name
}
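A minimal end-to-end use of this helper, assuming the test runs under #[actix_rt::test] with DATABASE_URL pointing at the dev Postgres (in practice, tests go through TestEnvironment below rather than using it directly):

    // Sketch: direct use of TemporaryDatabase; real tests wrap this in TestEnvironment.
    #[actix_rt::test]
    async fn scratch_database_roundtrip() {
        let db = TemporaryDatabase::create().await;

        // The pool points at a freshly migrated, empty database...
        let count: (i64,) = sqlx::query_as("SELECT COUNT(*) FROM users")
            .fetch_one(&db.pool)
            .await
            .unwrap();
        assert_eq!(count.0, 0);

        // ...and cleanup drops it, so only failed runs leave state behind.
        db.cleanup().await;
    }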
229
tests/common/dummy_data.rs
Normal file
@@ -0,0 +1,229 @@
use actix_web::test::{self, TestRequest};
use labrinth::{models::projects::Project, models::projects::Version};
use serde_json::json;
use sqlx::Executor;

use crate::common::{
    actix::AppendsMultipart,
    database::{MOD_USER_PAT, USER_USER_PAT},
};

use super::{
    actix::{MultipartSegment, MultipartSegmentData},
    environment::TestEnvironment,
};

pub struct DummyData {
    pub alpha_team_id: String,
    pub beta_team_id: String,

    pub alpha_project_id: String,
    pub beta_project_id: String,

    pub alpha_project_slug: String,
    pub beta_project_slug: String,

    pub alpha_version_id: String,
    pub beta_version_id: String,

    pub alpha_thread_id: String,
    pub beta_thread_id: String,

    pub alpha_file_hash: String,
    pub beta_file_hash: String,
}

pub async fn add_dummy_data(test_env: &TestEnvironment) -> DummyData {
    // Adds basic dummy data to the database directly with sql (user, pats)
    let pool = &test_env.db.pool.clone();
    pool.execute(include_str!("../files/dummy_data.sql"))
        .await
        .unwrap();

    let (alpha_project, alpha_version) = add_project_alpha(test_env).await;
    let (beta_project, beta_version) = add_project_beta(test_env).await;

    DummyData {
        alpha_team_id: alpha_project.team.to_string(),
        beta_team_id: beta_project.team.to_string(),

        alpha_project_id: alpha_project.id.to_string(),
        beta_project_id: beta_project.id.to_string(),

        alpha_project_slug: alpha_project.slug.unwrap(),
        beta_project_slug: beta_project.slug.unwrap(),

        alpha_version_id: alpha_version.id.to_string(),
        beta_version_id: beta_version.id.to_string(),

        alpha_thread_id: alpha_project.thread_id.to_string(),
        beta_thread_id: beta_project.thread_id.to_string(),

        alpha_file_hash: alpha_version.files[0].hashes["sha1"].clone(),
        beta_file_hash: beta_version.files[0].hashes["sha1"].clone(),
    }
}

pub async fn add_project_alpha(test_env: &TestEnvironment) -> (Project, Version) {
    // Adds dummy data to the database with sqlx (projects, versions, threads)
    // Generate test project data.
    let json_data = json!(
        {
            "title": "Test Project Alpha",
            "slug": "alpha",
            "description": "A dummy project for testing with.",
            "body": "This project is approved, and versions are listed.",
            "client_side": "required",
            "server_side": "optional",
            "initial_versions": [{
                "file_parts": ["dummy-project-alpha.jar"],
                "version_number": "1.2.3",
                "version_title": "start",
                "dependencies": [],
                "game_versions": ["1.20.1"],
                "release_channel": "release",
                "loaders": ["fabric"],
                "featured": true
            }],
            "categories": [],
            "license_id": "MIT"
        }
    );

    // Basic json
    let json_segment = MultipartSegment {
        name: "data".to_string(),
        filename: None,
        content_type: Some("application/json".to_string()),
        data: MultipartSegmentData::Text(serde_json::to_string(&json_data).unwrap()),
    };

    // Basic file
    let file_segment = MultipartSegment {
        name: "dummy-project-alpha.jar".to_string(),
        filename: Some("dummy-project-alpha.jar".to_string()),
        content_type: Some("application/java-archive".to_string()),
        data: MultipartSegmentData::Binary(
            include_bytes!("../../tests/files/dummy-project-alpha.jar").to_vec(),
        ),
    };

    // Add a project.
    let req = TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![json_segment.clone(), file_segment.clone()])
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    // Approve as a moderator.
    let req = TestRequest::patch()
        .uri("/v2/project/alpha")
        .append_header(("Authorization", MOD_USER_PAT))
        .set_json(json!(
            {
                "status": "approved"
            }
        ))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 204);

    // Get project
    let req = TestRequest::get()
        .uri("/v2/project/alpha")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let project: Project = test::read_body_json(resp).await;

    // Get project's versions
    let req = TestRequest::get()
        .uri("/v2/project/alpha/version")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let versions: Vec<Version> = test::read_body_json(resp).await;
    let version = versions.into_iter().next().unwrap();

    (project, version)
}

pub async fn add_project_beta(test_env: &TestEnvironment) -> (Project, Version) {
    // Adds dummy data to the database with sqlx (projects, versions, threads)
    // Generate test project data.
    let json_data = json!(
        {
            "title": "Test Project Beta",
            "slug": "beta",
            "description": "A dummy project for testing with.",
            "body": "This project is not-yet-approved, and versions are draft.",
            "client_side": "required",
            "server_side": "optional",
            "initial_versions": [{
                "file_parts": ["dummy-project-beta.jar"],
                "version_number": "1.2.3",
                "version_title": "start",
                "status": "unlisted",
                "requested_status": "unlisted",
                "dependencies": [],
                "game_versions": ["1.20.1"],
                "release_channel": "release",
                "loaders": ["fabric"],
                "featured": true
            }],
            "status": "private",
            "requested_status": "private",
            "categories": [],
            "license_id": "MIT"
        }
    );

    // Basic json
    let json_segment = MultipartSegment {
        name: "data".to_string(),
        filename: None,
        content_type: Some("application/json".to_string()),
        data: MultipartSegmentData::Text(serde_json::to_string(&json_data).unwrap()),
    };

    // Basic file
    let file_segment = MultipartSegment {
        name: "dummy-project-beta.jar".to_string(),
        filename: Some("dummy-project-beta.jar".to_string()),
        content_type: Some("application/java-archive".to_string()),
        data: MultipartSegmentData::Binary(
            include_bytes!("../../tests/files/dummy-project-beta.jar").to_vec(),
        ),
    };

    // Add a project.
    let req = TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![json_segment.clone(), file_segment.clone()])
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    // Get project
    let req = TestRequest::get()
        .uri("/v2/project/beta")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let project: Project = test::read_body_json(resp).await;

    // Get project's versions
    let req = TestRequest::get()
        .uri("/v2/project/beta/version")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let versions: Vec<Version> = test::read_body_json(resp).await;
    let version = versions.into_iter().next().unwrap();

    (project, version)
}
71
tests/common/environment.rs
Normal file
@@ -0,0 +1,71 @@
#![allow(dead_code)]

use super::{database::TemporaryDatabase, dummy_data};
use crate::common::setup;
use actix_web::{dev::ServiceResponse, test, App};

// A complete test environment, with a test actix app and a database.
// Must be called in an #[actix_rt::test] context. It also simulates a
// temporary sqlx db like #[sqlx::test] would.
// Use .call(req) on it directly to make a test call as if test::call_service(req) were being used.
pub struct TestEnvironment {
    test_app: Box<dyn LocalService>,
    pub db: TemporaryDatabase,

    pub dummy: Option<dummy_data::DummyData>,
}

impl TestEnvironment {
    pub async fn build_with_dummy() -> Self {
        let mut test_env = Self::build().await;
        let dummy = dummy_data::add_dummy_data(&test_env).await;
        test_env.dummy = Some(dummy);
        test_env
    }

    pub async fn build() -> Self {
        let db = TemporaryDatabase::create().await;
        let labrinth_config = setup(&db).await;
        let app = App::new().configure(|cfg| labrinth::app_config(cfg, labrinth_config.clone()));
        let test_app = test::init_service(app).await;
        Self {
            test_app: Box::new(test_app),
            db,
            dummy: None,
        }
    }

    pub async fn cleanup(self) {
        self.db.cleanup().await;
    }

    pub async fn call(&self, req: actix_http::Request) -> ServiceResponse {
        self.test_app.call(req).await.unwrap()
    }
}

trait LocalService {
    fn call(
        &self,
        req: actix_http::Request,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>,
    >;
}
impl<S> LocalService for S
where
    S: actix_web::dev::Service<
        actix_http::Request,
        Response = ServiceResponse,
        Error = actix_web::Error,
    >,
    S::Future: 'static,
{
    fn call(
        &self,
        req: actix_http::Request,
    ) -> std::pin::Pin<
        Box<dyn std::future::Future<Output = Result<ServiceResponse, actix_web::Error>>>,
    > {
        Box::pin(self.call(req))
    }
}
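Putting the pieces together, the standard test shape built on TestEnvironment looks like this (a sketch mirroring the real tests in tests/pats.rs below; it assumes the dummy alpha project and USER_USER_PAT from common::database):

    // Sketch of a typical test using the environment above.
    #[actix_rt::test]
    async fn gets_dummy_project() {
        let test_env = TestEnvironment::build_with_dummy().await;

        let req = actix_web::test::TestRequest::get()
            .uri("/v2/project/alpha")
            .append_header(("Authorization", USER_USER_PAT))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status(), 200);

        // Always drop the scratch database on success.
        test_env.cleanup().await;
    }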
40
tests/common/mod.rs
Normal file
@@ -0,0 +1,40 @@
use labrinth::{check_env_vars, clickhouse};
use labrinth::{file_hosting, queue, LabrinthConfig};
use std::sync::Arc;

use self::database::TemporaryDatabase;

pub mod actix;
pub mod database;
pub mod dummy_data;
pub mod environment;
pub mod pats;
pub mod scopes;

// Testing equivalent to the 'setup' function, producing a LabrinthConfig.
// If making a test, you should probably use environment::TestEnvironment::build_with_dummy() (which calls this).
pub async fn setup(db: &TemporaryDatabase) -> LabrinthConfig {
    println!("Setting up labrinth config");

    dotenvy::dotenv().ok();

    if check_env_vars() {
        println!("Some environment variables are missing!");
    }

    let pool = db.pool.clone();
    let redis_pool = db.redis_pool.clone();
    let file_host: Arc<dyn file_hosting::FileHost + Send + Sync> =
        Arc::new(file_hosting::MockHost::new());
    let mut clickhouse = clickhouse::init_client().await.unwrap();

    let maxmind_reader = Arc::new(queue::maxmind::MaxMindIndexer::new().await.unwrap());

    labrinth::app_setup(
        pool.clone(),
        redis_pool.clone(),
        &mut clickhouse,
        file_host.clone(),
        maxmind_reader.clone(),
    )
}
30
tests/common/pats.rs
Normal file
@@ -0,0 +1,30 @@
#![allow(dead_code)]

use chrono::Utc;
use labrinth::{
    database::{self, models::generate_pat_id},
    models::pats::Scopes,
};

use super::database::TemporaryDatabase;

// Creates a PAT with the given scopes, and returns the access token.
// Interfacing with the db directly, rather than using a route,
// allows us to test with scopes that are not allowed to be created by PATs.
pub async fn create_test_pat(scopes: Scopes, user_id: i64, db: &TemporaryDatabase) -> String {
    let mut transaction = db.pool.begin().await.unwrap();
    let id = generate_pat_id(&mut transaction).await.unwrap();
    let pat = database::models::pat_item::PersonalAccessToken {
        id,
        name: format!("test_pat_{}", scopes.bits()),
        access_token: format!("mrp_{}", id.0),
        scopes,
        user_id: database::models::ids::UserId(user_id),
        created: Utc::now(),
        expires: Utc::now() + chrono::Duration::days(1),
        last_used: None,
    };
    pat.insert(&mut transaction).await.unwrap();
    transaction.commit().await.unwrap();
    pat.access_token
}
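Used like so inside a test body (ScopeTest below wraps exactly this call); a sketch assuming a TestEnvironment named test_env is in scope:

    // Mint a PAT carrying only PROJECT_READ and attach it to a request.
    let token = create_test_pat(Scopes::PROJECT_READ, USER_USER_ID_PARSED, &test_env.db).await;
    let req = actix_web::test::TestRequest::get()
        .uri("/v2/project/alpha")
        .append_header(("Authorization", token.as_str()))
        .to_request();
    let resp = test_env.call(req).await;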
124
tests/common/scopes.rs
Normal file
@@ -0,0 +1,124 @@
#![allow(dead_code)]
use actix_web::test::{self, TestRequest};
use labrinth::models::pats::Scopes;

use super::{database::USER_USER_ID_PARSED, environment::TestEnvironment, pats::create_test_pat};

// A reusable test type that works for any scope test of an endpoint that:
// - returns a known 'expected_failure_code' if the scope is not present (defaults to 401)
// - returns a 200-299 if the scope is present
// - returns failure and success JSON bodies for requests that are 200 (for performing non-simple follow-up tests on)
// This uses a builder format, so you can chain methods to set the parameters to non-defaults (most will usually not need to be set).
pub struct ScopeTest<'a> {
    test_env: &'a TestEnvironment,
    // Scopes expected to fail on this test. By default, this is all scopes except the success scopes.
    // (To ensure we have isolated the scope we are testing)
    failure_scopes: Option<Scopes>,
    // User ID to use for the PATs. By default, this is the USER_USER_ID_PARSED constant.
    user_id: i64,
    // The code that is expected to be returned if the scope is not present. By default, this is 401 (Unauthorized).
    expected_failure_code: u16,
}

impl<'a> ScopeTest<'a> {
    pub fn new(test_env: &'a TestEnvironment) -> Self {
        Self {
            test_env,
            failure_scopes: None,
            user_id: USER_USER_ID_PARSED,
            expected_failure_code: 401,
        }
    }

    // Set non-standard failure scopes.
    // If not set, it will be set to all scopes except the success scopes.
    // (eg: if a combination of scopes is needed, but you want to make sure that the endpoint does not work with all-but-one of them)
    pub fn with_failure_scopes(mut self, scopes: Scopes) -> Self {
        self.failure_scopes = Some(scopes);
        self
    }

    // Set the user ID to use.
    // (eg: a moderator, or friend)
    pub fn with_user_id(mut self, user_id: i64) -> Self {
        self.user_id = user_id;
        self
    }

    // If a non-401 code is expected.
    // (eg: a 404 for a hidden resource, or 200 for a resource with hidden values deeper in)
    pub fn with_failure_code(mut self, code: u16) -> Self {
        self.expected_failure_code = code;
        self
    }

    // Call the endpoint generated by req_gen twice, once with a PAT with the failure scopes, and once with the success scopes.
    // success_scopes: the scopes that we are testing should succeed.
    // Returns a tuple of (failure_body, success_body).
    // Returns a String error on an unexpected status code, allowing unwrapping in tests.
    pub async fn test<T>(
        &self,
        req_gen: T,
        success_scopes: Scopes,
    ) -> Result<(serde_json::Value, serde_json::Value), String>
    where
        T: Fn() -> TestRequest,
    {
        // First, create a PAT with failure scopes
        let failure_scopes = self
            .failure_scopes
            .unwrap_or(Scopes::all() ^ success_scopes);
        let access_token_all_others =
            create_test_pat(failure_scopes, self.user_id, &self.test_env.db).await;

        // Create a PAT with the success scopes
        let access_token = create_test_pat(success_scopes, self.user_id, &self.test_env.db).await;

        // Perform the test twice, once with each PAT.
        // The first time, we expect a 401 (or the known failure code).
        let req = req_gen()
            .append_header(("Authorization", access_token_all_others.as_str()))
            .to_request();
        let resp = self.test_env.call(req).await;

        if resp.status().as_u16() != self.expected_failure_code {
            return Err(format!(
                "Expected failure code {}, got {}",
                self.expected_failure_code,
                resp.status().as_u16()
            ));
        }

        let failure_body = if resp.status() == 200
            && resp.headers().contains_key("Content-Type")
            && resp.headers().get("Content-Type").unwrap() == "application/json"
        {
            test::read_body_json(resp).await
        } else {
            serde_json::Value::Null
        };

        // The second time, we expect a success code
        let req = req_gen()
            .append_header(("Authorization", access_token.as_str()))
            .to_request();
        let resp = self.test_env.call(req).await;

        if !(resp.status().is_success() || resp.status().is_redirection()) {
            return Err(format!(
                "Expected success code, got {}",
                resp.status().as_u16()
            ));
        }

        let success_body = if resp.status() == 200
            && resp.headers().contains_key("Content-Type")
            && resp.headers().get("Content-Type").unwrap() == "application/json"
        {
            test::read_body_json(resp).await
        } else {
            serde_json::Value::Null
        };
        Ok((failure_body, success_body))
    }
}
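A representative call, sketched with a placeholder URI (substitute the route under test; the real scope suites are not shown in this commit excerpt):

    // Sketch: assert an endpoint succeeds only with THREAD_READ, as a moderator.
    let test_env = TestEnvironment::build_with_dummy().await;
    let (_failure_body, success_body) = ScopeTest::new(&test_env)
        .with_user_id(MOD_USER_ID_PARSED) // moderator-only route
        .test(
            || actix_web::test::TestRequest::get().uri("/v2/thread/inbox"), // placeholder URI
            Scopes::THREAD_READ,
        )
        .await
        .unwrap();
    println!("{:?}", success_body);
    test_env.cleanup().await;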
BIN
tests/files/200x200.png
Normal file
Binary file not shown.
After Width: | Height: | Size: 606 B |

BIN
tests/files/basic-mod-different.jar
Normal file
Binary file not shown.

BIN
tests/files/basic-mod.jar
Normal file
Binary file not shown.

BIN
tests/files/dummy-project-alpha.jar
Normal file
Binary file not shown.

BIN
tests/files/dummy-project-beta.jar
Normal file
Binary file not shown.

36
tests/files/dummy_data.sql
Normal file
@@ -0,0 +1,36 @@
-- Dummy test data for use in tests.
-- IDs are listed as integers, followed by their equivalent base 62 representation.

-- Inserts 5 dummy users for testing, with slight differences.
-- 'Friend' and 'enemy' function like 'user', but we can use them to simulate 'other' users that may or may not be able to access certain things.
-- IDs 1-5, 1-5
INSERT INTO users (id, username, name, email, role) VALUES (1, 'admin', 'Administrator Test', 'admin@modrinth.com', 'admin');
INSERT INTO users (id, username, name, email, role) VALUES (2, 'moderator', 'Moderator Test', 'moderator@modrinth.com', 'moderator');
INSERT INTO users (id, username, name, email, role) VALUES (3, 'user', 'User Test', 'user@modrinth.com', 'developer');
INSERT INTO users (id, username, name, email, role) VALUES (4, 'friend', 'Friend Test', 'friend@modrinth.com', 'developer');
INSERT INTO users (id, username, name, email, role) VALUES (5, 'enemy', 'Enemy Test', 'enemy@modrinth.com', 'developer');

-- Full PATs for each user, with different scopes.
-- These are not legal PATs, as they contain all scopes; they mimic the permissions of a logged-in user.
-- IDs: 50-54, o p q r s
INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (50, 1, 'admin-pat', 'mrp_patadmin', B'11111111111111111111111111111111111'::BIGINT, '2030-08-18 15:48:58.435729+00');
INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (51, 2, 'moderator-pat', 'mrp_patmoderator', B'11111111111111111111111111111111111'::BIGINT, '2030-08-18 15:48:58.435729+00');
INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (52, 3, 'user-pat', 'mrp_patuser', B'11111111111111111111111111111111111'::BIGINT, '2030-08-18 15:48:58.435729+00');
INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (53, 4, 'friend-pat', 'mrp_patfriend', B'11111111111111111111111111111111111'::BIGINT, '2030-08-18 15:48:58.435729+00');
INSERT INTO pats (id, user_id, name, access_token, scopes, expires) VALUES (54, 5, 'enemy-pat', 'mrp_patenemy', B'11111111111111111111111111111111111'::BIGINT, '2030-08-18 15:48:58.435729+00');

-- Sample game versions, loaders, categories
INSERT INTO game_versions (id, version, type, created)
VALUES (20000, '1.20.1', 'release', timezone('utc', now()));

INSERT INTO loaders (id, loader) VALUES (1, 'fabric');
INSERT INTO loaders_project_types (joining_loader_id, joining_project_type_id) VALUES (1,1);
INSERT INTO loaders_project_types (joining_loader_id, joining_project_type_id) VALUES (1,2);

INSERT INTO categories (id, category, project_type) VALUES (1, 'combat', 1);
INSERT INTO categories (id, category, project_type) VALUES (2, 'decoration', 1);
INSERT INTO categories (id, category, project_type) VALUES (3, 'economy', 1);

INSERT INTO categories (id, category, project_type) VALUES (4, 'combat', 2);
INSERT INTO categories (id, category, project_type) VALUES (5, 'decoration', 2);
INSERT INTO categories (id, category, project_type) VALUES (6, 'economy', 2);

BIN
tests/files/simple-zip.zip
Normal file
Binary file not shown.
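The SQL comments above map integer IDs to their base62 form (PAT IDs 50-54 become o-s) because the API addresses resources by base62 strings. A small self-contained sketch of that encoding, assuming the 0-9A-Za-z alphabet under which 50 encodes to 'o':

    // Illustrative base62 encoder matching the ID comments above.
    const ALPHABET: &[u8] = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

    fn to_base62(mut n: u64) -> String {
        if n == 0 {
            return "0".to_string();
        }
        let mut out = Vec::new();
        while n > 0 {
            out.push(ALPHABET[(n % 62) as usize]);
            n /= 62;
        }
        out.reverse();
        String::from_utf8(out).unwrap()
    }

    // to_base62(50) == "o" and to_base62(54) == "s", matching pat IDs 50-54.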
292
tests/pats.rs
Normal file
292
tests/pats.rs
Normal file
@@ -0,0 +1,292 @@
|
||||
use actix_web::test;
|
||||
use chrono::{Duration, Utc};
|
||||
use common::database::*;
|
||||
use labrinth::models::pats::Scopes;
|
||||
use serde_json::json;
|
||||
|
||||
use crate::common::environment::TestEnvironment;
|
||||
|
||||
// importing common module.
|
||||
mod common;
|
||||
|
||||
// Full pat test:
|
||||
// - create a PAT and ensure it can be used for the scope
|
||||
// - ensure access token is not returned for any PAT in GET
|
||||
// - ensure PAT can be patched to change scopes
|
||||
// - ensure PAT can be patched to change expiry
|
||||
// - ensure expired PATs cannot be used
|
||||
// - ensure PATs can be deleted
|
||||
#[actix_rt::test]
|
||||
pub async fn pat_full_test() {
|
||||
let test_env = TestEnvironment::build_with_dummy().await;
|
||||
|
||||
// Create a PAT for a full test
|
||||
let req = test::TestRequest::post()
|
||||
.uri("/v2/pat")
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.set_json(json!({
|
||||
"scopes": Scopes::COLLECTION_CREATE, // Collection create as an easily tested example
|
||||
"name": "test_pat_scopes Test",
|
||||
"expires": Utc::now() + Duration::days(1),
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 200);
|
||||
let success: serde_json::Value = test::read_body_json(resp).await;
|
||||
let id = success["id"].as_str().unwrap();
|
||||
|
||||
// Has access token and correct scopes
|
||||
assert!(success["access_token"].as_str().is_some());
|
||||
assert_eq!(
|
||||
success["scopes"].as_u64().unwrap(),
|
||||
Scopes::COLLECTION_CREATE.bits()
|
||||
);
|
||||
let access_token = success["access_token"].as_str().unwrap();
|
||||
|
||||
// Get PAT again
|
||||
let req = test::TestRequest::get()
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.uri("/v2/pat")
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 200);
|
||||
let success: serde_json::Value = test::read_body_json(resp).await;
|
||||
|
||||
// Ensure access token is NOT returned for any PATs
|
||||
for pat in success.as_array().unwrap() {
|
||||
assert!(pat["access_token"].as_str().is_none());
|
||||
}
|
||||
|
||||
// Create mock test for using PAT
|
||||
let mock_pat_test = |token: &str| {
|
||||
let token = token.to_string();
|
||||
async {
|
||||
let req = test::TestRequest::post()
|
||||
.uri("/v2/collection")
|
||||
.append_header(("Authorization", token))
|
||||
.set_json(json!({
|
||||
"title": "Test Collection 1",
|
||||
"description": "Test Collection Description"
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
resp.status().as_u16()
|
||||
}
|
||||
};
|
||||
|
||||
assert_eq!(mock_pat_test(access_token).await, 200);
|
||||
|
||||
// Change scopes and test again
|
||||
let req = test::TestRequest::patch()
|
||||
.uri(&format!("/v2/pat/{}", id))
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.set_json(json!({
|
||||
"scopes": 0,
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 204);
|
||||
assert_eq!(mock_pat_test(access_token).await, 401); // No longer works
|
||||
|
||||
// Change scopes back, and set expiry to the past, and test again
|
||||
let req = test::TestRequest::patch()
|
||||
.uri(&format!("/v2/pat/{}", id))
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.set_json(json!({
|
||||
"scopes": Scopes::COLLECTION_CREATE,
|
||||
"expires": Utc::now() + Duration::seconds(1), // expires in 1 second
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 204);
|
||||
|
||||
// Wait 1 second before testing again for expiry
|
||||
tokio::time::sleep(Duration::seconds(1).to_std().unwrap()).await;
|
||||
assert_eq!(mock_pat_test(access_token).await, 401); // No longer works
|
||||
|
||||
// Change everything back to normal and test again
|
||||
let req = test::TestRequest::patch()
|
||||
.uri(&format!("/v2/pat/{}", id))
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.set_json(json!({
|
||||
"expires": Utc::now() + Duration::days(1), // no longer expired!
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 204);
|
||||
assert_eq!(mock_pat_test(access_token).await, 200); // Works again
|
||||
|
||||
// Patching to a bad expiry should fail
|
||||
let req = test::TestRequest::patch()
|
||||
.uri(&format!("/v2/pat/{}", id))
|
||||
.append_header(("Authorization", USER_USER_PAT))
|
||||
.set_json(json!({
|
||||
"expires": Utc::now() - Duration::days(1), // Past
|
||||
}))
|
||||
.to_request();
|
||||
let resp = test_env.call(req).await;
|
||||
assert_eq!(resp.status().as_u16(), 400);
|
||||
|
||||
// Similar to above with PAT creation, patching to a bad scope should fail
|
||||
    for i in 0..64 {
        let scope = Scopes::from_bits_truncate(1 << i);
        if !Scopes::all().contains(scope) {
            continue;
        }

        let req = test::TestRequest::patch()
            .uri(&format!("/v2/pat/{}", id))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "scopes": scope.bits(),
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(
            resp.status().as_u16(),
            if scope.is_restricted() { 400 } else { 204 }
        );
    }

    // Delete PAT
    let req = test::TestRequest::delete()
        .append_header(("Authorization", USER_USER_PAT))
        .uri(&format!("/v2/pat/{}", id))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status().as_u16(), 204);

    // Cleanup test db
    test_env.cleanup().await;
}

// Test illegal PAT settings, both in POST and PATCH
#[actix_rt::test]
pub async fn bad_pats() {
    let test_env = TestEnvironment::build_with_dummy().await;

    // Creating a PAT with no name should fail
    let req = test::TestRequest::post()
        .uri("/v2/pat")
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "scopes": Scopes::COLLECTION_CREATE, // Collection create as an easily tested example
            "expires": Utc::now() + Duration::days(1),
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status().as_u16(), 400);

    // A name that is too short or too long should fail
    for name in ["n", "this_name_is_too_long".repeat(16).as_str()] {
        let req = test::TestRequest::post()
            .uri("/v2/pat")
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "name": name,
                "scopes": Scopes::COLLECTION_CREATE,
                "expires": Utc::now() + Duration::days(1),
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status().as_u16(), 400);
    }

    // Creating a PAT with an expiry in the past should fail
    let req = test::TestRequest::post()
        .uri("/v2/pat")
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "scopes": Scopes::COLLECTION_CREATE,
            "name": "test_pat_scopes Test",
            "expires": Utc::now() - Duration::days(1),
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status().as_u16(), 400);

    // Create a PAT with each scope; the result varies by whether that scope is restricted
    for i in 0..64 {
        let scope = Scopes::from_bits_truncate(1 << i);
        if !Scopes::all().contains(scope) {
            continue;
        }
        let req = test::TestRequest::post()
            .uri("/v2/pat")
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "scopes": scope.bits(),
                "name": format!("test_pat_scopes Name {}", i),
                "expires": Utc::now() + Duration::days(1),
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(
            resp.status().as_u16(),
            if scope.is_restricted() { 400 } else { 200 }
        );
    }

    // Create a 'good' PAT for patching
    let req = test::TestRequest::post()
        .uri("/v2/pat")
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "scopes": Scopes::COLLECTION_CREATE,
            "name": "test_pat_scopes Test",
            "expires": Utc::now() + Duration::days(1),
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status().as_u16(), 200);
    let success: serde_json::Value = test::read_body_json(resp).await;
    let id = success["id"].as_str().unwrap();

    // Patching to a bad name should fail
    for name in ["n", "this_name_is_too_long".repeat(16).as_str()] {
        let req = test::TestRequest::patch()
            .uri(&format!("/v2/pat/{}", id))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "name": name,
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status().as_u16(), 400);
    }

    // Patching to an expiry in the past should fail
    let req = test::TestRequest::patch()
        .uri(&format!("/v2/pat/{}", id))
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "expires": Utc::now() - Duration::days(1), // Past
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status().as_u16(), 400);

    // As with PAT creation above, patching to a restricted scope should fail
    for i in 0..64 {
        let scope = Scopes::from_bits_truncate(1 << i);
        if !Scopes::all().contains(scope) {
            continue;
        }

        let req = test::TestRequest::patch()
            .uri(&format!("/v2/pat/{}", id))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "scopes": scope.bits(),
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(
            resp.status().as_u16(),
            if scope.is_restricted() { 400 } else { 204 }
        );
    }

    // Cleanup test db
    test_env.cleanup().await;
}
461
tests/project.rs
Normal file
@@ -0,0 +1,461 @@
use actix_web::test;
use labrinth::database::models::project_item::{PROJECTS_NAMESPACE, PROJECTS_SLUGS_NAMESPACE};
use labrinth::models::ids::base62_impl::parse_base62;
use serde_json::json;

use crate::common::database::*;

use crate::common::{actix::AppendsMultipart, environment::TestEnvironment};

// Import the shared test utilities.
mod common;
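// (Each file under tests/ compiles as its own integration-test crate, so every
// test file that needs the helpers re-declares `mod common;` like this.)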

#[actix_rt::test]
async fn test_get_project() {
    // Test setup and dummy data
    let test_env = TestEnvironment::build_with_dummy().await;
    let alpha_project_id = &test_env.dummy.as_ref().unwrap().alpha_project_id;
    let beta_project_id = &test_env.dummy.as_ref().unwrap().beta_project_id;
    let alpha_project_slug = &test_env.dummy.as_ref().unwrap().alpha_project_slug;
    let alpha_version_id = &test_env.dummy.as_ref().unwrap().alpha_version_id;

    // Perform request on dummy data
    let req = test::TestRequest::get()
        .uri(&format!("/v2/project/{alpha_project_id}"))
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let status = resp.status();
    let body: serde_json::Value = test::read_body_json(resp).await;

    assert_eq!(status, 200);
    assert_eq!(body["id"], json!(alpha_project_id));
    assert_eq!(body["slug"], json!(alpha_project_slug));
    let versions = body["versions"].as_array().unwrap();
    assert!(!versions.is_empty());
    assert_eq!(versions[0], json!(alpha_version_id));

    // Confirm that the request was cached
    assert_eq!(
        test_env
            .db
            .redis_pool
            .get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, alpha_project_slug)
            .await
            .unwrap(),
        Some(parse_base62(alpha_project_id).unwrap() as i64)
    );

    let cached_project = test_env
        .db
        .redis_pool
        .get::<String, _>(PROJECTS_NAMESPACE, parse_base62(alpha_project_id).unwrap())
        .await
        .unwrap()
        .unwrap();
    let cached_project: serde_json::Value = serde_json::from_str(&cached_project).unwrap();
    assert_eq!(cached_project["inner"]["slug"], json!(alpha_project_slug));
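    // (Cache layout, as exercised above: PROJECTS_SLUGS_NAMESPACE maps a slug
    // to the project's numeric id, and PROJECTS_NAMESPACE maps that id to the
    // serialized project under an "inner" key.)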

    // Make the request again; this time it should hit the cache
    let req = test::TestRequest::get()
        .uri(&format!("/v2/project/{alpha_project_id}"))
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    let status = resp.status();
    assert_eq!(status, 200);

    let body: serde_json::Value = test::read_body_json(resp).await;
    assert_eq!(body["id"], json!(alpha_project_id));
    assert_eq!(body["slug"], json!(alpha_project_slug));

    // The request should fail on a non-existent project
    let req = test::TestRequest::get()
        .uri("/v2/project/nonexistent")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 404);

    // Similarly, the request should fail for a non-authorized user on a yet-to-be-approved
    // or hidden project, with a 404 (hiding the project's existence)
    let req = test::TestRequest::get()
        .uri(&format!("/v2/project/{beta_project_id}"))
        .append_header(("Authorization", ENEMY_USER_PAT))
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 404);

    // Cleanup test db
    test_env.cleanup().await;
}

#[actix_rt::test]
async fn test_add_remove_project() {
    // Test setup and dummy data
    let test_env = TestEnvironment::build_with_dummy().await;

    // Generate test project data.
    let mut json_data = json!(
        {
            "title": "Test_Add_Project project",
            "slug": "demo",
            "description": "Example description.",
            "body": "Example body.",
            "client_side": "required",
            "server_side": "optional",
            "initial_versions": [{
                "file_parts": ["basic-mod.jar"],
                "version_number": "1.2.3",
                "version_title": "start",
                "dependencies": [],
                "game_versions": ["1.20.1"],
                "release_channel": "release",
                "loaders": ["fabric"],
                "featured": true
            }],
            "categories": [],
            "license_id": "MIT"
        }
    );

    // Basic json
    let json_segment = common::actix::MultipartSegment {
        name: "data".to_string(),
        filename: None,
        content_type: Some("application/json".to_string()),
        data: common::actix::MultipartSegmentData::Text(serde_json::to_string(&json_data).unwrap()),
    };

    // Basic json, with a different file
    json_data["initial_versions"][0]["file_parts"][0] = json!("basic-mod-different.jar");
    let json_diff_file_segment = common::actix::MultipartSegment {
        data: common::actix::MultipartSegmentData::Text(serde_json::to_string(&json_data).unwrap()),
        ..json_segment.clone()
    };

    // Basic json, with a different file and a different slug
    json_data["slug"] = json!("new_demo");
    json_data["initial_versions"][0]["file_parts"][0] = json!("basic-mod-different.jar");
    let json_diff_slug_file_segment = common::actix::MultipartSegment {
        data: common::actix::MultipartSegmentData::Text(serde_json::to_string(&json_data).unwrap()),
        ..json_segment.clone()
    };

    // Basic file
    let file_segment = common::actix::MultipartSegment {
        name: "basic-mod.jar".to_string(),
        filename: Some("basic-mod.jar".to_string()),
        content_type: Some("application/java-archive".to_string()),
        data: common::actix::MultipartSegmentData::Binary(
            include_bytes!("../tests/files/basic-mod.jar").to_vec(),
        ),
    };

    // Differently named file, with the same content (for hash testing)
    let file_diff_name_segment = common::actix::MultipartSegment {
        name: "basic-mod-different.jar".to_string(),
        filename: Some("basic-mod-different.jar".to_string()),
        content_type: Some("application/java-archive".to_string()),
        data: common::actix::MultipartSegmentData::Binary(
            include_bytes!("../tests/files/basic-mod.jar").to_vec(),
        ),
    };

    // Differently named file, with different content
    let file_diff_name_content_segment = common::actix::MultipartSegment {
        name: "basic-mod-different.jar".to_string(),
        filename: Some("basic-mod-different.jar".to_string()),
        content_type: Some("application/java-archive".to_string()),
        data: common::actix::MultipartSegmentData::Binary(
            include_bytes!("../tests/files/basic-mod-different.jar").to_vec(),
        ),
    };
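
    // (Multipart layout: a "data" JSON segment describing the project, plus one
    // binary segment per entry in `file_parts`, keyed by file name. The
    // same-content/different-name segment exists to probe hash-based duplicate
    // detection below.)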

    // Add a project - simple, should work.
    let req = test::TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![json_segment.clone(), file_segment.clone()])
        .to_request();
    let resp = test_env.call(req).await;

    let status = resp.status();
    assert_eq!(status, 200);

    // Get the project we just made and confirm that it's correct
    let req = test::TestRequest::get()
        .uri("/v2/project/demo")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    let body: serde_json::Value = test::read_body_json(resp).await;
    let versions = body["versions"].as_array().unwrap();
    assert_eq!(versions.len(), 1);
    let uploaded_version_id = &versions[0];

    // Check the file to ensure it was uploaded and correctly identifies its version
    let hash = sha1::Sha1::from(include_bytes!("../tests/files/basic-mod.jar"))
        .digest()
        .to_string();
    let req = test::TestRequest::get()
        .uri(&format!("/v2/version_file/{hash}?algorithm=sha1"))
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    let body: serde_json::Value = test::read_body_json(resp).await;
    let file_version_id = &body["id"];
    assert_eq!(file_version_id, uploaded_version_id);
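
    // (Expected failure causes below: re-uploading identical bytes is assumed
    // to trip the content-hash duplicate check even under a new file name, and
    // reusing the "demo" slug is rejected even with a brand-new file.)
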
    // Reusing a different slug with the same file should fail,
    // even if that file is named differently
    let req = test::TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![
            json_diff_slug_file_segment.clone(), // Different slug, different file name
            file_diff_name_segment.clone(),      // Different file name, same content
        ])
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 400);

    // Reusing the same slug with a different file should fail
    let req = test::TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![
            json_diff_file_segment.clone(), // Same slug, different file name
            file_diff_name_content_segment.clone(), // Different file name, different content
        ])
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 400);

    // A different slug with a different file should succeed
    let req = test::TestRequest::post()
        .uri("/v2/project")
        .append_header(("Authorization", USER_USER_PAT))
        .set_multipart(vec![
            json_diff_slug_file_segment.clone(), // Different slug, different file name
            file_diff_name_content_segment.clone(), // Different file name, different content
        ])
        .to_request();

    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    // Get the original project's id
    let req = test::TestRequest::get()
        .uri("/v2/project/demo")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);
    let body: serde_json::Value = test::read_body_json(resp).await;
    let id = body["id"].as_str().unwrap().to_string();

    // Remove the project
    let req = test::TestRequest::delete()
        .uri("/v2/project/demo")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 204);

    // Confirm that the project is gone from the cache
    assert_eq!(
        test_env
            .db
            .redis_pool
            .get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, "demo")
            .await
            .unwrap(),
        None
    );
    assert_eq!(
        test_env
            .db
            .redis_pool
            .get::<i64, _>(PROJECTS_SLUGS_NAMESPACE, id)
            .await
            .unwrap(),
        None
    );

    // The old slug no longer works
    let req = test::TestRequest::get()
        .uri("/v2/project/demo")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 404);

    // Cleanup test db
    test_env.cleanup().await;
}

#[actix_rt::test]
pub async fn test_patch_project() {
    let test_env = TestEnvironment::build_with_dummy().await;
    let alpha_project_slug = &test_env.dummy.as_ref().unwrap().alpha_project_slug;
    let beta_project_slug = &test_env.dummy.as_ref().unwrap().beta_project_slug;

    // First, some patch requests that should fail.
    // Failure because the user is not authorized.
    let req = test::TestRequest::patch()
        .uri(&format!("/v2/project/{alpha_project_slug}"))
        .append_header(("Authorization", ENEMY_USER_PAT))
        .set_json(json!({
            "title": "Test_Add_Project project - test 1",
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 401);

    // Failure because the URL fields are set to invalid URLs.
    for url_type in ["issues_url", "source_url", "wiki_url", "discord_url"] {
        let req = test::TestRequest::patch()
            .uri(&format!("/v2/project/{alpha_project_slug}"))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                url_type: "w.fake.url",
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status(), 400);
    }

    // Failure because these requested statuses are illegal for a normal user.
    for requested_status in ["unknown", "processing", "withheld", "scheduled"] {
        let req = test::TestRequest::patch()
            .uri(&format!("/v2/project/{alpha_project_slug}"))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                "requested_status": requested_status,
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status(), 400);
    }

    // Failure because these fields cannot be set by a non-mod
    for key in ["moderation_message", "moderation_message_body"] {
        let req = test::TestRequest::patch()
            .uri(&format!("/v2/project/{alpha_project_slug}"))
            .append_header(("Authorization", USER_USER_PAT))
            .set_json(json!({
                key: "test",
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status(), 401);

        // (should work for a mod, though)
        let req = test::TestRequest::patch()
            .uri(&format!("/v2/project/{alpha_project_slug}"))
            .append_header(("Authorization", MOD_USER_PAT))
            .set_json(json!({
                key: "test",
            }))
            .to_request();
        let resp = test_env.call(req).await;
        assert_eq!(resp.status(), 204);
    }
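
    // (Convention in the checks above and below: authorization failures
    // surface as 401, validation failures as 400.)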

    // Failure because the slug is already taken.
    let req = test::TestRequest::patch()
        .uri(&format!("/v2/project/{alpha_project_slug}"))
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "slug": beta_project_slug, // the other dummy project has this slug
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 400);

    // Not allowed to directly set the status: the beta project is "processing",
    // and its status cannot be changed like this.
    let req = test::TestRequest::patch()
        .uri(&format!("/v2/project/{beta_project_slug}"))
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "status": "private"
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 401);

    // Successful request to patch many fields.
    let req = test::TestRequest::patch()
        .uri(&format!("/v2/project/{alpha_project_slug}"))
        .append_header(("Authorization", USER_USER_PAT))
        .set_json(json!({
            "slug": "newslug",
            "title": "New successful title",
            "description": "New successful description",
            "body": "New successful body",
            "categories": ["combat"],
            "license_id": "MIT",
            "issues_url": "https://github.com",
            "discord_url": "https://discord.gg",
            "wiki_url": "https://wiki.com",
            "client_side": "optional",
            "server_side": "required",
            "donation_urls": [{
                "id": "patreon",
                "platform": "Patreon",
                "url": "https://patreon.com"
            }]
        }))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 204);

    // The old slug no longer works
    let req = test::TestRequest::get()
        .uri(&format!("/v2/project/{alpha_project_slug}"))
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 404);

    // The new slug works, and the patched fields are all present
    let req = test::TestRequest::get()
        .uri("/v2/project/newslug")
        .append_header(("Authorization", USER_USER_PAT))
        .to_request();
    let resp = test_env.call(req).await;
    assert_eq!(resp.status(), 200);

    let body: serde_json::Value = test::read_body_json(resp).await;
    assert_eq!(body["slug"], json!("newslug"));
    assert_eq!(body["title"], json!("New successful title"));
    assert_eq!(body["description"], json!("New successful description"));
    assert_eq!(body["body"], json!("New successful body"));
    assert_eq!(body["categories"], json!(["combat"]));
    assert_eq!(body["license"]["id"], json!("MIT"));
    assert_eq!(body["issues_url"], json!("https://github.com"));
    assert_eq!(body["discord_url"], json!("https://discord.gg"));
    assert_eq!(body["wiki_url"], json!("https://wiki.com"));
    assert_eq!(body["client_side"], json!("optional"));
    assert_eq!(body["server_side"], json!("required"));
    assert_eq!(
        body["donation_urls"][0]["url"],
        json!("https://patreon.com")
    );

    // Cleanup test db
    test_env.cleanup().await;
}

// TODO: Missing routes on projects
// TODO: Using permissions/scopes, can we SEE the existence of projects we are not allowed to see? (i.e. 401 instead of 404)
1331
tests/scopes.rs
Normal file
File diff suppressed because it is too large