Batch inserts [MOD-555] (#726)

* Batch a bunch of inserts, but still more to do

* Insert many for clickhouse (+ tests)

* Batch the remaining ones except those requiring deduplication

* Risky dedups

* Bit of cleanup and formatting

* cargo sqlx prepare

* Add test around batch editing project categories

* Add struct to satisfy clippy

* Fix silly mistake that was caught by the tests!

* Leave room for growth in dummy_data
Author: Jackson Kruger
Date: 2023-10-11 13:32:58 -05:00
Committed by: GitHub
Parent: dfa43f3c5a
Commit: d92272ffa0
23 changed files with 1208 additions and 929 deletions
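The pattern this commit applies throughout: instead of executing one INSERT per row inside a loop, unzip the rows into one Vec per column, bind each Vec as a Postgres array, and let UNNEST expand the arrays back into rows, so N statements become one. A minimal sketch of the idea, not the codebase's exact helper: it uses the runtime-checked sqlx::query instead of the compile-time sqlx::query! macro so it compiles without a live database, borrows the collections_mods table from the diff below, and mirrors the diff's executor borrow (newer sqlx versions would want &mut **transaction):

    use sqlx::{Postgres, Transaction};

    // Batch N rows into one round trip: bind one Postgres array per
    // column and let UNNEST expand the arrays back into rows.
    pub async fn insert_collection_mods(
        pairs: &[(i64, i64)], // (collection_id, mod_id)
        transaction: &mut Transaction<'_, Postgres>,
    ) -> Result<(), sqlx::Error> {
        let (collection_ids, mod_ids): (Vec<i64>, Vec<i64>) =
            pairs.iter().copied().unzip();
        sqlx::query(
            "
            INSERT INTO collections_mods (collection_id, mod_id)
            SELECT * FROM UNNEST($1::bigint[], $2::bigint[])
            ON CONFLICT DO NOTHING
            ",
        )
        .bind(&collection_ids[..])
        .bind(&mod_ids[..])
        .execute(&mut *transaction)
        .await?;
        Ok(())
    }

One query replaces N, which matters most inside a transaction where every statement is a separate round trip to Postgres.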


@@ -6,8 +6,12 @@ mod fetch;
pub use fetch::*;
pub async fn init_client() -> clickhouse::error::Result<clickhouse::Client> {
let database = dotenvy::var("CLICKHOUSE_DATABASE").unwrap();
init_client_with_database(&dotenvy::var("CLICKHOUSE_DATABASE").unwrap()).await
}
pub async fn init_client_with_database(
database: &str,
) -> clickhouse::error::Result<clickhouse::Client> {
let client = {
let mut http_connector = HttpConnector::new();
http_connector.enforce_http(false); // allow https URLs


@@ -81,19 +81,19 @@ impl Collection {
.execute(&mut *transaction)
.await?;
for project_id in self.projects.iter() {
sqlx::query!(
"
INSERT INTO collections_mods (collection_id, mod_id)
VALUES ($1, $2)
ON CONFLICT DO NOTHING
",
self.id as CollectionId,
*project_id as ProjectId,
)
.execute(&mut *transaction)
.await?;
}
let (collection_ids, project_ids): (Vec<_>, Vec<_>) =
self.projects.iter().map(|p| (self.id.0, p.0)).unzip();
sqlx::query!(
"
INSERT INTO collections_mods (collection_id, mod_id)
SELECT * FROM UNNEST($1::bigint[], $2::bigint[])
ON CONFLICT DO NOTHING
",
&collection_ids[..],
&project_ids[..],
)
.execute(&mut *transaction)
.await?;
Ok(())
}


@@ -5,6 +5,7 @@ use crate::database::redis::RedisPool;
use crate::models::ids::base62_impl::{parse_base62, to_base62};
use crate::models::projects::{MonetizationStatus, ProjectStatus};
use chrono::{DateTime, Utc};
use itertools::Itertools;
use serde::{Deserialize, Serialize};
pub const PROJECTS_NAMESPACE: &str = "projects";
@@ -20,23 +21,25 @@ pub struct DonationUrl {
}
impl DonationUrl {
pub async fn insert_project(
&self,
pub async fn insert_many_projects(
donation_urls: Vec<Self>,
project_id: ProjectId,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), sqlx::error::Error> {
let (project_ids, platform_ids, urls): (Vec<_>, Vec<_>, Vec<_>) = donation_urls
.into_iter()
.map(|url| (project_id.0, url.platform_id.0, url.url))
.multiunzip();
sqlx::query!(
"
INSERT INTO mods_donations (
joining_mod_id, joining_platform_id, url
)
VALUES (
$1, $2, $3
)
SELECT * FROM UNNEST($1::bigint[], $2::int[], $3::varchar[])
",
project_id as ProjectId,
self.platform_id as DonationPlatformId,
self.url,
&project_ids[..],
&platform_ids[..],
&urls[..],
)
.execute(&mut *transaction)
.await?;
@@ -56,26 +59,76 @@ pub struct GalleryItem {
}
impl GalleryItem {
pub async fn insert(
&self,
pub async fn insert_many(
items: Vec<Self>,
project_id: ProjectId,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), sqlx::error::Error> {
let (project_ids, image_urls, featureds, titles, descriptions, orderings): (
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
) = items
.into_iter()
.map(|gi| {
(
project_id.0,
gi.image_url,
gi.featured,
gi.title,
gi.description,
gi.ordering,
)
})
.multiunzip();
sqlx::query!(
"
INSERT INTO mods_gallery (
mod_id, image_url, featured, title, description, ordering
)
VALUES (
$1, $2, $3, $4, $5, $6
)
SELECT * FROM UNNEST ($1::bigint[], $2::varchar[], $3::bool[], $4::varchar[], $5::varchar[], $6::bigint[])
",
project_id as ProjectId,
self.image_url,
self.featured,
self.title,
self.description,
self.ordering
&project_ids[..],
&image_urls[..],
&featureds[..],
&titles[..] as &[Option<String>],
&descriptions[..] as &[Option<String>],
&orderings[..]
)
.execute(&mut *transaction)
.await?;
Ok(())
}
}
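The casts in the bind list above, such as &titles[..] as &[Option<String>], are how sqlx::query! is told that an array parameter may contain NULLs; the macro cannot infer element nullability on its own. A sketch of the same bind with the unchecked API, against a hypothetical two-column table rather than the real mods_gallery schema:

    use sqlx::{Postgres, Transaction};

    // Nullable columns travel as Vec<Option<T>>. With the checked
    // query! macro (used above), the equivalent bind needs an explicit
    // cast like `&titles[..] as &[Option<String>]`.
    pub async fn insert_gallery_demo(
        mod_ids: &[i64],
        titles: Vec<Option<String>>, // one entry per row; None => NULL
        transaction: &mut Transaction<'_, Postgres>,
    ) -> Result<(), sqlx::Error> {
        sqlx::query(
            "
            INSERT INTO gallery_demo (mod_id, title)
            SELECT * FROM UNNEST($1::bigint[], $2::varchar[])
            ",
        )
        .bind(mod_ids)
        .bind(&titles[..])
        .execute(&mut *transaction)
        .await?;
        Ok(())
    }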
#[derive(derive_new::new)]
pub struct ModCategory {
project_id: ProjectId,
category_id: CategoryId,
is_additional: bool,
}
impl ModCategory {
pub async fn insert_many(
items: Vec<Self>,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), DatabaseError> {
let (project_ids, category_ids, is_additionals): (Vec<_>, Vec<_>, Vec<_>) = items
.into_iter()
.map(|mc| (mc.project_id.0, mc.category_id.0, mc.is_additional))
.multiunzip();
sqlx::query!(
"
INSERT INTO mods_categories (joining_mod_id, joining_category_id, is_additional)
SELECT * FROM UNNEST ($1::bigint[], $2::int[], $3::bool[])
",
&project_ids[..],
&category_ids[..],
&is_additionals[..]
)
.execute(&mut *transaction)
.await?;
@@ -160,46 +213,35 @@ impl ProjectBuilder {
};
project_struct.insert(&mut *transaction).await?;
let ProjectBuilder {
donation_urls,
gallery_items,
categories,
additional_categories,
..
} = self;
for mut version in self.initial_versions {
version.project_id = self.project_id;
version.insert(&mut *transaction).await?;
}
for donation in self.donation_urls {
donation
.insert_project(self.project_id, &mut *transaction)
.await?;
}
for gallery in self.gallery_items {
gallery.insert(self.project_id, &mut *transaction).await?;
}
for category in self.categories {
sqlx::query!(
"
INSERT INTO mods_categories (joining_mod_id, joining_category_id, is_additional)
VALUES ($1, $2, FALSE)
",
self.project_id as ProjectId,
category as CategoryId,
)
.execute(&mut *transaction)
DonationUrl::insert_many_projects(donation_urls, self.project_id, &mut *transaction)
.await?;
}
for category in self.additional_categories {
sqlx::query!(
"
INSERT INTO mods_categories (joining_mod_id, joining_category_id, is_additional)
VALUES ($1, $2, TRUE)
",
self.project_id as ProjectId,
category as CategoryId,
GalleryItem::insert_many(gallery_items, self.project_id, &mut *transaction).await?;
let project_id = self.project_id;
let mod_categories = categories
.into_iter()
.map(|c| ModCategory::new(project_id, c, false))
.chain(
additional_categories
.into_iter()
.map(|c| ModCategory::new(project_id, c, true)),
)
.execute(&mut *transaction)
.await?;
}
.collect_vec();
ModCategory::insert_many(mod_categories, &mut *transaction).await?;
Project::update_game_versions(self.project_id, &mut *transaction).await?;
Project::update_loaders(self.project_id, &mut *transaction).await?;
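itertools::Itertools::multiunzip, imported at the top of this file, is what turns a Vec of row tuples into the parallel per-column Vecs these queries bind. A toy, self-contained illustration with made-up data:

    use itertools::Itertools;

    fn main() {
        // An iterator of tuples becomes one Vec per tuple position,
        // i.e. one Vec per SQL column.
        let rows = vec![(1i64, "alpha", true), (2, "beta", false)];
        let (ids, names, flags): (Vec<i64>, Vec<&str>, Vec<bool>) =
            rows.into_iter().multiunzip();
        assert_eq!(ids, [1, 2]);
        assert_eq!(names, ["alpha", "beta"]);
        assert_eq!(flags, [true, false]);
    }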


@@ -41,26 +41,61 @@ impl TeamBuilder {
.execute(&mut *transaction)
.await?;
for member in self.members {
let team_member_id = generate_team_member_id(&mut *transaction).await?;
sqlx::query!(
"
INSERT INTO team_members (id, team_id, user_id, role, permissions, organization_permissions, accepted, payouts_split, ordering)
VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9)
",
team_member_id as TeamMemberId,
team.id as TeamId,
member.user_id as UserId,
member.role,
member.permissions.bits() as i64,
member.organization_permissions.map(|p| p.bits() as i64),
member.accepted,
member.payouts_split,
member.ordering,
)
.execute(&mut *transaction)
.await?;
let mut team_member_ids = Vec::new();
for _ in self.members.iter() {
team_member_ids.push(generate_team_member_id(&mut *transaction).await?.0);
}
let TeamBuilder { members } = self;
let (
team_ids,
user_ids,
roles,
permissions,
organization_permissions,
accepteds,
payouts_splits,
orderings,
): (
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
) = members
.into_iter()
.map(|m| {
(
team.id.0,
m.user_id.0,
m.role,
m.permissions.bits() as i64,
m.organization_permissions.map(|p| p.bits() as i64),
m.accepted,
m.payouts_split,
m.ordering,
)
})
.multiunzip();
sqlx::query!(
"
INSERT INTO team_members (id, team_id, user_id, role, permissions, organization_permissions, accepted, payouts_split, ordering)
SELECT * FROM UNNEST ($1::int8[], $2::int8[], $3::int8[], $4::varchar[], $5::int8[], $6::int8[], $7::bool[], $8::numeric[], $9::int8[])
",
&team_member_ids[..],
&team_ids[..],
&user_ids[..],
&roles[..],
&permissions[..],
&organization_permissions[..] as &[Option<i64>],
&accepteds[..],
&payouts_splits[..],
&orderings[..],
)
.execute(&mut *transaction)
.await?;
Ok(team_id)
}


@@ -90,22 +90,20 @@ impl ThreadBuilder {
.execute(&mut *transaction)
.await?;
for member in &self.members {
sqlx::query!(
"
INSERT INTO threads_members (
thread_id, user_id
)
VALUES (
$1, $2
)
",
thread_id as ThreadId,
*member as UserId,
)
.execute(&mut *transaction)
.await?;
}
let (thread_ids, members): (Vec<_>, Vec<_>) =
self.members.iter().map(|m| (thread_id.0, m.0)).unzip();
sqlx::query!(
"
INSERT INTO threads_members (
thread_id, user_id
)
SELECT * FROM UNNEST ($1::int8[], $2::int8[])
",
&thread_ids[..],
&members[..],
)
.execute(&mut *transaction)
.await?;
Ok(thread_id)
}


@@ -39,12 +39,59 @@ pub struct DependencyBuilder {
}
impl DependencyBuilder {
pub async fn insert(
self,
pub async fn insert_many(
builders: Vec<Self>,
version_id: VersionId,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), DatabaseError> {
let project_id = if let Some(project_id) = self.project_id {
let mut project_ids = Vec::new();
for dependency in builders.iter() {
project_ids.push(
dependency
.try_get_project_id(transaction)
.await?
.map(|id| id.0),
);
}
let (version_ids, dependency_types, dependency_ids, filenames): (
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
) = builders
.into_iter()
.map(|d| {
(
version_id.0,
d.dependency_type,
d.version_id.map(|v| v.0),
d.file_name,
)
})
.multiunzip();
sqlx::query!(
"
INSERT INTO dependencies (dependent_id, dependency_type, dependency_id, mod_dependency_id, dependency_file_name)
SELECT * FROM UNNEST ($1::bigint[], $2::varchar[], $3::bigint[], $4::bigint[], $5::varchar[])
",
&version_ids[..],
&dependency_types[..],
&dependency_ids[..] as &[Option<i64>],
&project_ids[..] as &[Option<i64>],
&filenames[..] as &[Option<String>],
)
.execute(&mut *transaction)
.await?;
Ok(())
}
async fn try_get_project_id(
&self,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<Option<ProjectId>, DatabaseError> {
Ok(if let Some(project_id) = self.project_id {
Some(project_id)
} else if let Some(version_id) = self.version_id {
sqlx::query!(
@@ -58,23 +105,7 @@ impl DependencyBuilder {
.map(|x| ProjectId(x.mod_id))
} else {
None
};
sqlx::query!(
"
INSERT INTO dependencies (dependent_id, dependency_type, dependency_id, mod_dependency_id, dependency_file_name)
VALUES ($1, $2, $3, $4, $5)
",
version_id as VersionId,
self.dependency_type,
self.version_id.map(|x| x.0),
project_id.map(|x| x.0),
self.file_name,
)
.execute(&mut *transaction)
.await?;
Ok(())
})
}
}
@@ -89,42 +120,70 @@ pub struct VersionFileBuilder {
}
impl VersionFileBuilder {
pub async fn insert(
self,
pub async fn insert_many(
version_files: Vec<Self>,
version_id: VersionId,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<FileId, DatabaseError> {
let file_id = generate_file_id(&mut *transaction).await?;
let (file_ids, version_ids, urls, filenames, primary, sizes, file_types): (
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
Vec<_>,
) = version_files
.iter()
.map(|f| {
(
file_id.0,
version_id.0,
f.url.clone(),
f.filename.clone(),
f.primary,
f.size as i32,
f.file_type.map(|x| x.to_string()),
)
})
.multiunzip();
sqlx::query!(
"
INSERT INTO files (id, version_id, url, filename, is_primary, size, file_type)
VALUES ($1, $2, $3, $4, $5, $6, $7)
SELECT * FROM UNNEST($1::bigint[], $2::bigint[], $3::varchar[], $4::varchar[], $5::bool[], $6::integer[], $7::varchar[])
",
file_id as FileId,
version_id as VersionId,
self.url,
self.filename,
self.primary,
self.size as i32,
self.file_type.map(|x| x.as_str()),
&file_ids[..],
&version_ids[..],
&urls[..],
&filenames[..],
&primary[..],
&sizes[..],
&file_types[..] as &[Option<String>],
)
.execute(&mut *transaction)
.await?;
for hash in self.hashes {
sqlx::query!(
"
INSERT INTO hashes (file_id, algorithm, hash)
VALUES ($1, $2, $3)
",
file_id as FileId,
hash.algorithm,
hash.hash,
)
.execute(&mut *transaction)
.await?;
}
let (file_ids, algorithms, hashes): (Vec<_>, Vec<_>, Vec<_>) = version_files
.into_iter()
.flat_map(|f| {
f.hashes
.into_iter()
.map(|h| (file_id.0, h.algorithm, h.hash))
})
.multiunzip();
sqlx::query!(
"
INSERT INTO hashes (file_id, algorithm, hash)
SELECT * FROM UNNEST($1::bigint[], $2::varchar[], $3::bytea[])
",
&file_ids[..],
&algorithms[..],
&hashes[..],
)
.execute(&mut *transaction)
.await?;
Ok(file_id)
}
@@ -170,44 +229,94 @@ impl VersionBuilder {
.execute(&mut *transaction)
.await?;
for file in self.files {
file.insert(self.version_id, transaction).await?;
}
let VersionBuilder {
dependencies,
loaders,
game_versions,
files,
version_id,
..
} = self;
VersionFileBuilder::insert_many(files, self.version_id, transaction).await?;
for dependency in self.dependencies {
dependency.insert(self.version_id, transaction).await?;
}
DependencyBuilder::insert_many(dependencies, self.version_id, transaction).await?;
for loader in self.loaders.clone() {
sqlx::query!(
"
INSERT INTO loaders_versions (loader_id, version_id)
VALUES ($1, $2)
",
loader as LoaderId,
self.version_id as VersionId,
)
.execute(&mut *transaction)
.await?;
}
let loader_versions = loaders
.iter()
.map(|l| LoaderVersion::new(*l, version_id))
.collect_vec();
LoaderVersion::insert_many(loader_versions, &mut *transaction).await?;
for game_version in self.game_versions.clone() {
sqlx::query!(
"
INSERT INTO game_versions_versions (game_version_id, joining_version_id)
VALUES ($1, $2)
",
game_version as GameVersionId,
self.version_id as VersionId,
)
.execute(&mut *transaction)
.await?;
}
let game_version_versions = game_versions
.iter()
.map(|v| VersionVersion::new(*v, version_id))
.collect_vec();
VersionVersion::insert_many(game_version_versions, &mut *transaction).await?;
Ok(self.version_id)
}
}
#[derive(derive_new::new)]
pub struct LoaderVersion {
pub loader_id: LoaderId,
pub version_id: VersionId,
}
impl LoaderVersion {
pub async fn insert_many(
items: Vec<Self>,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), DatabaseError> {
let (loader_ids, version_ids): (Vec<_>, Vec<_>) = items
.iter()
.map(|l| (l.loader_id.0, l.version_id.0))
.unzip();
sqlx::query!(
"
INSERT INTO loaders_versions (loader_id, version_id)
SELECT * FROM UNNEST($1::integer[], $2::bigint[])
",
&loader_ids[..],
&version_ids[..],
)
.execute(&mut *transaction)
.await?;
Ok(())
}
}
#[derive(derive_new::new)]
pub struct VersionVersion {
pub game_version_id: GameVersionId,
pub joining_version_id: VersionId,
}
impl VersionVersion {
pub async fn insert_many(
items: Vec<Self>,
transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), DatabaseError> {
let (game_version_ids, version_ids): (Vec<_>, Vec<_>) = items
.into_iter()
.map(|i| (i.game_version_id.0, i.joining_version_id.0))
.unzip();
sqlx::query!(
"
INSERT INTO game_versions_versions (game_version_id, joining_version_id)
SELECT * FROM UNNEST($1::integer[], $2::bigint[])
",
&game_version_ids[..],
&version_ids[..],
)
.execute(&mut *transaction)
.await?;
Ok(())
}
}
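The #[derive(derive_new::new)] attribute on ModCategory, LoaderVersion, and VersionVersion comes from the derive_new crate and generates a positional constructor, which is what call sites like LoaderVersion::new(*l, version_id) rely on. Roughly what the derive expands to, sketched with stand-in id newtypes rather than the real ones:

    // Stand-ins for the real id types, for illustration only.
    pub struct LoaderId(pub i32);
    pub struct VersionId(pub i64);

    pub struct LoaderVersion {
        pub loader_id: LoaderId,
        pub version_id: VersionId,
    }

    impl LoaderVersion {
        // One positional argument per field, in declaration order.
        pub fn new(loader_id: LoaderId, version_id: VersionId) -> Self {
            Self { loader_id, version_id }
        }
    }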
#[derive(Clone, Deserialize, Serialize)]
pub struct Version {
pub id: VersionId,


@@ -1,6 +1,13 @@
use crate::models::analytics::{Download, PageView, Playtime};
use dashmap::DashSet;
#[cfg(test)]
mod tests;
const VIEWS_TABLENAME: &str = "views";
const DOWNLOADS_TABLENAME: &str = "downloads";
const PLAYTIME_TABLENAME: &str = "playtime";
pub struct AnalyticsQueue {
views_queue: DashSet<PageView>,
downloads_queue: DashSet<Download>,
@@ -17,54 +24,50 @@ impl AnalyticsQueue {
}
}
pub async fn add_view(&self, page_view: PageView) {
pub fn add_view(&self, page_view: PageView) {
self.views_queue.insert(page_view);
}
pub async fn add_download(&self, download: Download) {
pub fn add_download(&self, download: Download) {
self.downloads_queue.insert(download);
}
pub async fn add_playtime(&self, playtime: Playtime) {
pub fn add_playtime(&self, playtime: Playtime) {
self.playtime_queue.insert(playtime);
}
pub async fn index(&self, client: clickhouse::Client) -> Result<(), clickhouse::error::Error> {
let views_queue = self.views_queue.clone();
self.views_queue.clear();
let downloads_queue = self.downloads_queue.clone();
self.downloads_queue.clear();
let playtime_queue = self.playtime_queue.clone();
self.playtime_queue.clear();
if !views_queue.is_empty() || !downloads_queue.is_empty() || !playtime_queue.is_empty() {
let mut views = client.insert("views")?;
for view in views_queue {
views.write(&view).await?;
}
views.end().await?;
let mut downloads = client.insert("downloads")?;
for download in downloads_queue {
downloads.write(&download).await?;
}
downloads.end().await?;
let mut playtimes = client.insert("playtime")?;
for playtime in playtime_queue {
playtimes.write(&playtime).await?;
}
playtimes.end().await?;
Self::index_queue(&client, &self.views_queue, VIEWS_TABLENAME).await?;
Self::index_queue(&client, &self.downloads_queue, DOWNLOADS_TABLENAME).await?;
Self::index_queue(&client, &self.playtime_queue, PLAYTIME_TABLENAME).await?;
Ok(())
}
async fn index_queue<T>(
client: &clickhouse::Client,
queue: &DashSet<T>,
table_name: &str,
) -> Result<(), clickhouse::error::Error>
where
T: serde::Serialize + Eq + std::hash::Hash + Clone + clickhouse::Row,
{
if queue.is_empty() {
return Ok(());
}
let current_queue = queue.clone();
queue.clear();
let mut inserter = client.inserter(table_name)?;
for row in current_queue {
inserter.write(&row).await?;
inserter.commit().await?;
}
inserter.end().await?;
Ok(())
}
}


@@ -0,0 +1,128 @@
use futures::Future;
use uuid::Uuid;
use super::*;
use crate::clickhouse::init_client_with_database;
use std::net::Ipv6Addr;
#[tokio::test]
async fn test_indexing() {
with_test_clickhouse_db(|clickhouse_client| async move {
let analytics = AnalyticsQueue::new();
analytics.add_download(get_default_download());
analytics.add_playtime(get_default_playtime());
analytics.add_view(get_default_views());
analytics.index(clickhouse_client.clone()).await.unwrap();
assert_table_counts(&clickhouse_client, 1, 1, 1).await;
analytics.index(clickhouse_client.clone()).await.unwrap();
assert_table_counts(&clickhouse_client, 1, 1, 1).await;
})
.await;
}
#[tokio::test]
async fn can_insert_many_downloads() {
with_test_clickhouse_db(|clickhouse_client| async move {
let analytics = AnalyticsQueue::new();
let n_downloads = 100_000;
for _ in 0..n_downloads {
analytics.add_download(get_default_download());
}
analytics.index(clickhouse_client.clone()).await.unwrap();
assert_table_count(DOWNLOADS_TABLENAME, &clickhouse_client, n_downloads).await;
})
.await;
}
async fn assert_table_counts(
client: &clickhouse::Client,
downloads: u64,
playtimes: u64,
views: u64,
) {
assert_table_count(DOWNLOADS_TABLENAME, client, downloads).await;
assert_table_count(PLAYTIME_TABLENAME, client, playtimes).await;
assert_table_count(VIEWS_TABLENAME, client, views).await;
}
async fn assert_table_count(table_name: &str, client: &clickhouse::Client, expected_count: u64) {
let count = client
.query(&format!("SELECT COUNT(*) from {table_name}"))
.fetch_one::<u64>()
.await
.unwrap();
assert_eq!(expected_count, count);
}
async fn with_test_clickhouse_db<Fut>(f: impl FnOnce(clickhouse::Client) -> Fut)
where
Fut: Future<Output = ()>,
{
let db_name = format!("test_{}", uuid::Uuid::new_v4().as_simple());
println!("Clickhouse test db: {}", db_name);
let clickhouse_client = init_client_with_database(&db_name)
.await
.expect("A real clickhouse instance should be running locally");
f(clickhouse_client.clone()).await;
clickhouse_client
.query(&format!("DROP DATABASE IF EXISTS {db_name}"))
.execute()
.await
.unwrap();
}
fn get_default_download() -> Download {
Download {
id: Uuid::new_v4(),
recorded: Default::default(),
domain: Default::default(),
site_path: Default::default(),
user_id: Default::default(),
project_id: Default::default(),
version_id: Default::default(),
ip: get_default_ipv6(),
country: Default::default(),
user_agent: Default::default(),
headers: Default::default(),
}
}
fn get_default_playtime() -> Playtime {
Playtime {
id: Uuid::new_v4(),
recorded: Default::default(),
seconds: Default::default(),
user_id: Default::default(),
project_id: Default::default(),
version_id: Default::default(),
loader: Default::default(),
game_version: Default::default(),
parent: Default::default(),
}
}
fn get_default_views() -> PageView {
PageView {
id: Uuid::new_v4(),
recorded: Default::default(),
domain: Default::default(),
site_path: Default::default(),
user_id: Default::default(),
project_id: Default::default(),
ip: get_default_ipv6(),
country: Default::default(),
user_agent: Default::default(),
headers: Default::default(),
}
}
fn get_default_ipv6() -> Ipv6Addr {
Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)
}


@@ -355,6 +355,8 @@ pub async fn process_payout(
};
let mut clear_cache_users = Vec::new();
let (mut insert_user_ids, mut insert_project_ids, mut insert_payouts, mut insert_starts) =
(Vec::new(), Vec::new(), Vec::new(), Vec::new());
for (id, project) in projects_map {
if let Some(value) = &multipliers.values.get(&(id as u64)) {
let project_multiplier: Decimal =
@@ -367,18 +369,10 @@ pub async fn process_payout(
let payout: Decimal = payout * project_multiplier * (split / sum_splits);
if payout > Decimal::ZERO {
sqlx::query!(
"
INSERT INTO payouts_values (user_id, mod_id, amount, created)
VALUES ($1, $2, $3, $4)
",
user_id,
id,
payout,
start
)
.execute(&mut *transaction)
.await?;
insert_user_ids.push(user_id);
insert_project_ids.push(id);
insert_payouts.push(payout);
insert_starts.push(start);
sqlx::query!(
"
@@ -399,6 +393,19 @@ pub async fn process_payout(
}
}
sqlx::query!(
"
INSERT INTO payouts_values (user_id, mod_id, amount, created)
SELECT * FROM UNNEST ($1::bigint[], $2::bigint[], $3::numeric[], $4::timestamptz[])
",
&insert_user_ids[..],
&insert_project_ids[..],
&insert_payouts[..],
&insert_starts[..]
)
.execute(&mut *transaction)
.await?;
if !clear_cache_users.is_empty() {
crate::database::models::User::clear_caches(
&clear_cache_users


@@ -150,7 +150,7 @@ pub async fn page_view_ingest(
view.user_id = user.id.0;
}
analytics_queue.add_view(view).await;
analytics_queue.add_view(view);
Ok(HttpResponse::NoContent().body(""))
}
@@ -202,19 +202,17 @@ pub async fn playtime_ingest(
}
if let Some(version) = versions.iter().find(|x| id == x.inner.id.into()) {
analytics_queue
.add_playtime(Playtime {
id: Default::default(),
recorded: Utc::now().timestamp_nanos() / 100_000,
seconds: playtime.seconds as u64,
user_id: user.id.0,
project_id: version.inner.project_id.0 as u64,
version_id: version.inner.id.0 as u64,
loader: playtime.loader,
game_version: playtime.game_version,
parent: playtime.parent.map(|x| x.0).unwrap_or(0),
})
.await;
analytics_queue.add_playtime(Playtime {
id: Default::default(),
recorded: Utc::now().timestamp_nanos() / 100_000,
seconds: playtime.seconds as u64,
user_id: user.id.0,
project_id: version.inner.project_id.0 as u64,
version_id: version.inner.id.0 as u64,
loader: playtime.loader,
game_version: playtime.game_version,
parent: playtime.parent.map(|x| x.0).unwrap_or(0),
});
}
}


@@ -108,40 +108,36 @@ pub async fn count_download(
let ip = crate::routes::analytics::convert_to_ip_v6(&download_body.ip)
.unwrap_or_else(|_| Ipv4Addr::new(127, 0, 0, 1).to_ipv6_mapped());
analytics_queue
.add_download(Download {
id: Uuid::new_v4(),
recorded: Utc::now().timestamp_nanos() / 100_000,
domain: url.host_str().unwrap_or_default().to_string(),
site_path: url.path().to_string(),
user_id: user
.and_then(|(scopes, x)| {
if scopes.contains(Scopes::PERFORM_ANALYTICS) {
Some(x.id.0 as u64)
} else {
None
}
})
.unwrap_or(0),
project_id: project_id as u64,
version_id: version_id as u64,
ip,
country: maxmind.query(ip).await.unwrap_or_default(),
user_agent: download_body
.headers
.get("user-agent")
.cloned()
.unwrap_or_default(),
headers: download_body
.headers
.clone()
.into_iter()
.filter(|x| {
!crate::routes::analytics::FILTERED_HEADERS.contains(&&*x.0.to_lowercase())
})
.collect(),
})
.await;
analytics_queue.add_download(Download {
id: Uuid::new_v4(),
recorded: Utc::now().timestamp_nanos() / 100_000,
domain: url.host_str().unwrap_or_default().to_string(),
site_path: url.path().to_string(),
user_id: user
.and_then(|(scopes, x)| {
if scopes.contains(Scopes::PERFORM_ANALYTICS) {
Some(x.id.0 as u64)
} else {
None
}
})
.unwrap_or(0),
project_id: project_id as u64,
version_id: version_id as u64,
ip,
country: maxmind.query(ip).await.unwrap_or_default(),
user_agent: download_body
.headers
.get("user-agent")
.cloned()
.unwrap_or_default(),
headers: download_body
.headers
.clone()
.into_iter()
.filter(|x| !crate::routes::analytics::FILTERED_HEADERS.contains(&&*x.0.to_lowercase()))
.collect(),
});
Ok(HttpResponse::NoContent().body(""))
}


@@ -15,6 +15,7 @@ use crate::{database, models};
use actix_web::web::Data;
use actix_web::{delete, get, patch, post, web, HttpRequest, HttpResponse};
use chrono::Utc;
use itertools::Itertools;
use serde::{Deserialize, Serialize};
use sqlx::PgPool;
use std::sync::Arc;
@@ -301,6 +302,11 @@ pub async fn collection_edit(
.execute(&mut *transaction)
.await?;
let collection_item_ids = new_project_ids
.iter()
.map(|_| collection_item.id.0)
.collect_vec();
let mut validated_project_ids = Vec::new();
for project_id in new_project_ids {
let project = database::models::Project::get(project_id, &**pool, &redis)
.await?
@@ -309,20 +315,20 @@ pub async fn collection_edit(
"The specified project {project_id} does not exist!"
))
})?;
// Insert- don't throw an error if it already exists
sqlx::query!(
"
INSERT INTO collections_mods (collection_id, mod_id)
VALUES ($1, $2)
ON CONFLICT DO NOTHING
",
collection_item.id as database::models::ids::CollectionId,
project.inner.id as database::models::ids::ProjectId,
)
.execute(&mut *transaction)
.await?;
validated_project_ids.push(project.inner.id.0);
}
// Insert- don't throw an error if it already exists
sqlx::query!(
"
INSERT INTO collections_mods (collection_id, mod_id)
SELECT * FROM UNNEST ($1::int8[], $2::int8[])
ON CONFLICT DO NOTHING
",
&collection_item_ids[..],
&validated_project_ids[..],
)
.execute(&mut *transaction)
.await?;
}
database::models::Collection::clear_cache(collection_item.id, &redis).await?;

File diff suppressed because it is too large.


@@ -725,9 +725,7 @@ async fn upload_file_to_version_inner(
"At least one file must be specified".to_string(),
));
} else {
for file_builder in file_builders {
file_builder.insert(version_id, &mut *transaction).await?;
}
VersionFileBuilder::insert_many(file_builders, version_id, &mut *transaction).await?;
}
// Clear version cache


@@ -3,6 +3,7 @@ use crate::auth::{
filter_authorized_versions, get_user_from_headers, is_authorized, is_authorized_version,
};
use crate::database;
use crate::database::models::version_item::{DependencyBuilder, LoaderVersion, VersionVersion};
use crate::database::models::{image_item, Organization};
use crate::database::redis::RedisPool;
use crate::models;
@@ -450,11 +451,12 @@ pub async fn version_edit(
})
.collect::<Vec<database::models::version_item::DependencyBuilder>>();
for dependency in builders {
dependency
.insert(version_item.inner.id, &mut transaction)
.await?;
}
DependencyBuilder::insert_many(
builders,
version_item.inner.id,
&mut transaction,
)
.await?;
}
}
}
@@ -469,6 +471,7 @@ pub async fn version_edit(
.execute(&mut *transaction)
.await?;
let mut version_versions = Vec::new();
for game_version in game_versions {
let game_version_id = database::models::categories::GameVersion::get_id(
&game_version.0,
@@ -481,17 +484,9 @@ pub async fn version_edit(
)
})?;
sqlx::query!(
"
INSERT INTO game_versions_versions (game_version_id, joining_version_id)
VALUES ($1, $2)
",
game_version_id as database::models::ids::GameVersionId,
id as database::models::ids::VersionId,
)
.execute(&mut *transaction)
.await?;
version_versions.push(VersionVersion::new(game_version_id, id));
}
VersionVersion::insert_many(version_versions, &mut transaction).await?;
database::models::Project::update_game_versions(
version_item.inner.project_id,
@@ -510,6 +505,7 @@ pub async fn version_edit(
.execute(&mut *transaction)
.await?;
let mut loader_versions = Vec::new();
for loader in loaders {
let loader_id =
database::models::categories::Loader::get_id(&loader.0, &mut *transaction)
@@ -519,18 +515,9 @@ pub async fn version_edit(
"No database entry for loader provided.".to_string(),
)
})?;
sqlx::query!(
"
INSERT INTO loaders_versions (loader_id, version_id)
VALUES ($1, $2)
",
loader_id as database::models::ids::LoaderId,
id as database::models::ids::VersionId,
)
.execute(&mut *transaction)
.await?;
loader_versions.push(LoaderVersion::new(loader_id, id));
}
LoaderVersion::insert_many(loader_versions, &mut transaction).await?;
database::models::Project::update_loaders(
version_item.inner.project_id,