Files
AstralRinth/src/util/report.rs
Emma Cypress ⚘ 68f7dc9512 Add auto-reporting inappropriate text content (#387)
* Add initial support for blocking inappropriate text content

To make something clear, **nothing** is automatically censored or
deleted as a result of this pull request. This pull request is
meant to add two things:
- Regenerate new IDs (project, version, user, etc.) that contain profanity
- Send reports to the moderators for new inappropriate content

* Make it build

* Fix logic issue

Co-authored-by: Geometrically <18202329+Geometrically@users.noreply.github.com>
2022-07-09 18:51:55 -07:00

39 lines
1.2 KiB
Rust

use crate::database::models::categories::ReportType;
use crate::database::models::report_item::Report;
use crate::database::models::{
generate_report_id, DatabaseError, ProjectId, UserId, VersionId,
};
use crate::models::users::DELETED_USER;
use censor::Censor;
use time::OffsetDateTime;
/// Scans `text` for profanity and, when a match is found, files an
/// "inappropriate content" report against the given project/version/user
/// on behalf of the deleted-user account.
///
/// Nothing is censored or removed here — this only creates a moderation
/// report inside the caller's open transaction.
///
/// # Errors
/// Returns a [`DatabaseError`] if the report-type lookup, ID generation,
/// or insert fails.
///
/// # Panics
/// Panics if the `inappropriate` report type is missing from the database
/// (a broken-migration invariant, not a recoverable condition).
pub async fn censor_check(
    text: &str,
    project: Option<ProjectId>,
    version: Option<VersionId>,
    user: Option<UserId>,
    report_text: String,
    mut transaction: &mut sqlx::Transaction<'_, sqlx::Postgres>,
) -> Result<(), DatabaseError> {
    // Standard word list plus the sexual-content list.
    let censor = Censor::Standard + Censor::Sex;

    // Clean text: nothing to report.
    if !censor.check(text) {
        return Ok(());
    }

    let report_type_id = ReportType::get_id("inappropriate", &mut *transaction)
        .await?
        .expect("No database entry for 'inappropriate' report type");

    // Attribute the report to the deleted-user sentinel so moderators see
    // it came from the automated check rather than a real account.
    let report = Report {
        id: generate_report_id(&mut transaction).await?,
        report_type_id,
        project_id: project,
        version_id: version,
        user_id: user,
        body: report_text,
        reporter: UserId::from(DELETED_USER),
        created: OffsetDateTime::now_utc(),
    };

    report.insert(&mut transaction).await?;
    Ok(())
}