Add auto-reporting inappropriate text content (#387)

* Add initial support for blocking inappropriate text content

To make something clear, **nothing** is automatically censored or
deleted as a result of this pull request. This pull request is
meant to add two things:
- Regenerate IDs (project, version, user, etc.) that contain profanity
- Send reports to the moderators for new inappropriate content

* Make it build

* Fix logic issue

Co-authored-by: Geometrically <18202329+Geometrically@users.noreply.github.com>
This commit is contained in:
Emma Cypress ⚘
2022-07-10 01:51:55 +00:00
committed by GitHub
parent 18d1bc56fd
commit 68f7dc9512
10 changed files with 216 additions and 6 deletions

View File

@@ -393,6 +393,28 @@ async fn version_create_inner(
.insert_many(users, &mut *transaction)
.await?;
if let Some(version_body) = version_data.version_body {
crate::util::report::censor_check(
&*version_body,
None,
Some(models::ids::VersionId::from(version_id)),
None,
"Version created with inappropriate changelog".to_string(),
&mut *transaction,
)
.await?;
}
crate::util::report::censor_check(
&*version_data.version_title,
None,
Some(models::ids::VersionId::from(version_id)),
None,
"Version created with inappropriate name".to_string(),
&mut *transaction,
)
.await?;
let response = Version {
id: builder.version_id.into(),
project_id: builder.project_id.into(),