Add auto-reporting of inappropriate text content (#387)

* Add initial support for blocking inappropriate text content

To make something clear, **nothing** is automatically censored or
deleted as a result of this pull request. This pull request is
meant to add two things:
- Regenerate new IDs (project, version, user, etc.) that contain
  profanity (see the sketch after these commit notes)
- Send reports to the moderators when newly submitted content
  contains inappropriate text

* Make it build

* Fix logic issue
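
For illustration only (none of the code below is part of this PR), the
ID-regeneration behavior can be sketched with the same `censor` crate the
diff further down pulls in. The `random_base62_string` helper and the `rand`
dependency are assumptions standing in for labrinth's internal
`random_base62_rng`:

```rust
// Minimal sketch, not the project's actual `generate_ids!` macro:
// keep generating candidate IDs until one passes the profanity check.
use censor::Censor;
use rand::Rng;

const BASE62_CHARS: &[u8] =
    b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";

// Hypothetical stand-in for labrinth's internal `random_base62_rng`.
fn random_base62_string(length: usize) -> String {
    let mut rng = rand::thread_rng();
    (0..length)
        .map(|_| BASE62_CHARS[rng.gen_range(0..BASE62_CHARS.len())] as char)
        .collect()
}

fn generate_clean_id(length: usize) -> String {
    // Same word lists as the diff: standard profanity plus sexual terms.
    let censor = Censor::Standard + Censor::Sex;
    loop {
        let candidate = random_base62_string(length);
        // `check` returns true when the string contains a filtered word,
        // so a flagged candidate is discarded and a new one is generated.
        if !censor.check(&candidate) {
            return candidate;
        }
    }
}

fn main() {
    println!("generated id: {}", generate_clean_id(8));
}
```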

Co-authored-by: Geometrically <18202329+Geometrically@users.noreply.github.com>
Author: Emma Cypress ⚘
Date: 2022-07-10 01:51:55 +00:00
Committed by: GitHub
Parent: 18d1bc56fd
Commit: 68f7dc9512
10 changed files with 216 additions and 6 deletions


@@ -1,5 +1,7 @@
 use super::DatabaseError;
+use crate::models::ids::base62_impl::to_base62;
 use crate::models::ids::random_base62_rng;
+use censor::Censor;
 use sqlx::sqlx_macros::Type;
 const ID_RETRY_COUNT: usize = 20;
@@ -13,6 +15,7 @@ macro_rules! generate_ids {
         let length = $id_length;
         let mut id = random_base62_rng(&mut rng, length);
         let mut retry_count = 0;
+        let censor = Censor::Standard + Censor::Sex;
         // Check if ID is unique
         loop {
@@ -20,7 +23,7 @@ macro_rules! generate_ids {
             .fetch_one(&mut *con)
             .await?;
-            if results.exists.unwrap_or(true) {
+            if results.exists.unwrap_or(true) || censor.check(&*to_base62(id)) {
                 id = random_base62_rng(&mut rng, length);
             } else {
                 break;
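
For context on the last hunk: `to_base62(id)` converts the numeric ID into
its base62 string form (the form exposed publicly), and `censor.check(...)`
returns true when that string contains a word from the combined Standard and
Sex lists. A flagged ID therefore takes the same branch as a database
collision and is simply regenerated; nothing is rejected or surfaced to the
caller.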