Fix updating mod version if file hash is the same (#5138)

* Start fixing infinite update glitch

* adjust app cache logic

* more cache logic

* cleanup

* add v2 redirect

* add v2 version file route to config
This commit is contained in:
aecsocket
2026-01-19 21:40:44 +00:00
committed by GitHub
parent 976644d1e6
commit c94dde9b47
4 changed files with 157 additions and 57 deletions

View File

@@ -25,6 +25,7 @@ pub fn config(cfg: &mut web::ServiceConfig) {
web::scope("version_files")
.service(get_versions_from_hashes)
.service(update_files)
.service(update_files_many)
.service(update_individual_files),
);
}
@@ -291,27 +292,70 @@ pub async fn update_files(
hashes: update_data.hashes,
};
let response =
v3::version_file::update_files(pool, redis, web::Json(update_data))
.await
.or_else(v2_reroute::flatten_404_error)?;
let returned_versions = match v3::version_file::update_files(
pool,
redis,
web::Json(update_data),
)
.await
{
Ok(resp) => resp,
Err(ApiError::NotFound) => return Ok(HttpResponse::NotFound().body("")),
Err(err) => return Err(err),
};
// Convert response to V2 format
match v2_reroute::extract_ok_json::<HashMap<String, Version>>(response)
.await
let v3_versions = returned_versions
.0
.into_iter()
.map(|(hash, version)| {
let v2_version = LegacyVersion::from(version);
(hash, v2_version)
})
.collect::<HashMap<_, _>>();
Ok(HttpResponse::Ok().json(v3_versions))
}
#[post("update_many")]
pub async fn update_files_many(
pool: web::Data<ReadOnlyPgPool>,
redis: web::Data<RedisPool>,
update_data: web::Json<ManyUpdateData>,
) -> Result<HttpResponse, ApiError> {
let update_data = update_data.into_inner();
let update_data = v3::version_file::ManyUpdateData {
loaders: update_data.loaders.clone(),
version_types: update_data.version_types.clone(),
game_versions: update_data.game_versions.clone(),
algorithm: update_data.algorithm,
hashes: update_data.hashes,
};
let returned_versions = match v3::version_file::update_files_many(
pool,
redis,
web::Json(update_data),
)
.await
{
Ok(returned_versions) => {
let v3_versions = returned_versions
Ok(resp) => resp,
Err(ApiError::NotFound) => return Ok(HttpResponse::NotFound().body("")),
Err(err) => return Err(err),
};
// Convert response to V2 format
let v3_versions = returned_versions
.0
.into_iter()
.map(|(hash, versions)| {
let v2_versions = versions
.into_iter()
.map(|(hash, version)| {
let v2_version = LegacyVersion::from(version);
(hash, v2_version)
})
.collect::<HashMap<_, _>>();
Ok(HttpResponse::Ok().json(v3_versions))
}
Err(response) => Ok(response),
}
.map(LegacyVersion::from)
.collect::<Vec<_>>();
(hash, v2_versions)
})
.collect::<HashMap<_, _>>();
Ok(HttpResponse::Ok().json(v3_versions))
}
#[derive(Serialize, Deserialize)]

View File

@@ -28,7 +28,10 @@ pub fn config(cfg: &mut web::ServiceConfig) {
);
cfg.service(
web::scope("version_files")
// DEPRECATED - use `update_many` instead
// see `fn update_files` comment
.route("update", web::post().to(update_files))
.route("update_many", web::post().to(update_files_many))
.route("update_individual", web::post().to(update_individual_files))
.route("", web::post().to(get_versions_from_hashes)),
);
@@ -331,11 +334,60 @@ pub struct ManyUpdateData {
pub version_types: Option<Vec<VersionType>>,
}
pub async fn update_files_many(
pool: web::Data<ReadOnlyPgPool>,
redis: web::Data<RedisPool>,
update_data: web::Json<ManyUpdateData>,
) -> Result<web::Json<HashMap<String, Vec<models::projects::Version>>>, ApiError>
{
update_files_internal(pool, redis, update_data)
.await
.map(web::Json)
}
// DEPRECATED - use `update_files_many` instead
//
// This returns a `HashMap<String, Version>` where the key is the file hash.
// But one file hash can have multiple versions associated with it.
// So you can end up in a situation where:
// - file with hash H is linked to versions V1, V2
// - user downloads mod with file hash H
// - every time the app checks for updates:
// - it asks the backend, what is the version of H?
// - backend says V1
// - the app asks, is there a compatible version newer than V1?
// - backend says, yes, V2
// - user updates to V2, but it's the same file, so it's the same hash H,
//   and the update button stays visible forever (an infinite update loop)
//
// By using `update_files_many`, we can have the app know that both V1 and V2
// are linked to H
//
// This endpoint is kept for backwards compatibility, since it still works in
// the vast majority of cases, where a hash H maps to exactly one version, and
// it is still needed by older clients. This endpoint will only return the
// first version found for each file hash.
pub async fn update_files(
pool: web::Data<ReadOnlyPgPool>,
redis: web::Data<RedisPool>,
update_data: web::Json<ManyUpdateData>,
) -> Result<HttpResponse, ApiError> {
) -> Result<web::Json<HashMap<String, models::projects::Version>>, ApiError> {
let file_hashes_to_versions =
update_files_internal(pool, redis, update_data).await?;
let resp = file_hashes_to_versions
.into_iter()
.filter_map(|(hash, versions)| {
let first_version = versions.into_iter().next()?;
Some((hash, first_version))
})
.collect();
Ok(web::Json(resp))
}
async fn update_files_internal(
pool: web::Data<ReadOnlyPgPool>,
redis: web::Data<RedisPool>,
update_data: web::Json<ManyUpdateData>,
) -> Result<HashMap<String, Vec<models::projects::Version>>, ApiError> {
let algorithm = update_data
.algorithm
.clone()
@@ -385,21 +437,25 @@ pub async fn update_files(
)
.await?;
let mut response = HashMap::new();
let mut response = HashMap::<String, Vec<models::projects::Version>>::new();
for file in files {
if let Some(version) = versions
.iter()
.find(|x| x.inner.project_id == file.project_id)
&& let Some(hash) = file.hashes.get(&algorithm)
{
response.insert(
hash.clone(),
models::projects::Version::from(version.clone()),
);
// add the version info for this file hash
// note: one file hash can have multiple versions associated with it
// just having a `HashMap<String, Version>` would mean that some version info is lost
// so we return a vec of them instead
response
.entry(hash.clone())
.or_default()
.push(models::projects::Version::from(version.clone()));
}
}
Ok(HttpResponse::Ok().json(response))
Ok(response)
}
#[derive(Serialize, Deserialize)]

View File

@@ -1307,9 +1307,9 @@ impl CachedEntry {
let variations =
futures::future::try_join_all(filtered_keys.iter().map(
|((loaders_key, game_version), hashes)| {
fetch_json::<HashMap<String, Version>>(
fetch_json::<HashMap<String, Vec<Version>>>(
Method::POST,
concat!(env!("MODRINTH_API_URL"), "version_files/update"),
concat!(env!("MODRINTH_API_URL"), "version_files/update_many"),
None,
Some(serde_json::json!({
"algorithm": "sha1",
@@ -1330,28 +1330,30 @@ impl CachedEntry {
&filtered_keys[index];
for hash in hashes {
let version = variation.remove(hash);
let versions = variation.remove(hash);
if let Some(version) = version {
let version_id = version.id.clone();
vals.push((
CacheValue::Version(version).get_entry(),
false,
));
if let Some(versions) = versions {
for version in versions {
let version_id = version.id.clone();
vals.push((
CacheValue::Version(version).get_entry(),
false,
));
vals.push((
CacheValue::FileUpdate(CachedFileUpdate {
hash: hash.clone(),
game_version: game_version.clone(),
loaders: loaders_key
.split('+')
.map(|x| x.to_string())
.collect(),
update_version_id: version_id,
})
.get_entry(),
true,
));
vals.push((
CacheValue::FileUpdate(CachedFileUpdate {
hash: hash.clone(),
game_version: game_version.clone(),
loaders: loaders_key
.split('+')
.map(|x| x.to_string())
.collect(),
update_version_id: version_id,
})
.get_entry(),
true,
));
}
} else {
vals.push((
CacheValueType::FileUpdate.get_empty_entry(

View File

@@ -1009,17 +1009,15 @@ impl Profile {
initial_file.file_name
);
let update_version_id = if let Some(update) = file_updates
.iter()
.find(|x| x.hash == hash.hash)
.map(|x| x.update_version_id.clone())
{
if let Some(metadata) = &file {
if metadata.version_id != update {
Some(update)
} else {
None
}
let update_version_id = if let Some(metadata) = &file {
let update_ids: Vec<String> = file_updates
.iter()
.filter(|x| x.hash == hash.hash)
.map(|x| x.update_version_id.clone())
.collect();
if !update_ids.contains(&metadata.version_id) {
update_ids.into_iter().next()
} else {
None
}