merge main and fix type errors

FrenchGithubUser committed on 2025-10-17 17:00:59 +02:00
147 changed files with 1318 additions and 982 deletions

Cargo.lock (generated)
View File

@@ -566,7 +566,9 @@ name = "arcadia-shared"
version = "0.1.0"
dependencies = [
"anyhow",
"bincode",
"indexmap",
"reqwest",
"serde",
"sqlx",
]
@@ -608,9 +610,9 @@ dependencies = [
"env_logger",
"envconfig",
"futures",
"indexmap",
"log",
"parking_lot",
"percent-encoding",
"reqwest",
"serde",
"serde_bencode",

View File

@@ -29,7 +29,7 @@ POSTGRES_USER=arcadia
POSTGRES_PASSWORD=password
# Connection string for the database.
DATABASE_URL=postgresql://arcadia:password@localhost:5432/arcadia
DATABASE_URL=postgresql://arcadia:password@localhost:4321/arcadia
## Arcadia Configuration

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{web::Data, HttpResponse};
use actix_web_lab::extract::Query;
use arcadia_common::error::Result;

View File

@@ -1,164 +1,158 @@
use crate::services::announce_service::is_torrent_client_allowed;
use crate::Arcadia;
use actix_web::{
dev,
web::{Data, Path},
FromRequest, HttpRequest, HttpResponse, ResponseError,
};
use arcadia_common::{
actix::HttpResponseBuilderExt,
error::announce::Error as AnnounceError,
models::tracker::announce::{Announce, AnnounceResponse, TorrentEvent},
};
use arcadia_storage::{redis::RedisPoolInterface, sqlx::types::ipnetwork::IpNetwork};
use std::future::{self, Ready};
// use crate::services::announce_service::is_torrent_client_allowed;
// use crate::Arcadia;
// use actix_web::{
// dev,
// web::{Data, Path},
// FromRequest, HttpRequest, HttpResponse, ResponseError,
// };
// use arcadia_common::{
// actix::HttpResponseBuilderExt,
// error::announce::Error as AnnounceError,
// models::tracker::announce::{Announce, AnnounceResponse, TorrentEvent},
// };
// use arcadia_storage::{redis::RedisPoolInterface, sqlx::types::ipnetwork::IpNetwork};
// use std::future::{self, Ready};
type Result<T> = std::result::Result<T, AnnounceError>;
// type Result<T> = std::result::Result<T, AnnounceError>;
#[derive(Debug)]
pub struct UserAgent(pub String);
// #[derive(Debug)]
// pub struct UserAgent(pub String);
impl std::ops::Deref for UserAgent {
type Target = str;
// impl std::ops::Deref for UserAgent {
// type Target = str;
fn deref(&self) -> &Self::Target {
self.0.deref()
}
}
// fn deref(&self) -> &Self::Target {
// self.0.deref()
// }
// }
#[derive(Debug, thiserror::Error)]
pub enum UserAgentExtractError {
#[error("no user agent")]
NoUserAgent,
// #[derive(Debug, thiserror::Error)]
// pub enum UserAgentExtractError {
// #[error("no user agent")]
// NoUserAgent,
#[error("not decodable as utf-8")]
ToStrError(#[from] actix_web::http::header::ToStrError),
}
// #[error("not decodable as utf-8")]
// ToStrError(#[from] actix_web::http::header::ToStrError),
// }
impl ResponseError for UserAgentExtractError {
fn error_response(&self) -> HttpResponse {
log::error!("The request generated this error: {self}");
HttpResponse::BadRequest().body(format!("{self}"))
}
}
// impl ResponseError for UserAgentExtractError {
// fn error_response(&self) -> HttpResponse {
// log::error!("The request generated this error: {self}");
// HttpResponse::BadRequest().body(format!("{self}"))
// }
// }
impl FromRequest for UserAgent {
type Error = UserAgentExtractError;
type Future = Ready<std::result::Result<Self, Self::Error>>;
// impl FromRequest for UserAgent {
// type Error = UserAgentExtractError;
// type Future = Ready<std::result::Result<Self, Self::Error>>;
fn from_request(req: &HttpRequest, _: &mut dev::Payload) -> Self::Future {
let user_agent = req
.headers()
.get("User-Agent")
.ok_or(UserAgentExtractError::NoUserAgent)
.and_then(|s| Ok(UserAgent(String::from(s.to_str()?))));
// fn from_request(req: &HttpRequest, _: &mut dev::Payload) -> Self::Future {
// let agent = req
// .headers()
// .get("User-Agent")
// .ok_or(UserAgentExtractError::NoUserAgent)
// .and_then(|s| Ok(UserAgent(String::from(s.to_str()?))));
future::ready(user_agent)
}
}
// future::ready(agent)
// }
// }
pub async fn exec<R: RedisPoolInterface>(
arc: Data<Arcadia<R>>,
passkey: Path<String>,
user_agent: Option<UserAgent>,
ann: Announce,
conn: dev::ConnectionInfo,
) -> Result<HttpResponse> {
if !is_torrent_client_allowed(&ann.peer_id, &arc.tracker.allowed_torrent_clients.clients) {
return Err(AnnounceError::TorrentClientNotInWhitelist);
}
// pub async fn exec<R: RedisPoolInterface>(
// arc: Data<Arcadia<R>>,
// passkey: Path<String>,
// agent: Option<UserAgent>,
// ann: Announce,
// conn: dev::ConnectionInfo,
// ) -> Result<HttpResponse> {
// if !is_torrent_client_allowed(&ann.peer_id, &arc.tracker.allowed_torrent_clients.clients) {
// return Err(AnnounceError::TorrentClientNotInWhitelist);
// }
let current_user = arc.pool.find_user_with_passkey(&passkey).await?;
// let current_user = arc.pool.find_user_with_passkey(&passkey).await?;
let torrent = arc.pool.find_torrent_with_id(&ann.info_hash).await?;
// let torrent = arc.pool.find_torrent_with_id(&ann.info_hash).await?;
let ip = conn
.realip_remote_addr()
.and_then(|ip| ip.parse::<IpNetwork>().ok())
.unwrap();
// let ip = conn
// .realip_remote_addr()
// .and_then(|ip| ip.parse::<IpNetwork>().ok())
// .unwrap();
if let Some(TorrentEvent::Stopped) = ann.event {
arc.pool
.remove_peer(&torrent.id, &ann.peer_id, &ip, ann.port)
.await;
//return HttpResponse::Ok().into();
todo!();
}
// if let Some(TorrentEvent::Stopped) = ann.event {
// arc.pool
// .remove_peer(&torrent.id, &ann.peer_id, &ip, ann.port)
// .await;
// //return HttpResponse::Ok().into();
// todo!();
// }
if let Some(TorrentEvent::Completed) = ann.event {
let _ = arc.pool.increment_torrent_completed(torrent.id).await;
}
// if let Some(TorrentEvent::Completed) = ann.event {
// let _ = arc.pool.increment_torrent_completed(torrent.id).await;
// }
let (old_real_uploaded, old_real_downloaded) = arc
.pool
.insert_or_update_peer(
&torrent.id,
&ip,
&current_user.id,
&ann,
user_agent.as_deref(),
)
.await;
// let (old_real_uploaded, old_real_downloaded) = arc
// .pool
// .insert_or_update_peer(&torrent.id, &ip, &current_user.id, &ann, agent.as_deref())
// .await;
let peers = arc
.pool
.find_torrent_peers(&torrent.id, &current_user.id)
.await;
// let peers = arc
// .pool
// .find_torrent_peers(&torrent.id, &current_user.id)
// .await;
// assuming that the client either sends both downloaded/uploaded
// or none of them
if let (Some(real_uploaded), Some(real_downloaded)) = (ann.uploaded, ann.downloaded) {
let upload_factor = if arc.tracker.global_upload_factor != 1.0 {
arc.tracker.global_upload_factor
} else {
torrent.upload_factor
};
let upload_to_credit =
((real_uploaded as i64 - old_real_uploaded) as f64 * upload_factor).ceil() as i64;
// // assuming that the client either sends both downloaded/uploaded
// // or none of them
// if let (Some(real_uploaded), Some(real_downloaded)) = (ann.uploaded, ann.downloaded) {
// let upload_factor = if arc.tracker.global_upload_factor != 1.0 {
// arc.tracker.global_upload_factor
// } else {
// torrent.upload_factor
// };
// let upload_to_credit =
// ((real_uploaded as i64 - old_real_uploaded) as f64 * upload_factor).ceil() as i64;
let download_factor = if arc.tracker.global_download_factor != 1.0 {
arc.tracker.global_download_factor
} else {
torrent.download_factor
};
let download_to_credit =
((real_downloaded as i64 - old_real_downloaded) as f64 * download_factor).ceil() as i64;
let real_uploaded_to_credit = real_uploaded as i64 - old_real_uploaded;
let real_downloaded_to_credit = real_downloaded as i64 - old_real_downloaded;
// let download_factor = if arc.tracker.global_download_factor != 1.0 {
// arc.tracker.global_download_factor
// } else {
// torrent.download_factor
// };
// let download_to_credit =
// ((real_downloaded as i64 - old_real_downloaded) as f64 * download_factor).ceil() as i64;
// let real_uploaded_to_credit = real_uploaded as i64 - old_real_uploaded;
// let real_downloaded_to_credit = real_downloaded as i64 - old_real_downloaded;
// if the client restarted, without sending a "stop" event, keeping the same ip/port
// calculated upload/download might be negative
if real_uploaded_to_credit >= 0 && real_downloaded_to_credit >= 0 {
let _ = arc
.pool
.credit_user_upload_download(
upload_to_credit,
download_to_credit,
real_uploaded_to_credit,
real_downloaded_to_credit,
current_user.id,
)
.await;
}
}
// // if the client restarted, without sending a "stop" event, keeping the same ip/port
// // calculated upload/download might be negative
// if real_uploaded_to_credit >= 0 && real_downloaded_to_credit >= 0 {
// let _ = arc
// .pool
// .credit_user_upload_download(
// upload_to_credit,
// download_to_credit,
// real_uploaded_to_credit,
// real_downloaded_to_credit,
// current_user.id,
// )
// .await;
// }
// }
if ann.left == Some(0u64) {
let _ = arc
.pool
.update_total_seedtime(
current_user.id,
torrent.id,
arc.tracker.announce_interval,
arc.tracker.announce_interval_grace_period,
)
.await;
}
// if ann.left == Some(0u64) {
// let _ = arc
// .pool
// .update_total_seedtime(
// current_user.id,
// torrent.id,
// arc.tracker.announce_interval,
// arc.tracker.announce_interval_grace_period,
// )
// .await;
// }
let resp = AnnounceResponse {
peers,
interval: arc.tracker.announce_interval,
..Default::default()
};
// let resp = AnnounceResponse {
// peers,
// interval: arc.tracker.announce_interval,
// ..Default::default()
// };
Ok(HttpResponse::Ok().bencode(resp))
}
// Ok(HttpResponse::Ok().bencode(resp))
// }
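
The crediting logic kept above as commented-out reference scales the delta between the client's cumulative counters and the values stored at the previous announce by the applicable factor, and only credits when both deltas are non-negative. A minimal standalone sketch of that arithmetic (the helper name and the sample numbers are illustrative, not taken from the codebase):

```rust
/// Amount to credit for one announce: the delta between the client's
/// cumulative counter and the previously stored value, scaled by the factor.
fn credit(reported: u64, previously_stored: i64, factor: f64) -> i64 {
    ((reported as i64 - previously_stored) as f64 * factor).ceil() as i64
}

fn main() {
    // The client reports 1_500 units uploaded, 1_000 were already recorded,
    // and the torrent carries a 2.0x upload factor.
    assert_eq!(credit(1_500, 1_000, 2.0), 1_000);
    // If the client restarted without sending a "stopped" event, the delta
    // can be negative; the handler skips crediting in that case.
    assert!(credit(200, 1_000, 1.0) < 0);
}
```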

View File

@@ -1,8 +1,8 @@
pub mod handle_announce;
// pub mod handle_announce;
use actix_web::web::{get, resource, ServiceConfig};
use arcadia_storage::redis::RedisPoolInterface;
// use actix_web::web::{get, resource, ServiceConfig};
// use arcadia_storage::redis::RedisPoolInterface;
pub fn config<R: RedisPoolInterface + 'static>(cfg: &mut ServiceConfig) {
cfg.service(resource("/{passkey}").route(get().to(self::handle_announce::exec::<R>)));
}
// pub fn config<R: RedisPoolInterface + 'static>(cfg: &mut ServiceConfig) {
// cfg.service(resource("/{passkey}").route(get().to(self::handle_announce::exec::<R>)));
// }

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Query},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,5 +1,5 @@
use crate::{
handlers::scrapers::ExternalDBData, middlewares::jwt_middleware::Authdata,
handlers::scrapers::ExternalDBData, middlewares::auth_middleware::Authdata,
services::external_db_service::check_if_existing_title_group_with_link_exists, Arcadia,
};
use actix_web::{

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,5 +1,5 @@
use crate::{
middlewares::jwt_middleware::Authdata, services::email_service::EmailService, Arcadia,
middlewares::auth_middleware::Authdata, services::email_service::EmailService, Arcadia,
};
use actix_web::{
web::{Data, Json},

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -11,5 +11,5 @@ pub struct ExternalDBData {
pub title_group: Option<UserCreatedTitleGroup>,
pub edition_group: Option<UserCreatedEditionGroup>,
pub affiliated_artists: Vec<AffiliatedArtistHierarchy>, // pub series: UserCreatedSeries
pub existing_title_group_id: Option<i64>,
pub existing_title_group_id: Option<i32>,
}

View File

@@ -3,7 +3,7 @@ use actix_web::{
HttpResponse,
};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::Result;
use arcadia_storage::{
models::torrent::{TorrentSearch, TorrentSearchResults},

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Path},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::Result;
use arcadia_storage::models::staff_pm::StaffPmOverview;

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Path},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Query},
HttpResponse,

View File

@@ -1,6 +1,6 @@
use crate::{
handlers::subscriptions::create_subscription::AddSubscriptionQuery,
middlewares::jwt_middleware::Authdata, Arcadia,
middlewares::auth_middleware::Authdata, Arcadia,
};
use actix_web::{
web::{Data, Query},

View File

@@ -9,7 +9,7 @@ use arcadia_storage::{
use futures::future::join_all;
use crate::{
handlers::external_db::get_tmdb_data::get_tmdb_rating, middlewares::jwt_middleware::Authdata,
handlers::external_db::get_tmdb_data::get_tmdb_rating, middlewares::auth_middleware::Authdata,
Arcadia,
};
use arcadia_common::error::Result;

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -10,7 +10,7 @@ use arcadia_storage::{
redis::RedisPoolInterface,
};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::{Error, Result};
#[utoipa::path(

View File

@@ -8,12 +8,12 @@ use arcadia_storage::{
use serde::Deserialize;
use utoipa::IntoParams;
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::Result;
#[derive(Debug, Deserialize, IntoParams)]
pub struct GetTitleGroupQuery {
pub id: i64,
pub id: i32,
}
#[utoipa::path(
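
The switch from `i64` to `i32` here (and in the other handlers above) mirrors the `Int8` → `Int4` changes in the cached sqlx metadata further down: with compile-time checked queries, the Rust type bound to a parameter must match the Postgres column type, so an `INTEGER` id column requires `i32`. A minimal sketch of that constraint, using an assumed table and column rather than the project's actual schema:

```rust
use sqlx::PgPool;

// Hypothetical query: `title_groups.id` is assumed to be INTEGER (Int4),
// so the bound parameter must be i32; binding an i64 would fail the
// compile-time check and change the offline metadata under .sqlx/.
async fn title_group_exists(pool: &PgPool, id: i32) -> sqlx::Result<bool> {
    let row = sqlx::query!("SELECT id FROM title_groups WHERE id = $1", id)
        .fetch_optional(pool)
        .await?;
    Ok(row.is_some())
}
```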

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -10,7 +10,7 @@ use arcadia_storage::{
redis::RedisPoolInterface,
};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::{Error, Result};
#[utoipa::path(

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,7 +1,7 @@
use actix_multipart::form::MultipartForm;
use actix_web::{web::Data, HttpResponse};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::Result;
use arcadia_storage::{
models::torrent::{Torrent, UploadedTorrent},

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -4,7 +4,7 @@ use actix_web::{
};
use serde_json::json;
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::{Error, Result};
use arcadia_storage::{
models::{torrent::TorrentToDelete, user::UserClass},

View File

@@ -9,12 +9,12 @@ use arcadia_storage::redis::RedisPoolInterface;
use serde::Deserialize;
use utoipa::{IntoParams, ToSchema};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::Result;
#[derive(Debug, Deserialize, IntoParams, ToSchema)]
pub struct DownloadTorrentQuery {
id: i64,
id: i32,
}
#[utoipa::path(

View File

@@ -3,7 +3,7 @@ use actix_web::{
HttpResponse,
};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::{Error, Result};
use arcadia_storage::{
models::{

View File

@@ -1,6 +1,6 @@
use actix_web::{web::Data, HttpResponse};
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::error::{Error, Result};
use arcadia_storage::{
models::{torrent::TorrentMinimal, user::UserClass},

View File

@@ -3,7 +3,7 @@ use arcadia_storage::redis::RedisPoolInterface;
use serde::{Deserialize, Serialize};
use utoipa::ToSchema;
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use arcadia_common::{error::Result, services::torrent_service::get_announce_url};
#[derive(Debug, Serialize, Deserialize, ToSchema)]

View File

@@ -0,0 +1,9 @@
use crate::{handlers::tracker::binary_response, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::Result;
use arcadia_storage::redis::RedisPoolInterface;
pub async fn exec<R: RedisPoolInterface + 'static>(arc: Data<Arcadia<R>>) -> Result<HttpResponse> {
let torrents = arc.pool.find_torrents().await?;
binary_response(&torrents)
}

View File

@@ -1,16 +1,7 @@
use crate::Arcadia;
use crate::{handlers::tracker::binary_response, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::Result;
use arcadia_storage::redis::RedisPoolInterface;
use serde::Serialize;
#[inline]
fn binary_response<T: Serialize>(value: &T) -> Result<HttpResponse> {
let bytes = serde_bencode::to_bytes(value).expect("error encoding to binary");
Ok(HttpResponse::Ok()
.content_type("application/octet-stream")
.body(bytes))
}
pub async fn exec<R: RedisPoolInterface + 'static>(arc: Data<Arcadia<R>>) -> Result<HttpResponse> {
let users = arc.pool.find_users().await?;

View File

@@ -1,8 +1,24 @@
pub mod get_torrents;
pub mod get_users;
use actix_web::web::{get, resource, ServiceConfig};
use actix_web::{
web::{get, resource, ServiceConfig},
HttpResponse,
};
use arcadia_common::error::Result;
use arcadia_storage::redis::RedisPoolInterface;
use bincode::config;
// TODO: protect by only allowing requests from tracker's ip
pub fn config<R: RedisPoolInterface + 'static>(cfg: &mut ServiceConfig) {
cfg.service(resource("/users").route(get().to(self::get_users::exec::<R>)));
cfg.service(resource("/torrents").route(get().to(self::get_torrents::exec::<R>)));
}
fn binary_response<T: bincode::Encode>(value: &T) -> Result<HttpResponse> {
let config = config::standard();
let bytes = bincode::encode_to_vec(value, config).expect("error encoding to bincode");
Ok(HttpResponse::Ok()
.content_type("application/octet-stream")
.body(bytes))
}
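
Since `binary_response` now serializes with bincode's standard configuration instead of bencode, the tracker side has to decode with the same configuration. A minimal sketch of the consuming end, assuming bincode 2 with the `derive` feature and a hypothetical mirror of the torrent payload (the real types live in arcadia-shared and may differ):

```rust
use bincode::{config, Decode};

// Assumed shape, modeled on the SELECT list of the new find_torrents query.
#[derive(Debug, Decode)]
struct TrackerTorrent {
    id: i32,
    upload_factor: f64,
    download_factor: f64,
    seeders: i64,
    leechers: i64,
    completed: i64,
}

fn decode_torrents(bytes: &[u8]) -> Result<Vec<TrackerTorrent>, bincode::error::DecodeError> {
    // The config must match the `config::standard()` used by `binary_response`.
    let (torrents, _bytes_read) = bincode::decode_from_slice(bytes, config::standard())?;
    Ok(torrents)
}
```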

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Query},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::Result;
use arcadia_storage::{
@@ -31,7 +31,7 @@ pub async fn exec<R: RedisPoolInterface + 'static>(
) -> Result<HttpResponse> {
let mut current_user = arc.pool.find_user_with_id(user.sub).await?;
current_user.password_hash = String::from("");
let peers = arc.pool.get_user_peers(current_user.id).await;
// let peers = arc.pool.get_user_peers(current_user.id).await;
let user_warnings = arc.pool.find_user_warnings(current_user.id).await;
let search_title_group = TorrentSearchTitleGroup {
name: String::from(""),
@@ -73,7 +73,7 @@ pub async fn exec<R: RedisPoolInterface + 'static>(
Ok(HttpResponse::Ok().json(json!({
"user": current_user,
"peers":peers,
"peers": "[]",//peers,
"user_warnings": user_warnings,
"unread_conversations_amount": unread_conversations_amount,
"unread_notifications_amount":unread_notifications_amount,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::{Error, Result};
use arcadia_storage::{

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Query},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{web::Data, HttpResponse};
use arcadia_common::error::Result;
use arcadia_storage::{models::conversation::ConversationsOverview, redis::RedisPoolInterface};

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1,4 +1,4 @@
use crate::{middlewares::jwt_middleware::Authdata, Arcadia};
use crate::{middlewares::auth_middleware::Authdata, Arcadia};
use actix_web::{
web::{Data, Json},
HttpResponse,

View File

@@ -1 +1 @@
pub mod jwt_middleware;
pub mod auth_middleware;

View File

@@ -3,7 +3,7 @@ use actix_web_httpauth::middleware::HttpAuthentication;
use arcadia_storage::redis::RedisPoolInterface;
use crate::handlers::affiliated_artists::config as AffiliatedArtistsConfig;
use crate::handlers::announces::config as AnnouncesConfig;
// use crate::handlers::announces::config as AnnouncesConfig;
use crate::handlers::artists::config as ArtistsConfig;
use crate::handlers::auth::config as AuthConfig;
use crate::handlers::collages::config as CollagesConfig;
@@ -26,10 +26,10 @@ use crate::handlers::tracker::config as TrackerConfig;
use crate::handlers::user_applications::config as UserApplicationsConfig;
use crate::handlers::users::config as UsersConfig;
use crate::handlers::wiki::config as WikiConfig;
use crate::middlewares::jwt_middleware::authenticate_user;
use crate::middlewares::auth_middleware::authenticate_user;
pub fn init<R: RedisPoolInterface + 'static>(cfg: &mut web::ServiceConfig) {
cfg.service(scope("/announce").configure(AnnouncesConfig::<R>));
// cfg.service(scope("/announce").configure(AnnouncesConfig::<R>));
cfg.service(
web::scope("/api")

View File

@@ -4,7 +4,7 @@ use chrono::{DateTime, Local};
pub struct Peer {
pub id: i64,
pub user_id: i32,
pub torrent_id: i64,
pub torrent_id: i32,
pub peer_id: [u8; 20],
pub ip: Option<std::net::Ipv4Addr>,
pub port: u16,

View File

@@ -12,7 +12,7 @@
RUST_LOG="debug,sqlx=info"
# Connection string for the database.
DATABASE_URL=postgresql://arcadia:password@localhost:5432/arcadia
DATABASE_URL=postgresql://arcadia:password@localhost:4321/arcadia
# Interval for tracker announcements (in seconds).
ARCADIA_TRACKER_ANNOUNCE_INTERVAL=1800

View File

@@ -1,16 +1,16 @@
use arcadia_periodic_tasks::{periodic_tasks::scheduler::run_periodic_tasks, store::Store};
use std::{env, sync::Arc};
// use arcadia_periodic_tasks::{periodic_tasks::scheduler::run_periodic_tasks, store::Store};
// use std::{env, sync::Arc};
#[tokio::main]
async fn main() {
if env::var("ENV").unwrap_or("".to_string()) != "Docker" {
dotenvy::from_filename(".env").expect("cannot load env from a file");
}
// if env::var("ENV").unwrap_or("".to_string()) != "Docker" {
// dotenvy::from_filename(".env").expect("cannot load env from a file");
// }
env_logger::init_from_env(env_logger::Env::default().default_filter_or("debug"));
let store = Arc::new(Store::new().await);
if let Err(e) = run_periodic_tasks(store).await {
eprintln!("Error running cron tasks: {e:?}");
}
// let store = Arc::new(Store::new().await);
// if let Err(e) = run_periodic_tasks(store).await {
// eprintln!("Error running cron tasks: {e:?}");
// }
}

View File

@@ -1,16 +1,16 @@
use arcadia_storage::connection_pool::ConnectionPool;
use std::sync::Arc;
// use arcadia_storage::connection_pool::ConnectionPool;
// use std::sync::Arc;
pub async fn remove_inactive_peers(
pool: Arc<ConnectionPool>,
announce_interval: u32,
announce_grace_period: u32,
) {
let removed_peers_amount = pool
.remove_inactive_peers((announce_interval + announce_grace_period) as f64)
.await;
log::info!(
"Removed {} inactive peers from the database",
removed_peers_amount.unwrap()
)
}
// pub async fn remove_inactive_peers(
// pool: Arc<ConnectionPool>,
// announce_interval: u32,
// announce_grace_period: u32,
// ) {
// let removed_peers_amount = pool
// .remove_inactive_peers((announce_interval + announce_grace_period) as f64)
// .await;
// log::info!(
// "Removed {} inactive peers from the database",
// removed_peers_amount.unwrap()
// )
// }

View File

@@ -1,57 +1,57 @@
use std::{env, sync::Arc};
use tokio_cron_scheduler::{Job, JobScheduler};
// use std::{env, sync::Arc};
// use tokio_cron_scheduler::{Job, JobScheduler};
use crate::{periodic_tasks::peers::remove_inactive_peers, store::Store};
// use crate::{periodic_tasks::peers::remove_inactive_peers, store::Store};
use super::torrents::update_torrent_seeders_leechers;
// use super::torrents::update_torrent_seeders_leechers;
pub async fn run_periodic_tasks(store: Arc<Store>) -> Result<(), Box<dyn std::error::Error>> {
let sched = JobScheduler::new().await?;
pub async fn run_periodic_tasks(/*store: Arc<Store>*/) -> Result<(), Box<dyn std::error::Error>> {
// let sched = JobScheduler::new().await?;
let update_torrent_seeders_leechers_interval =
env::var("TASK_INTERVAL_UPDATE_TORRENT_SEEDERS_LEECHERS")
.expect("env var TASK_INTERVAL_UPDATE_TORRENT_SEEDERS_LEECHERS is missing");
// let update_torrent_seeders_leechers_interval =
// env::var("TASK_INTERVAL_UPDATE_TORRENT_SEEDERS_LEECHERS")
// .expect("env var TASK_INTERVAL_UPDATE_TORRENT_SEEDERS_LEECHERS is missing");
let pool_1 = Arc::clone(&store.pool);
let job1 = match Job::new_async(
update_torrent_seeders_leechers_interval.as_str(),
move |_uuid, _l| Box::pin(update_torrent_seeders_leechers(Arc::clone(&pool_1))),
) {
Ok(job) => job,
Err(e) => {
return Err(format!(
"Error creating job for updating torrents seeders and leechers: {e}"
)
.into());
}
};
sched.add(job1).await?;
// let pool_1 = Arc::clone(&store.pool);
// let job1 = match Job::new_async(
// update_torrent_seeders_leechers_interval.as_str(),
// move |_uuid, _l| Box::pin(update_torrent_seeders_leechers(Arc::clone(&pool_1))),
// ) {
// Ok(job) => job,
// Err(e) => {
// return Err(format!(
// "Error creating job for updating torrents seeders and leechers: {e}"
// )
// .into());
// }
// };
// sched.add(job1).await?;
// this interval should be often enough
// let cleanup_interval_seconds = arc.tracker_announce_interval * 2;
let remove_inactive_peers_interval = env::var("TASK_INTERVAL_REMOVE_INACTIVE_PEERS")
.expect("env var TASK_INTERVAL_REMOVE_INACTIVE_PEERS is missing");
// // this interval should be often enough
// // let cleanup_interval_seconds = arc.tracker_announce_interval * 2;
// let remove_inactive_peers_interval = env::var("TASK_INTERVAL_REMOVE_INACTIVE_PEERS")
// .expect("env var TASK_INTERVAL_REMOVE_INACTIVE_PEERS is missing");
// cleaning old peers is also done when the client sends a "stop" event
// but it doesn't always do it, so we need to clean the ones that are gone without sending this event
let pool_2 = Arc::clone(&store.pool);
let announce_interval = store.env.tracker.announce_interval;
let announce_interval_grace_period = store.env.tracker.announce_interval_grace_period;
let job2 = match Job::new_async(remove_inactive_peers_interval.as_str(), move |_uuid, _l| {
Box::pin(remove_inactive_peers(
Arc::clone(&pool_2),
announce_interval,
announce_interval_grace_period,
))
}) {
Ok(job) => job,
Err(e) => {
return Err(format!("Error creating job for cleaning inactive peers: {e}").into());
}
};
sched.add(job2).await?;
// // cleaning old peers is also done when the client sends a "stop" event
// // but it doesn't always do it, so we need to clean the ones that are gone without sending this event
// let pool_2 = Arc::clone(&store.pool);
// let announce_interval = store.env.tracker.announce_interval;
// let announce_interval_grace_period = store.env.tracker.announce_interval_grace_period;
// let job2 = match Job::new_async(remove_inactive_peers_interval.as_str(), move |_uuid, _l| {
// Box::pin(remove_inactive_peers(
// Arc::clone(&pool_2),
// announce_interval,
// announce_interval_grace_period,
// ))
// }) {
// Ok(job) => job,
// Err(e) => {
// return Err(format!("Error creating job for cleaning inactive peers: {e}").into());
// }
// };
// sched.add(job2).await?;
sched.start().await?;
// sched.start().await?;
Ok(())
}

View File

@@ -1,6 +1,6 @@
use arcadia_storage::connection_pool::ConnectionPool;
use std::sync::Arc;
// use arcadia_storage::connection_pool::ConnectionPool;
// use std::sync::Arc;
pub async fn update_torrent_seeders_leechers(pool: Arc<ConnectionPool>) {
let _ = pool.update_torrent_seeders_leechers().await;
}
// pub async fn update_torrent_seeders_leechers(pool: Arc<ConnectionPool>) {
// // let _ = pool.update_torrent_seeders_leechers().await;
// }

View File

@@ -12,7 +12,7 @@
"parameters": {
"Left": [
"Int4",
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -5,7 +5,7 @@
"columns": [],
"parameters": {
"Left": [
"Int8",
"Int4",
"Int4",
"Int8"
]

View File

@@ -5,7 +5,7 @@
"columns": [],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": []

View File

@@ -31,12 +31,12 @@
{
"ordinal": 5,
"name": "title_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 6,
"name": "refers_to_torrent_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 7,
@@ -47,9 +47,9 @@
"parameters": {
"Left": [
"Text",
"Int8",
"Int4",
"Int8",
"Int4",
"Int4",
"Int8"
]
},

View File

@@ -203,6 +203,11 @@
"ordinal": 37,
"name": "staff_note",
"type_info": "Text"
},
{
"ordinal": 38,
"name": "can_download",
"type_info": "Bool"
}
],
"parameters": {
@@ -253,6 +258,7 @@
false,
false,
false,
false,
false
]
},

View File

@@ -26,13 +26,13 @@
{
"ordinal": 4,
"name": "reported_torrent_id",
"type_info": "Int8"
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Int4",
"Int8",
"Int4",
"Text"
]
},

View File

@@ -6,12 +6,12 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "master_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 2,
@@ -231,7 +231,7 @@
],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -11,7 +11,7 @@
{
"ordinal": 1,
"name": "title_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 2,
@@ -427,7 +427,7 @@
{
"ordinal": 21,
"name": "filled_by_torrent_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 22,

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,

View File

@@ -5,7 +5,7 @@
"columns": [],
"parameters": {
"Left": [
"Int8",
"Int4",
"Int4"
]
},

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,
@@ -41,7 +41,7 @@
{
"ordinal": 7,
"name": "edition_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 8,
@@ -461,7 +461,7 @@
],
"parameters": {
"Left": [
"Int8",
"Int4",
"Text",
"Varchar",
"Text",

View File

@@ -203,6 +203,11 @@
"ordinal": 37,
"name": "staff_note",
"type_info": "Text"
},
{
"ordinal": 38,
"name": "can_download",
"type_info": "Bool"
}
],
"parameters": {
@@ -248,6 +253,7 @@
false,
false,
false,
false,
false
]
},

View File

@@ -1,32 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n INSERT INTO peers(torrent_id, peer_id, ip, port, user_id, real_uploaded, real_downloaded, user_agent, status)\n VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9::peer_status_enum)\n ON CONFLICT (torrent_id, peer_id, ip, port) DO UPDATE\n SET\n last_seen_at = NOW(),\n real_uploaded = $6,\n real_downloaded = $7,\n status = $9::peer_status_enum\n ",
"describe": {
"columns": [],
"parameters": {
"Left": [
"Int8",
"Bytea",
"Inet",
"Int4",
"Int4",
"Int8",
"Int8",
"Text",
{
"Custom": {
"name": "peer_status_enum",
"kind": {
"Enum": [
"seeding",
"leeching"
]
}
}
}
]
},
"nullable": []
},
"hash": "548e6f93067c88de9c7ae4e5a184ccc60bcf872eab55cfcd8219fe8a4e9f3e7f"
}

View File

@@ -6,12 +6,12 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "master_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 2,
@@ -231,8 +231,8 @@
],
"parameters": {
"Left": [
"Int8",
"Int8",
"Int4",
"Int4",
"Text",
"TextArray",
"Text",

View File

@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n id,\n passkey,\n TRUE AS \"can_download!\",\n 0::int4 AS \"num_seeding!\",\n 0::int4 AS \"num_leeching!\"\n FROM users\n ",
"query": "\n SELECT\n id,\n passkey as \"passkey: Passkey\",\n TRUE AS \"can_download!\",\n 0::int4 AS \"num_seeding!\",\n 0::int4 AS \"num_leeching!\"\n FROM users\n ",
"describe": {
"columns": [
{
@@ -10,7 +10,7 @@
},
{
"ordinal": 1,
"name": "passkey",
"name": "passkey: Passkey",
"type_info": "Varchar"
},
{
@@ -40,5 +40,5 @@
null
]
},
"hash": "9a2c40e32e99578394e376c8389fae1805a901c5f38fbe6988a1739f09818463"
"hash": "68d484ba5626b404315ba07d4fcf536f1ecda16c3f190bcb9483df9fe47b0f1c"
}
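
The `passkey as "passkey: Passkey"` rewrite uses sqlx's column type override syntax, which decodes the VARCHAR column into a dedicated Rust type instead of `String`. A minimal sketch of how such an override reads, assuming a hypothetical transparent newtype (the real `Passkey` is defined elsewhere in the workspace and may differ):

```rust
use sqlx::PgPool;

// Hypothetical newtype; `#[sqlx(transparent)]` delegates encoding/decoding
// to the inner String so the VARCHAR column maps onto it directly.
#[derive(Debug, sqlx::Type)]
#[sqlx(transparent)]
struct Passkey(String);

async fn find_passkeys(pool: &PgPool) -> sqlx::Result<Vec<Passkey>> {
    // `column AS "alias: RustType"` forces the macro to use Passkey
    // for this column instead of the default String mapping.
    let rows = sqlx::query!(r#"SELECT passkey AS "passkey: Passkey" FROM users"#)
        .fetch_all(pool)
        .await?;
    Ok(rows.into_iter().map(|r| r.passkey).collect())
}
```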

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,
@@ -41,7 +41,7 @@
{
"ordinal": 7,
"name": "edition_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 8,
@@ -461,7 +461,7 @@
],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -11,7 +11,7 @@
{
"ordinal": 1,
"name": "title_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 2,
@@ -427,7 +427,7 @@
{
"ordinal": 21,
"name": "filled_by_torrent_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 22,
@@ -438,7 +438,7 @@
"parameters": {
"Left": [
"Int8",
"Int8",
"Int4",
"Text",
"Varchar",
"Text",

View File

@@ -1,32 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT real_uploaded, real_downloaded\n FROM peers\n WHERE torrent_id = $1 AND peer_id = $2 AND ip = $3 AND port = $4 AND user_id = $5\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "real_uploaded",
"type_info": "Int8"
},
{
"ordinal": 1,
"name": "real_downloaded",
"type_info": "Int8"
}
],
"parameters": {
"Left": [
"Int8",
"Bytea",
"Inet",
"Int4",
"Int4"
]
},
"nullable": [
false,
false
]
},
"hash": "7da30fb9252c3cc090c82890cfb6ae1539df66561292d0da594f4f02d4c9c416"
}

View File

@@ -11,7 +11,7 @@
],
"parameters": {
"Left": [
"Int8",
"Int4",
"Int8"
]
},

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,

View File

@@ -1,74 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n ip,\n port,\n user_agent,\n MIN(first_seen_at) as \"first_seen_at!\",\n MAX(last_seen_at) as \"last_seen_at!\",\n SUM(real_uploaded)::BIGINT as \"real_uploaded!\",\n SUM(real_downloaded)::BIGINT as \"real_downloaded!\",\n status::peer_status_enum as \"status!: PeerStatus\"\n FROM peers\n WHERE user_id = $1\n GROUP BY (peer_id, ip, port, user_agent, status)\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "ip",
"type_info": "Inet"
},
{
"ordinal": 1,
"name": "port",
"type_info": "Int4"
},
{
"ordinal": 2,
"name": "user_agent",
"type_info": "Text"
},
{
"ordinal": 3,
"name": "first_seen_at!",
"type_info": "Timestamptz"
},
{
"ordinal": 4,
"name": "last_seen_at!",
"type_info": "Timestamptz"
},
{
"ordinal": 5,
"name": "real_uploaded!",
"type_info": "Int8"
},
{
"ordinal": 6,
"name": "real_downloaded!",
"type_info": "Int8"
},
{
"ordinal": 7,
"name": "status!: PeerStatus",
"type_info": {
"Custom": {
"name": "peer_status_enum",
"kind": {
"Enum": [
"seeding",
"leeching"
]
}
}
}
}
],
"parameters": {
"Left": [
"Int4"
]
},
"nullable": [
false,
false,
true,
null,
null,
null,
null,
false
]
},
"hash": "8c678307a3962e0a192b292877cc5170f194251c2fa8bad5cf3c53532ea6a627"
}

View File

@@ -1,6 +1,6 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT jsonb_agg(data)\n FROM (\n SELECT jsonb_build_object(\n 'id', tg.id, 'content_type', tg.content_type, 'name', tg.name, 'platform', tg.platform, 'covers', tg.covers,\n 'original_release_date', tg.original_release_date,\n 'edition_groups', COALESCE(\n jsonb_agg(\n jsonb_build_object(\n 'id', eg.id,\n 'name', eg.name,\n 'release_date', eg.release_date,\n 'distributor', eg.distributor,\n 'source', eg.source,\n 'additional_information', eg.additional_information\n )\n ) FILTER (WHERE eg.id IS NOT NULL),\n '[]'::jsonb\n )\n ) as data\n FROM title_groups tg\n LEFT JOIN edition_groups eg ON eg.title_group_id = tg.id\n LEFT JOIN (\n SELECT edition_group_id, MAX(created_at) as created_at\n FROM torrents\n GROUP BY edition_group_id\n ) AS latest_torrent ON latest_torrent.edition_group_id = eg.id\n WHERE ($1::BIGINT IS NOT NULL AND tg.id = $1)\n OR ($2::TEXT IS NOT NULL AND (tg.name ILIKE '%' || $2 || '%' OR $2 = ANY(tg.name_aliases)))\n AND ($3::content_type_enum IS NULL OR tg.content_type = $3::content_type_enum)\n GROUP BY tg.id\n ORDER BY MAX(latest_torrent.created_at) DESC NULLS LAST\n LIMIT $4\n ) AS subquery;\n ",
"query": "\n SELECT jsonb_agg(data)\n FROM (\n SELECT jsonb_build_object(\n 'id', tg.id, 'content_type', tg.content_type, 'name', tg.name, 'platform', tg.platform, 'covers', tg.covers,\n 'original_release_date', tg.original_release_date,\n 'edition_groups', COALESCE(\n jsonb_agg(\n jsonb_build_object(\n 'id', eg.id,\n 'name', eg.name,\n 'release_date', eg.release_date,\n 'distributor', eg.distributor,\n 'source', eg.source,\n 'additional_information', eg.additional_information\n )\n ) FILTER (WHERE eg.id IS NOT NULL),\n '[]'::jsonb\n )\n ) as data\n FROM title_groups tg\n LEFT JOIN edition_groups eg ON eg.title_group_id = tg.id\n LEFT JOIN (\n SELECT edition_group_id, MAX(created_at) as created_at\n FROM torrents\n GROUP BY edition_group_id\n ) AS latest_torrent ON latest_torrent.edition_group_id = eg.id\n WHERE ($1::INT IS NOT NULL AND tg.id = $1)\n OR ($2::TEXT IS NOT NULL AND (tg.name ILIKE '%' || $2 || '%' OR $2 = ANY(tg.name_aliases)))\n AND ($3::content_type_enum IS NULL OR tg.content_type = $3::content_type_enum)\n GROUP BY tg.id\n ORDER BY MAX(latest_torrent.created_at) DESC NULLS LAST\n LIMIT $4\n ) AS subquery;\n ",
"describe": {
"columns": [
{
@@ -11,7 +11,7 @@
],
"parameters": {
"Left": [
"Int8",
"Int4",
"Text",
{
"Custom": {
@@ -37,5 +37,5 @@
null
]
},
"hash": "17a66ce5222c12349791df72eeea87a4df730db5dd22ab5a286d6f5aed5060d0"
"hash": "979326b2fbccce1e8237d42e6b25c38b32d1dd0b49cb636d85bebe10a0624545"
}

View File

@@ -0,0 +1,50 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT\n id,\n upload_factor,\n download_factor,\n seeders,\n leechers,\n completed\n FROM torrents\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "id",
"type_info": "Int4"
},
{
"ordinal": 1,
"name": "upload_factor",
"type_info": "Float8"
},
{
"ordinal": 2,
"name": "download_factor",
"type_info": "Float8"
},
{
"ordinal": 3,
"name": "seeders",
"type_info": "Int8"
},
{
"ordinal": 4,
"name": "leechers",
"type_info": "Int8"
},
{
"ordinal": 5,
"name": "completed",
"type_info": "Int8"
}
],
"parameters": {
"Left": []
},
"nullable": [
false,
false,
false,
false,
false,
false
]
},
"hash": "9a005f6e33977b003ca157711fb83fe4f96aca8348097fce5db85bb0fb709d5a"
}

View File

@@ -5,7 +5,7 @@
"columns": [],
"parameters": {
"Left": [
"Int8",
"Int4",
"Int4",
"Text"
]

View File

@@ -5,8 +5,8 @@
"columns": [],
"parameters": {
"Left": [
"Int8",
"Int8"
"Int4",
"Int4"
]
},
"nullable": []

View File

@@ -11,7 +11,7 @@
],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,
@@ -16,7 +16,7 @@
],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -36,12 +36,12 @@
{
"ordinal": 6,
"name": "title_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 7,
"name": "master_group_id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 8,
@@ -54,8 +54,8 @@
"Int4",
"Int8",
"Int8",
"Int8",
"Int8",
"Int4",
"Int4",
"Int8",
"Text"
]

View File

@@ -203,6 +203,11 @@
"ordinal": 37,
"name": "staff_note",
"type_info": "Text"
},
{
"ordinal": 38,
"name": "can_download",
"type_info": "Bool"
}
],
"parameters": {
@@ -248,6 +253,7 @@
false,
false,
false,
false,
false
]
},

View File

@@ -21,7 +21,7 @@
],
"parameters": {
"Left": [
"Int8"
"Int4"
]
},
"nullable": [

View File

@@ -1,12 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n WITH peer_counts AS (\n SELECT\n torrent_id,\n COUNT(CASE WHEN status = 'seeding' THEN 1 END) AS current_seeders,\n COUNT(CASE WHEN status = 'leeching' THEN 1 END) AS current_leechers\n FROM\n peers\n GROUP BY\n torrent_id\n )\n UPDATE torrents AS t\n SET\n seeders = COALESCE(pc.current_seeders, 0),\n leechers = COALESCE(pc.current_leechers, 0)\n FROM\n torrents AS t_alias -- Use an alias for the table in the FROM clause to avoid ambiguity\n LEFT JOIN\n peer_counts AS pc ON t_alias.id = pc.torrent_id\n WHERE\n t.id = t_alias.id AND\n t.deleted_at IS NULL;\n ",
"describe": {
"columns": [],
"parameters": {
"Left": []
},
"nullable": []
},
"hash": "be1671602d0e2d5c5f430c344e239d8ef022c48b0a6326f095b2c61cadbfef9c"
}

View File

@@ -203,6 +203,11 @@
"ordinal": 37,
"name": "staff_note",
"type_info": "Text"
},
{
"ordinal": 38,
"name": "can_download",
"type_info": "Bool"
}
],
"parameters": {
@@ -248,6 +253,7 @@
false,
false,
false,
false,
false
]
},

View File

@@ -6,7 +6,7 @@
{
"ordinal": 0,
"name": "id",
"type_info": "Int8"
"type_info": "Int4"
},
{
"ordinal": 1,

View File

@@ -1,29 +0,0 @@
{
"db_name": "PostgreSQL",
"query": "\n SELECT peers.ip AS ip, peers.port AS port\n FROM peers\n WHERE\n torrent_id = $1\n AND\n peers.user_id != $2\n ",
"describe": {
"columns": [
{
"ordinal": 0,
"name": "ip",
"type_info": "Inet"
},
{
"ordinal": 1,
"name": "port",
"type_info": "Int4"
}
],
"parameters": {
"Left": [
"Int8",
"Int4"
]
},
"nullable": [
false,
false
]
},
"hash": "cd8465a3d4c43769c4b0cba93780d4d2fe7899d9ad2db1da3efd6cf30055daaf"
}

Some files were not shown because too many files have changed in this diff.