Mirror of https://github.com/readur/readur.git
Synced 2025-12-30 02:50:11 -06:00
feat(tests): I like pain, and adding fields to the Config struct across all tests
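The same fixture update repeats file by file below. For orientation, here is a condensed sketch of the pattern, assembled from the hunks in this commit; the Config field types and the OcrQueueService::new signature are inferred from these call sites rather than taken from the library source:

    // Config gains the two S3 fields (disabled in tests).
    let config = Config {
        // ...existing test fields...
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    // FileService is now built on top of an explicit storage backend.
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(
        config.upload_path.clone(),
        storage_backend,
    ));

    // OcrQueueService::new now takes the shared FileService as its fourth argument.
    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
        db.clone(),
        db.pool.clone(),
        2, // concurrent OCR jobs for the test
        file_service.clone(),
    ));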
@@ -54,18 +28,28 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
        db.clone(),
        db.pool.clone(),
        4,
        file_service.clone(),
    ));

    Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,

@@ -52,15 +52,23 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));
    let sync_progress_tracker = Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new());
    Arc::new(AppState {
        db,
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
@@ -53,8 +53,9 @@ mod tests {
        jwt_secret: "test-secret".to_string(),
        upload_path: "./test-uploads".to_string(),
        watch_folder: "./test-watch".to_string(),
        user_watch_base_dir: "./user_watch".to_string(),
        enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string()],
        user_watch_base_dir: "./user_watch".to_string(),
        enable_per_user_watch: false,
        allowed_file_types: vec!["pdf".to_string()],
        watch_interval_seconds: Some(30),
        file_stability_check_ms: Some(500),
        max_file_age_hours: None,

@@ -69,6 +70,8 @@ mod tests {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
        }
    }

@@ -38,15 +38,24 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
        }
    });

    let db = Database::new(&config.database_url).await?;
    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 1));

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 1, file_service.clone()));

    Ok(Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
@@ -13,14 +13,12 @@ use tracing::{info, warn, error};
use uuid::Uuid;

use readur::{
    config::Config,
    db::Database,
    models::Document,
    services::file_service::FileService,
    storage::factory::create_storage_backend,
    storage::StorageConfig,
    ocr::enhanced::EnhancedOcrService,
    ocr::queue::{OcrQueueService, OcrQueueItem},
    ocr::queue::OcrQueueService,
    db_guardrails_simple::DocumentTransactionManager,
};

@@ -33,7 +31,7 @@ async fn create_test_file_service(temp_path: &str) -> FileService {
struct OCRPipelineTestHarness {
    db: Database,
    pool: PgPool,
    file_service: FileService,
    file_service: Arc<FileService>,
    ocr_service: EnhancedOcrService,
    queue_service: OcrQueueService,
    transaction_manager: DocumentTransactionManager,
@@ -60,9 +58,9 @@ impl OCRPipelineTestHarness {
            upload_path: upload_path.clone()
        };
        let storage_backend = create_storage_backend(storage_config).await?;
        let file_service = FileService::with_storage(upload_path, storage_backend);
        let ocr_service = EnhancedOcrService::new("/tmp".to_string(), file_service.clone());
        let queue_service = OcrQueueService::new(db.clone(), pool.clone(), 4, std::sync::Arc::new(file_service));
        let file_service = Arc::new(FileService::with_storage(upload_path, storage_backend));
        let ocr_service = EnhancedOcrService::new("/tmp".to_string(), (*file_service).clone());
        let queue_service = OcrQueueService::new(db.clone(), pool.clone(), 4, file_service.clone());
        let transaction_manager = DocumentTransactionManager::new(pool.clone());

        // Ensure test upload directory exists
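The hunk above is the shape of the whole refactor: the harness now owns the FileService behind an Arc so that the OCR service and the queue service can share one instance. A minimal sketch of the resulting wiring, assuming EnhancedOcrService still takes FileService by value (which is why the Arc is dereferenced and cloned):

    struct OCRPipelineTestHarness {
        db: Database,
        pool: PgPool,
        file_service: Arc<FileService>, // previously a plain FileService
        ocr_service: EnhancedOcrService,
        queue_service: OcrQueueService,
        transaction_manager: DocumentTransactionManager,
    }

    // One Arc-wrapped FileService feeds both consumers.
    let file_service = Arc::new(FileService::with_storage(upload_path, storage_backend));
    // EnhancedOcrService gets its own FileService value cloned out of the Arc...
    let ocr_service = EnhancedOcrService::new("/tmp".to_string(), (*file_service).clone());
    // ...while OcrQueueService shares the Arc itself (cloning an Arc only bumps the refcount).
    let queue_service = OcrQueueService::new(db.clone(), pool.clone(), 4, file_service.clone());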
@@ -37,6 +37,8 @@ mod tests {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = readur::db::Database::new(&config.database_url).await.unwrap();

@@ -54,21 +56,28 @@ mod tests {
        }
    }

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let app = axum::Router::new()
        .nest("/api/auth", readur::routes::auth::router())
        .with_state(Arc::new(AppState {
            db: db.clone(),
            config,
            file_service: file_service.clone(),
            webdav_scheduler: None,
            source_scheduler: None,
            queue_service: Arc::new(readur::ocr::queue::OcrQueueService::new(
                db.clone(),
                db.pool.clone(),
                2
                2,
                file_service.clone()
            )),
            oidc_client: None,
            sync_progress_tracker: std::sync::Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new()),
            user_watch_service: None,
            user_watch_service: None,
        }));

    (app, ())
@@ -120,6 +129,8 @@ mod tests {
        oidc_client_secret: Some("test-client-secret".to_string()),
        oidc_issuer_url: Some(mock_server.uri()),
        oidc_redirect_uri: Some("http://localhost:8000/auth/oidc/callback".to_string()),
        s3_enabled: false,
        s3_config: None,
    };

    let oidc_client = match OidcClient::new(&config).await {

@@ -145,22 +156,29 @@ mod tests {
        }
    }

    // Create file service for OIDC app
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    // Create app with OIDC configuration
    let app = axum::Router::new()
        .nest("/api/auth", readur::routes::auth::router())
        .with_state(Arc::new(AppState {
            db: db.clone(),
            config,
            file_service: file_service.clone(),
            webdav_scheduler: None,
            source_scheduler: None,
            queue_service: Arc::new(readur::ocr::queue::OcrQueueService::new(
                db.clone(),
                db.pool.clone(),
                2
                2,
                file_service.clone()
            )),
            oidc_client,
            sync_progress_tracker: std::sync::Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new()),
            user_watch_service: None,
            user_watch_service: None,
        }));

    (app, mock_server)
@@ -48,12 +48,14 @@ impl SimpleThrottleTest {
            .await?;

        let db = Database::new(&db_url).await?;
        let file_service = Arc::new(create_test_file_service("/tmp/test_throttling").await);

        // Create queue service with throttling (max 15 concurrent jobs)
        let queue_service = Arc::new(OcrQueueService::new(
            db.clone(),
            pool.clone(),
            15 // This should prevent DB pool exhaustion
            15, // This should prevent DB pool exhaustion
            file_service
        ));

        Ok(Self {

@@ -46,14 +46,22 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));
    Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
@@ -28,7 +28,7 @@ use readur::{
    AppState,
    config::Config,
    db::Database,
    models::{Source, SourceType, SourceStatus, User, CreateSource, CreateUser, UserRole, AuthProvider},
    models::{Source, SourceType, SourceStatus, User, CreateSource, CreateUser, UserRole},
    auth::Claims,
};

@@ -60,21 +60,31 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
        db.clone(),
        db.pool.clone(),
        2,
        file_service.clone(),
    ));

    let sync_progress_tracker = Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new());

    // Create initial app state
    let mut app_state = AppState {
    let app_state = AppState {
        db: db.clone(),
        config,
        file_service: file_service.clone(),
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,

@@ -95,6 +105,7 @@ async fn create_test_app_state() -> Arc<AppState> {
    Arc::new(AppState {
        db: state_arc.db.clone(),
        config: state_arc.config.clone(),
        file_service: state_arc.file_service.clone(),
        webdav_scheduler: None,
        source_scheduler: Some(source_scheduler),
        queue_service: state_arc.queue_service.clone(),
@@ -143,16 +143,25 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
        }
    });
    let db = Database::new(&config.database_url).await?;

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = std::sync::Arc::new(
        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1)
        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1, file_service.clone())
    );

    Ok(Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,

@@ -37,7 +37,8 @@ async fn create_test_app_state() -> Arc<AppState> {
        upload_path: "/tmp/test_uploads".to_string(),
        watch_folder: "/tmp/watch".to_string(),
        user_watch_base_dir: "./user_watch".to_string(),
        enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
        enable_per_user_watch: false,
        allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
        watch_interval_seconds: Some(10),
        file_stability_check_ms: Some(1000),
        max_file_age_hours: Some(24),
@@ -52,18 +53,28 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(
        db.clone(),
        db.pool.clone(),
        4,
        file_service.clone(),
    ));

    Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,

@@ -138,7 +138,8 @@ async fn create_test_app_state() -> Arc<AppState> {
        upload_path: "/tmp/test_uploads".to_string(),
        watch_folder: "/tmp/test_watch".to_string(),
        user_watch_base_dir: "./user_watch".to_string(),
        enable_per_user_watch: false, allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
        enable_per_user_watch: false,
        allowed_file_types: vec!["pdf".to_string(), "txt".to_string()],
        watch_interval_seconds: Some(30),
        file_stability_check_ms: Some(500),
        max_file_age_hours: None,

@@ -153,14 +154,22 @@ async fn create_test_app_state() -> Arc<AppState> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    let db = Database::new(&config.database_url).await.unwrap();

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2));
    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 2, file_service.clone()));
    Arc::new(AppState {
        db,
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
@@ -348,6 +348,8 @@ fn test_webdav_scheduler_creation() {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    // Note: This is a minimal test since we can't easily mock the database

@@ -143,16 +143,25 @@ async fn create_test_app_state() -> Result<Arc<AppState>> {
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
        }
    });
    let db = Database::new(&config.database_url).await?;

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: config.upload_path.clone() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await?;
    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage(config.upload_path.clone(), storage_backend));

    let queue_service = std::sync::Arc::new(
        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1)
        readur::ocr::queue::OcrQueueService::new(db.clone(), db.get_pool().clone(), 1, file_service.clone())
    );

    Ok(Arc::new(AppState {
        db: db.clone(),
        config,
        file_service,
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
@@ -9,12 +9,11 @@ use serde_json::{json, Value};
use uuid::Uuid;

use readur::{
    db::Database,
    config::Config,
    models::*,
    routes,
    AppState,
    test_helpers,
    config::Config,
    db::Database,
};

// Removed constant - will use environment variables instead
@@ -82,22 +81,42 @@ async fn setup_test_app() -> (Router, Arc<AppState>) {
        .unwrap_or_else(|_| "postgresql://readur:readur@localhost:5432/readur".to_string());

    // Create test configuration with custom database URL
    let mut config = test_helpers::create_test_config();
    config.database_url = database_url.clone();
    config.jwt_secret = "test_jwt_secret_for_integration_tests".to_string();
    config.allowed_file_types = vec!["pdf".to_string(), "png".to_string()];
    config.watch_interval_seconds = Some(10);
    config.file_stability_check_ms = Some(1000);
    config.max_file_age_hours = Some(24);
    config.memory_limit_mb = 512;
    config.concurrent_ocr_jobs = 4;
    config.max_file_size_mb = 50;
    config.ocr_timeout_seconds = 300;
    let config = Config {
        database_url: database_url.clone(),
        server_address: "127.0.0.1:0".to_string(),
        jwt_secret: "test_jwt_secret_for_integration_tests".to_string(),
        upload_path: "/tmp/test_uploads".to_string(),
        watch_folder: "/tmp/test_watch".to_string(),
        user_watch_base_dir: "/tmp/user_watch".to_string(),
        enable_per_user_watch: false,
        allowed_file_types: vec!["pdf".to_string(), "png".to_string()],
        watch_interval_seconds: Some(10),
        file_stability_check_ms: Some(1000),
        max_file_age_hours: Some(24),
        memory_limit_mb: 512,
        concurrent_ocr_jobs: 4,
        max_file_size_mb: 50,
        ocr_timeout_seconds: 300,
        ocr_language: "eng".to_string(),
        cpu_priority: "normal".to_string(),
        oidc_enabled: false,
        oidc_client_id: None,
        oidc_client_secret: None,
        oidc_issuer_url: None,
        oidc_redirect_uri: None,
        s3_enabled: false,
        s3_config: None,
    };

    // Create test services
    let db = test_helpers::create_test_database().await;
    let file_service = test_helpers::create_test_file_service(Some("/tmp/test_uploads")).await;
    let queue_service = test_helpers::create_test_queue_service(db.clone(), db.pool.clone(), file_service.clone());
    let db = Database::new(&database_url).await.unwrap();

    // Create file service
    let storage_config = readur::storage::StorageConfig::Local { upload_path: "/tmp/test_uploads".to_string() };
    let storage_backend = readur::storage::factory::create_storage_backend(storage_config).await.unwrap();
    let file_service = std::sync::Arc::new(readur::services::file_service::FileService::with_storage("/tmp/test_uploads".to_string(), storage_backend));

    let queue_service = std::sync::Arc::new(readur::ocr::queue::OcrQueueService::new(db.clone(), db.pool.clone(), 4, file_service.clone()));

    // Create AppState
    let state = Arc::new(AppState {
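The final hunk is cut off at the AppState literal. Based on the other fixtures touched by this commit, the state presumably continues along these lines (a sketch mirrored from those hunks, not the literal contents of this file):

    let state = Arc::new(AppState {
        db: db.clone(),
        config,
        file_service: file_service.clone(),
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
        oidc_client: None,
        // Remaining fields mirrored from the OIDC test fixture above; hypothetical for this file.
        sync_progress_tracker: std::sync::Arc::new(readur::services::sync_progress_tracker::SyncProgressTracker::new()),
        user_watch_service: None,
    });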