mirror of https://github.com/readur/readur.git
synced 2025-12-30 02:50:11 -06:00
feat(metrics): try to simplify webdav metrics some
migrations/20250823000001_remove_webdav_metrics_tables.sql (new file, 22 lines)
@@ -0,0 +1,22 @@
-- Migration to remove the old WebDAV metrics tables
-- These tables are no longer needed as we've moved to in-memory metrics collection

-- Drop tables in reverse order of dependencies
DROP TABLE IF EXISTS webdav_request_metrics CASCADE;
DROP TABLE IF EXISTS webdav_directory_metrics CASCADE;
DROP TABLE IF EXISTS webdav_sync_sessions CASCADE;

-- Drop any indexes that may have been created
DROP INDEX IF EXISTS idx_webdav_sync_sessions_user_source;
DROP INDEX IF EXISTS idx_webdav_sync_sessions_started_at;
DROP INDEX IF EXISTS idx_webdav_sync_sessions_status;
DROP INDEX IF EXISTS idx_webdav_request_metrics_session;
DROP INDEX IF EXISTS idx_webdav_request_metrics_started_at;
DROP INDEX IF EXISTS idx_webdav_directory_metrics_session;
DROP INDEX IF EXISTS idx_webdav_directory_metrics_path;

-- Drop the enum types if they exist
DROP TYPE IF EXISTS webdav_sync_status CASCADE;
DROP TYPE IF EXISTS webdav_operation_type CASCADE;
DROP TYPE IF EXISTS webdav_request_type CASCADE;
DROP TYPE IF EXISTS webdav_scan_failure_type CASCADE;
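The header comment above says metrics collection has moved in-memory. The collector this commit actually adds (services::webdav_metrics_simple::WebDAVMetrics, constructed in src/main.rs further down) does not appear in this diff, so the following is only a minimal sketch of what an in-memory store of this shape could look like, assuming atomic counters are enough; every name below is illustrative, not the real implementation:

    use std::sync::atomic::{AtomicU64, Ordering};

    /// Illustrative in-memory WebDAV metrics store (not the actual
    /// webdav_metrics_simple::WebDAVMetrics, which this diff does not show).
    #[derive(Default)]
    pub struct InMemoryWebDAVMetrics {
        total_http_requests: AtomicU64,
        failed_requests: AtomicU64,
    }

    impl InMemoryWebDAVMetrics {
        /// Record one finished HTTP request; lock-free, so cheap on the sync hot path.
        pub fn record_request(&self, success: bool) {
            self.total_http_requests.fetch_add(1, Ordering::Relaxed);
            if !success {
                self.failed_requests.fetch_add(1, Ordering::Relaxed);
            }
        }

        /// Snapshot the request success rate as a percentage.
        pub fn request_success_rate(&self) -> f64 {
            let total = self.total_http_requests.load(Ordering::Relaxed);
            let failed = self.failed_requests.load(Ordering::Relaxed);
            if total == 0 {
                return 0.0;
            }
            (total - failed) as f64 / total as f64 * 100.0
        }
    }

Atomic counters keep the sync path lock-free, which is presumably the point of replacing the per-request database writes that these dropped tables used to receive.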
@@ -9,7 +9,6 @@ pub mod documents;
 pub mod settings;
 pub mod notifications;
 pub mod webdav;
-pub mod webdav_metrics;
 pub mod sources;
 pub mod source_errors;
 pub mod images;
@@ -1,677 +0,0 @@
use anyhow::Result;
use chrono::Utc;
use sqlx::Row;
use uuid::Uuid;

use crate::db::Database;
use crate::models::webdav_metrics::*;

impl Database {
    /// Create a new WebDAV sync session
    pub async fn create_webdav_sync_session(&self, session: &CreateWebDAVSyncSession) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_sync_sessions (
                    user_id, source_id, sync_type, root_path, max_depth
                ) VALUES ($1, $2, $3, $4, $5)
                RETURNING id
                "#
            )
            .bind(session.user_id)
            .bind(session.source_id)
            .bind(&session.sync_type)
            .bind(&session.root_path)
            .bind(session.max_depth)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Update a WebDAV sync session with new metrics
    pub async fn update_webdav_sync_session(
        &self,
        session_id: Uuid,
        update: &UpdateWebDAVSyncSession
    ) -> Result<bool> {
        self.with_retry(|| async {
            let rows_affected = sqlx::query(
                r#"
                UPDATE webdav_sync_sessions SET
                    directories_discovered = COALESCE($2, directories_discovered),
                    directories_processed = COALESCE($3, directories_processed),
                    files_discovered = COALESCE($4, files_discovered),
                    files_processed = COALESCE($5, files_processed),
                    total_bytes_discovered = COALESCE($6, total_bytes_discovered),
                    total_bytes_processed = COALESCE($7, total_bytes_processed),
                    directories_skipped = COALESCE($8, directories_skipped),
                    files_skipped = COALESCE($9, files_skipped),
                    skip_reasons = COALESCE($10, skip_reasons),
                    status = COALESCE($11, status),
                    final_error_message = COALESCE($12, final_error_message),
                    updated_at = NOW()
                WHERE id = $1
                "#
            )
            .bind(session_id)
            .bind(update.directories_discovered)
            .bind(update.directories_processed)
            .bind(update.files_discovered)
            .bind(update.files_processed)
            .bind(update.total_bytes_discovered)
            .bind(update.total_bytes_processed)
            .bind(update.directories_skipped)
            .bind(update.files_skipped)
            .bind(&update.skip_reasons)
            .bind(update.status.as_ref().map(|s| s.to_string()))
            .bind(&update.final_error_message)
            .execute(&self.pool)
            .await?;

            Ok(rows_affected.rows_affected() > 0)
        }).await
    }
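Every SET clause in the statement above has the form column = COALESCE($n, column), so binding NULL leaves that column untouched: the Option fields of UpdateWebDAVSyncSession map one-to-one onto a partial update. A minimal usage sketch (the db handle and session_id are assumed to be in scope):

    // Only bump the file counters; everything else binds NULL and is kept as-is.
    let update = UpdateWebDAVSyncSession {
        files_discovered: Some(120),
        files_processed: Some(87),
        directories_discovered: None,
        directories_processed: None,
        total_bytes_discovered: None,
        total_bytes_processed: None,
        directories_skipped: None,
        files_skipped: None,
        skip_reasons: None,
        status: None,
        final_error_message: None,
    };
    db.update_webdav_sync_session(session_id, &update).await?;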

    /// Finalize a WebDAV sync session (calculate final metrics)
    pub async fn finalize_webdav_sync_session(&self, session_id: Uuid) -> Result<()> {
        self.with_retry(|| async {
            // Debug: Check how many requests exist for this session before finalizing
            let request_count: (i64,) = sqlx::query_as(
                "SELECT COUNT(*) FROM webdav_request_metrics WHERE session_id = $1"
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("Finalizing session {}: found {} HTTP requests", session_id, request_count.0);

            // Instead of using the PostgreSQL function, do the aggregation in Rust
            // to avoid transaction isolation issues
            let (successful_requests, failed_requests, total_requests, network_time_ms): (i64, i64, i64, i64) = sqlx::query_as(
                r#"
                SELECT
                    COUNT(*) FILTER (WHERE success = true),
                    COUNT(*) FILTER (WHERE success = false),
                    COUNT(*),
                    CAST(COALESCE(SUM(duration_ms), 0) AS BIGINT)
                FROM webdav_request_metrics
                WHERE session_id = $1
                "#
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("Direct aggregation - total: {}, successful: {}, failed: {}", total_requests, successful_requests, failed_requests);

            // Get the slowest operation
            let slowest_operation: Option<(i64, String)> = sqlx::query_as(
                "SELECT duration_ms, target_path FROM webdav_request_metrics WHERE session_id = $1 ORDER BY duration_ms DESC LIMIT 1"
            )
            .bind(session_id)
            .fetch_optional(&self.pool)
            .await?;

            // Update the session directly with Rust-calculated values
            sqlx::query(
                r#"
                UPDATE webdav_sync_sessions SET
                    completed_at = NOW(),
                    duration_ms = EXTRACT(EPOCH FROM (NOW() - started_at)) * 1000,
                    total_http_requests = $2,
                    successful_requests = $3,
                    failed_requests = $4,
                    retry_attempts = 0,
                    network_time_ms = $5,
                    slowest_operation_ms = $6,
                    slowest_operation_path = $7,
                    processing_rate_files_per_sec = CASE
                        WHEN files_processed > 0 AND EXTRACT(EPOCH FROM (NOW() - started_at)) > 0
                        THEN files_processed / EXTRACT(EPOCH FROM (NOW() - started_at))
                        ELSE 0
                    END,
                    avg_file_size_bytes = CASE
                        WHEN files_processed > 0
                        THEN total_bytes_processed / files_processed
                        ELSE 0
                    END,
                    status = CASE
                        WHEN status = 'in_progress' THEN 'completed'
                        ELSE status
                    END,
                    updated_at = NOW()
                WHERE id = $1
                "#
            )
            .bind(session_id)
            .bind(total_requests as i32)
            .bind(successful_requests as i32)
            .bind(failed_requests as i32)
            .bind(network_time_ms)
            .bind(slowest_operation.as_ref().map(|(ms, _)| *ms))
            .bind(slowest_operation.as_ref().map(|(_, path)| path.as_str()))
            .execute(&self.pool)
            .await?;

            // Check the session after finalization
            let session_after: (i32, i32, i32) = sqlx::query_as(
                "SELECT total_http_requests, successful_requests, failed_requests FROM webdav_sync_sessions WHERE id = $1"
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("After finalization - total: {}, successful: {}, failed: {}", session_after.0, session_after.1, session_after.2);

            Ok(())
        }).await
    }

    /// Get a WebDAV sync session by ID
    pub async fn get_webdav_sync_session(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Option<WebDAVSyncSession>> {
        self.with_retry(|| async {
            let session = sqlx::query_as::<_, WebDAVSyncSession>(
                "SELECT * FROM webdav_sync_sessions WHERE id = $1 AND user_id = $2"
            )
            .bind(session_id)
            .bind(user_id)
            .fetch_optional(&self.pool)
            .await?;

            Ok(session)
        }).await
    }

    /// List WebDAV sync sessions with optional filtering
    pub async fn list_webdav_sync_sessions(
        &self,
        query: &WebDAVMetricsQuery
    ) -> Result<Vec<WebDAVSyncSession>> {
        self.with_retry(|| async {
            let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(7));
            let end_time = query.end_time.unwrap_or_else(|| Utc::now());
            let limit = query.limit.unwrap_or(100).min(1000); // Cap at 1000
            let offset = query.offset.unwrap_or(0);

            let sessions = sqlx::query_as::<_, WebDAVSyncSession>(
                r#"
                SELECT * FROM webdav_sync_sessions
                WHERE started_at BETWEEN $1 AND $2
                AND ($3::UUID IS NULL OR user_id = $3)
                AND ($4::UUID IS NULL OR source_id = $4)
                ORDER BY started_at DESC
                LIMIT $5 OFFSET $6
                "#
            )
            .bind(start_time)
            .bind(end_time)
            .bind(query.user_id)
            .bind(query.source_id)
            .bind(limit as i64)
            .bind(offset as i64)
            .fetch_all(&self.pool)
            .await?;

            Ok(sessions)
        }).await
    }

    /// Create a new WebDAV directory metric
    pub async fn create_webdav_directory_metric(
        &self,
        metric: &CreateWebDAVDirectoryMetric
    ) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_directory_metrics (
                    session_id, user_id, source_id, directory_path,
                    directory_depth, parent_directory_path
                ) VALUES ($1, $2, $3, $4, $5, $6)
                RETURNING id
                "#
            )
            .bind(metric.session_id)
            .bind(metric.user_id)
            .bind(metric.source_id)
            .bind(&metric.directory_path)
            .bind(metric.directory_depth)
            .bind(&metric.parent_directory_path)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Update a WebDAV directory metric
    pub async fn update_webdav_directory_metric(
        &self,
        metric_id: Uuid,
        update: &UpdateWebDAVDirectoryMetric
    ) -> Result<bool> {
        self.with_retry(|| async {
            let rows_affected = sqlx::query(
                r#"
                UPDATE webdav_directory_metrics SET
                    completed_at = CASE
                        WHEN completed_at IS NULL THEN NOW()
                        ELSE completed_at
                    END,
                    scan_duration_ms = CASE
                        WHEN completed_at IS NULL THEN EXTRACT(EPOCH FROM (NOW() - started_at)) * 1000
                        ELSE scan_duration_ms
                    END,
                    files_found = COALESCE($2, files_found),
                    subdirectories_found = COALESCE($3, subdirectories_found),
                    total_size_bytes = COALESCE($4, total_size_bytes),
                    files_processed = COALESCE($5, files_processed),
                    files_skipped = COALESCE($6, files_skipped),
                    files_failed = COALESCE($7, files_failed),
                    http_requests_made = COALESCE($8, http_requests_made),
                    propfind_requests = COALESCE($9, propfind_requests),
                    get_requests = COALESCE($10, get_requests),
                    errors_encountered = COALESCE($11, errors_encountered),
                    error_types = COALESCE($12, error_types),
                    warnings_count = COALESCE($13, warnings_count),
                    etag_matches = COALESCE($14, etag_matches),
                    etag_mismatches = COALESCE($15, etag_mismatches),
                    cache_hits = COALESCE($16, cache_hits),
                    cache_misses = COALESCE($17, cache_misses),
                    status = COALESCE($18, status),
                    skip_reason = COALESCE($19, skip_reason),
                    error_message = COALESCE($20, error_message)
                WHERE id = $1
                "#
            )
            .bind(metric_id)
            .bind(update.files_found)
            .bind(update.subdirectories_found)
            .bind(update.total_size_bytes)
            .bind(update.files_processed)
            .bind(update.files_skipped)
            .bind(update.files_failed)
            .bind(update.http_requests_made)
            .bind(update.propfind_requests)
            .bind(update.get_requests)
            .bind(update.errors_encountered)
            .bind(&update.error_types)
            .bind(update.warnings_count)
            .bind(update.etag_matches)
            .bind(update.etag_mismatches)
            .bind(update.cache_hits)
            .bind(update.cache_misses)
            .bind(&update.status)
            .bind(&update.skip_reason)
            .bind(&update.error_message)
            .execute(&self.pool)
            .await?;

            Ok(rows_affected.rows_affected() > 0)
        }).await
    }

    /// Get directory metrics for a session
    pub async fn get_webdav_directory_metrics(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Vec<WebDAVDirectoryMetric>> {
        self.with_retry(|| async {
            let metrics = sqlx::query_as::<_, WebDAVDirectoryMetric>(
                r#"
                SELECT * FROM webdav_directory_metrics
                WHERE session_id = $1 AND user_id = $2
                ORDER BY started_at ASC
                "#
            )
            .bind(session_id)
            .bind(user_id)
            .fetch_all(&self.pool)
            .await?;

            Ok(metrics)
        }).await
    }

    /// Record a WebDAV HTTP request metric
    pub async fn record_webdav_request_metric(
        &self,
        metric: &CreateWebDAVRequestMetric
    ) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_request_metrics (
                    session_id, directory_metric_id, user_id, source_id,
                    request_type, operation_type, target_path, duration_ms,
                    request_size_bytes, response_size_bytes, http_status_code,
                    dns_lookup_ms, tcp_connect_ms, tls_handshake_ms, time_to_first_byte_ms,
                    success, retry_attempt, error_type, error_message,
                    server_header, dav_header, etag_value, last_modified,
                    content_type, remote_ip, user_agent,
                    completed_at
                ) VALUES (
                    $1, $2, $3, $4, $5::webdav_request_type, $6::webdav_operation_type, $7, $8, $9, $10, $11, $12, $13, $14, $15,
                    $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW()
                )
                RETURNING id
                "#
            )
            .bind(metric.session_id)
            .bind(metric.directory_metric_id)
            .bind(metric.user_id)
            .bind(metric.source_id)
            .bind(metric.request_type.to_string().as_str())
            .bind(metric.operation_type.to_string().as_str())
            .bind(&metric.target_path)
            .bind(metric.duration_ms)
            .bind(metric.request_size_bytes)
            .bind(metric.response_size_bytes)
            .bind(metric.http_status_code)
            .bind(metric.dns_lookup_ms)
            .bind(metric.tcp_connect_ms)
            .bind(metric.tls_handshake_ms)
            .bind(metric.time_to_first_byte_ms)
            .bind(metric.success)
            .bind(metric.retry_attempt)
            .bind(&metric.error_type)
            .bind(&metric.error_message)
            .bind(&metric.server_header)
            .bind(&metric.dav_header)
            .bind(&metric.etag_value)
            .bind(metric.last_modified)
            .bind(&metric.content_type)
            .bind(&metric.remote_ip)
            .bind(&metric.user_agent)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Get request metrics for a session or directory
    pub async fn get_webdav_request_metrics(
        &self,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        limit: Option<i32>
    ) -> Result<Vec<WebDAVRequestMetric>> {
        self.with_retry(|| async {
            let limit = limit.unwrap_or(1000).min(10000); // Cap at 10k

            let metrics = sqlx::query_as::<_, WebDAVRequestMetric>(
                r#"
                SELECT
                    id, session_id, directory_metric_id, user_id, source_id,
                    request_type::TEXT as request_type,
                    operation_type::TEXT as operation_type,
                    target_path, started_at, completed_at, duration_ms,
                    request_size_bytes, response_size_bytes, http_status_code,
                    dns_lookup_ms, tcp_connect_ms, tls_handshake_ms, time_to_first_byte_ms,
                    success, retry_attempt, error_type, error_message,
                    server_header, dav_header, etag_value, last_modified,
                    content_type, remote_ip, user_agent
                FROM webdav_request_metrics
                WHERE user_id = $1
                AND ($2::UUID IS NULL OR session_id = $2)
                AND ($3::UUID IS NULL OR directory_metric_id = $3)
                ORDER BY started_at DESC
                LIMIT $4
                "#
            )
            .bind(user_id)
            .bind(session_id)
            .bind(directory_metric_id)
            .bind(limit as i64)
            .fetch_all(&self.pool)
            .await?;

            Ok(metrics)
        }).await
    }

    /// Get WebDAV metrics summary for a time period
    pub async fn get_webdav_metrics_summary(
        &self,
        query: &WebDAVMetricsQuery
    ) -> Result<Option<WebDAVMetricsSummary>> {
        self.with_retry(|| async {
            let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(1));
            let end_time = query.end_time.unwrap_or_else(|| Utc::now());

            // First try to call the function directly and see what happens
            let summary = match sqlx::query_as::<_, WebDAVMetricsSummary>(
                r#"
                SELECT
                    total_sessions,
                    successful_sessions,
                    failed_sessions,
                    total_files_processed,
                    total_bytes_processed,
                    avg_session_duration_sec,
                    avg_processing_rate,
                    total_http_requests,
                    request_success_rate,
                    avg_request_duration_ms,
                    common_error_types
                FROM get_webdav_metrics_summary($1, $2, $3, $4)
                "#
            )
            .bind(query.user_id)
            .bind(query.source_id)
            .bind(start_time)
            .bind(end_time)
            .fetch_optional(&self.pool)
            .await {
                Ok(result) => result,
                Err(e) => {
                    tracing::error!("Failed to call get_webdav_metrics_summary function: {}", e);
                    // Fall back to manual query if function fails
                    sqlx::query_as::<_, WebDAVMetricsSummary>(
                        r#"
                        SELECT
                            COALESCE(COUNT(*)::INTEGER, 0) as total_sessions,
                            COALESCE(COUNT(*) FILTER (WHERE s.status = 'completed')::INTEGER, 0) as successful_sessions,
                            COALESCE(COUNT(*) FILTER (WHERE s.status = 'failed')::INTEGER, 0) as failed_sessions,
                            COALESCE(SUM(s.files_processed)::BIGINT, 0) as total_files_processed,
                            COALESCE(SUM(s.total_bytes_processed)::BIGINT, 0) as total_bytes_processed,
                            COALESCE(AVG(s.duration_ms / 1000.0)::DOUBLE PRECISION, 0.0) as avg_session_duration_sec,
                            COALESCE(AVG(s.processing_rate_files_per_sec)::DOUBLE PRECISION, 0.0) as avg_processing_rate,
                            COALESCE(SUM(s.total_http_requests)::BIGINT, 0) as total_http_requests,
                            CASE
                                WHEN SUM(s.total_http_requests)::BIGINT > 0
                                THEN (SUM(s.successful_requests)::BIGINT::DOUBLE PRECISION / SUM(s.total_http_requests)::BIGINT * 100.0)
                                ELSE 0.0
                            END as request_success_rate,
                            COALESCE((SELECT AVG(duration_ms) FROM webdav_request_metrics r
                                      WHERE r.started_at BETWEEN $3 AND $4
                                      AND ($1 IS NULL OR r.user_id = $1)
                                      AND ($2 IS NULL OR r.source_id = $2)), 0.0)::DOUBLE PRECISION as avg_request_duration_ms,
                            COALESCE((SELECT jsonb_agg(jsonb_build_object('error_type', error_type, 'count', error_count))
                                      FROM (
                                          SELECT error_type, COUNT(*) as error_count
                                          FROM webdav_request_metrics r
                                          WHERE r.started_at BETWEEN $3 AND $4
                                          AND r.success = false
                                          AND r.error_type IS NOT NULL
                                          AND ($1 IS NULL OR r.user_id = $1)
                                          AND ($2 IS NULL OR r.source_id = $2)
                                          GROUP BY error_type
                                          ORDER BY error_count DESC
                                          LIMIT 10
                                      ) error_summary), '[]'::jsonb) as common_error_types
                        FROM webdav_sync_sessions s
                        WHERE s.started_at BETWEEN $3 AND $4
                        AND ($1 IS NULL OR s.user_id = $1)
                        AND ($2 IS NULL OR s.source_id = $2)
                        "#
                    )
                    .bind(query.user_id)
                    .bind(query.source_id)
                    .bind(start_time)
                    .bind(end_time)
                    .fetch_optional(&self.pool)
                    .await?
                }
            };

            Ok(summary)
        }).await
    }

    /// Get performance insights for a specific session
    pub async fn get_webdav_performance_insights(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Option<WebDAVPerformanceInsights>> {
        self.with_retry(|| async {
            // Get session info
            let session = self.get_webdav_sync_session(session_id, user_id).await?;
            if session.is_none() {
                return Ok(None);
            }

            // Get directory metrics
            let directory_metrics = self.get_webdav_directory_metrics(session_id, user_id).await?;

            // Calculate average directory scan time
            let avg_directory_scan_time_ms = if !directory_metrics.is_empty() {
                directory_metrics.iter()
                    .filter_map(|d| d.scan_duration_ms)
                    .sum::<i64>() as f64 / directory_metrics.len() as f64
            } else {
                0.0
            };

            // Find slowest directories
            let mut slowest_directories: Vec<SlowDirectoryInfo> = directory_metrics.iter()
                .filter_map(|d| {
                    d.scan_duration_ms.map(|duration| SlowDirectoryInfo {
                        path: d.directory_path.clone(),
                        scan_duration_ms: duration,
                        files_count: d.files_found,
                        size_bytes: d.total_size_bytes,
                        error_count: d.errors_encountered,
                    })
                })
                .collect();
            slowest_directories.sort_by(|a, b| b.scan_duration_ms.cmp(&a.scan_duration_ms));
            slowest_directories.truncate(10); // Top 10

            // Get request metrics for analysis
            let request_metrics = self.get_webdav_request_metrics(
                Some(session_id),
                None,
                user_id,
                Some(10000)
            ).await?;

            // Calculate request type distribution
            let propfind_requests: Vec<_> = request_metrics.iter()
                .filter(|r| r.request_type == "PROPFIND")
                .collect();
            let get_requests: Vec<_> = request_metrics.iter()
                .filter(|r| r.request_type == "GET")
                .collect();

            let request_distribution = RequestTypeDistribution {
                propfind_count: propfind_requests.len() as i32,
                get_count: get_requests.len() as i32,
                head_count: request_metrics.iter().filter(|r| r.request_type == "HEAD").count() as i32,
                options_count: request_metrics.iter().filter(|r| r.request_type == "OPTIONS").count() as i32,
                total_count: request_metrics.len() as i32,
                avg_propfind_duration_ms: if !propfind_requests.is_empty() {
                    propfind_requests.iter().map(|r| r.duration_ms).sum::<i64>() as f64 / propfind_requests.len() as f64
                } else { 0.0 },
                avg_get_duration_ms: if !get_requests.is_empty() {
                    get_requests.iter().map(|r| r.duration_ms).sum::<i64>() as f64 / get_requests.len() as f64
                } else { 0.0 },
            };

            // Analyze errors
            let total_errors = request_metrics.iter().filter(|r| !r.success).count() as i32;
            let network_errors = request_metrics.iter()
                .filter(|r| !r.success && r.error_type.as_ref().map_or(false, |e| e.contains("network") || e.contains("timeout")))
                .count() as i32;
            let auth_errors = request_metrics.iter()
                .filter(|r| !r.success && r.http_status_code.map_or(false, |s| s == 401 || s == 403))
                .count() as i32;
            let timeout_errors = request_metrics.iter()
                .filter(|r| !r.success && r.error_type.as_ref().map_or(false, |e| e.contains("timeout")))
                .count() as i32;
            let server_errors = request_metrics.iter()
                .filter(|r| !r.success && r.http_status_code.map_or(false, |s| s >= 500))
                .count() as i32;

            // Find most problematic paths
            let mut path_errors: std::collections::HashMap<String, i32> = std::collections::HashMap::new();
            for metric in &request_metrics {
                if !metric.success {
                    *path_errors.entry(metric.target_path.clone()).or_insert(0) += 1;
                }
            }
            let mut most_problematic_paths: Vec<_> = path_errors.into_iter().collect();
            most_problematic_paths.sort_by(|a, b| b.1.cmp(&a.1));
            let most_problematic_paths: Vec<String> = most_problematic_paths.into_iter()
                .take(5)
                .map(|(path, _)| path)
                .collect();

            let error_analysis = ErrorAnalysis {
                total_errors,
                network_errors,
                auth_errors,
                timeout_errors,
                server_errors,
                most_problematic_paths,
            };

            // Create simple performance trends (would be more sophisticated in production)
            let performance_trends = PerformanceTrends {
                requests_per_minute: vec![], // Would calculate from time-series data
                avg_response_time_trend: vec![],
                error_rate_trend: vec![],
                throughput_mbps_trend: vec![],
            };

            Ok(Some(WebDAVPerformanceInsights {
                session_id,
                avg_directory_scan_time_ms,
                slowest_directories,
                request_distribution,
                error_analysis,
                performance_trends,
            }))
        }).await
    }

    /// Clean up old WebDAV metrics (for maintenance)
    pub async fn cleanup_old_webdav_metrics(&self, days_to_keep: i32) -> Result<u64> {
        self.with_retry(|| async {
            let cutoff_date = Utc::now() - chrono::Duration::days(days_to_keep as i64);

            let result = sqlx::query(
                r#"
                DELETE FROM webdav_sync_sessions
                WHERE created_at < $1
                AND status IN ('completed', 'failed', 'cancelled')
                "#
            )
            .bind(cutoff_date)
            .execute(&self.pool)
            .await?;

            Ok(result.rows_affected())
        }).await
    }
}
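All of the methods in this deleted file funnel their queries through self.with_retry(...), whose definition lives elsewhere in the Database impl and is not part of this diff. A minimal sketch of such a helper, assuming a fixed attempt budget, linear backoff, and that every error is retryable (the real helper may well inspect error kinds):

    use anyhow::Result;
    use std::future::Future;

    /// Illustrative retry wrapper; the actual Database::with_retry is not shown in this diff.
    async fn with_retry<T, F, Fut>(op: F) -> Result<T>
    where
        F: Fn() -> Fut,
        Fut: Future<Output = Result<T>>,
    {
        let mut last_err = None;
        for attempt in 1..=3u64 {
            match op().await {
                Ok(value) => return Ok(value),
                Err(e) => {
                    last_err = Some(e);
                    if attempt < 3 {
                        // Linear backoff between attempts.
                        tokio::time::sleep(std::time::Duration::from_millis(100 * attempt)).await;
                    }
                }
            }
        }
        Err(last_err.expect("at least one attempt runs"))
    }

A closure-based signature like this matches the call sites above, where each retry re-creates the query future from scratch.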
@@ -45,6 +45,7 @@ pub struct AppState {
     pub oidc_client: Option<std::sync::Arc<OidcClient>>,
     pub sync_progress_tracker: std::sync::Arc<services::sync_progress_tracker::SyncProgressTracker>,
     pub user_watch_service: Option<std::sync::Arc<services::user_watch_service::UserWatchService>>,
+    pub webdav_metrics_collector: Option<std::sync::Arc<services::webdav_metrics_integration::WebDAVMetricsCollector>>,
 }

 /// Health check endpoint for monitoring
src/lib.rs.backup (new file, 61 lines)
@@ -0,0 +1,61 @@
pub mod auth;
pub mod config;
pub mod db;
pub mod db_guardrails_simple;
pub mod errors;
pub mod ingestion;
pub mod metadata_extraction;
pub mod mime_detection;
pub mod models;
pub mod monitoring;
pub mod ocr;
pub mod oidc;
pub mod routes;
pub mod scheduling;
pub mod seed;
pub mod services;
pub mod storage;
pub mod swagger;
pub mod utils;
pub mod webdav_xml_parser;

#[cfg(test)]
mod tests;

#[cfg(any(test, feature = "test-utils"))]
pub mod test_utils;

#[cfg(any(test, feature = "test-utils"))]
pub mod test_helpers;

use axum::{http::StatusCode, Json};
use utoipa;
use config::Config;
use db::Database;
use oidc::OidcClient;

#[derive(Clone)]
pub struct AppState {
    pub db: Database,
    pub config: Config,
    pub file_service: std::sync::Arc<services::file_service::FileService>,
    pub webdav_scheduler: Option<std::sync::Arc<scheduling::webdav_scheduler::WebDAVScheduler>>,
    pub source_scheduler: Option<std::sync::Arc<scheduling::source_scheduler::SourceScheduler>>,
    pub queue_service: std::sync::Arc<ocr::queue::OcrQueueService>,
    pub oidc_client: Option<std::sync::Arc<OidcClient>>,
    pub sync_progress_tracker: std::sync::Arc<services::sync_progress_tracker::SyncProgressTracker>,
    pub user_watch_service: Option<std::sync::Arc<services::user_watch_service::UserWatchService>>,
}

/// Health check endpoint for monitoring
#[utoipa::path(
    get,
    path = "/api/health",
    tag = "health",
    responses(
        (status = 200, description = "Service is healthy", body = serde_json::Value),
    )
)]
pub async fn health_check() -> Result<Json<serde_json::Value>, StatusCode> {
    Ok(Json(serde_json::json!({"status": "ok"})))
}
src/main.rs (10 changed lines)
@@ -379,6 +379,12 @@ async fn main() -> anyhow::Result<()> {
         None
     };

+    // Create simplified WebDAV metrics collector
+    let webdav_metrics_collector = {
+        let metrics = std::sync::Arc::new(crate::services::webdav_metrics_simple::WebDAVMetrics::new());
+        Some(std::sync::Arc::new(crate::services::webdav_metrics_integration::WebDAVMetricsCollector::new(metrics)))
+    };
+
     // Create web-facing state with shared queue service
     let web_state = AppState {
         db: web_db,
@@ -390,6 +396,7 @@ async fn main() -> anyhow::Result<()> {
         oidc_client: oidc_client.clone(),
         sync_progress_tracker: sync_progress_tracker.clone(),
         user_watch_service: user_watch_service.clone(),
+        webdav_metrics_collector: webdav_metrics_collector.clone(),
     };
     let web_state = Arc::new(web_state);

@@ -404,6 +411,7 @@ async fn main() -> anyhow::Result<()> {
         oidc_client: oidc_client.clone(),
         sync_progress_tracker: sync_progress_tracker.clone(),
         user_watch_service: user_watch_service.clone(),
+        webdav_metrics_collector: webdav_metrics_collector.clone(),
     };
     let background_state = Arc::new(background_state);

@@ -489,6 +497,7 @@ async fn main() -> anyhow::Result<()> {
         oidc_client: oidc_client.clone(),
         sync_progress_tracker: sync_progress_tracker.clone(),
         user_watch_service: user_watch_service.clone(),
+        webdav_metrics_collector: webdav_metrics_collector.clone(),
     };
     let web_state = Arc::new(updated_web_state);

@@ -529,7 +538,6 @@ async fn main() -> anyhow::Result<()> {
         .nest("/api/users", readur::routes::users::router())
         .nest("/api/webdav", readur::routes::webdav::router())
         .nest("/api/webdav/scan/failures", readur::routes::webdav_scan_failures::router())
-        .nest("/api/webdav-metrics", readur::routes::webdav_metrics::router())
         .merge(readur::swagger::create_swagger_router())
         .fallback_service(
             ServeDir::new(&static_dir)
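The collector wired up above is what the reworked collect_webdav_metrics later in this diff consumes through get_prometheus_metrics(). As a sketch of how other code could read the same snapshot (the field names come from the conversion code below; the handler itself is illustrative and not part of the commit):

    use axum::{extract::State, http::StatusCode, response::Json};
    use std::sync::Arc;

    // Hypothetical handler, not part of this commit.
    async fn webdav_metrics_snapshot(
        State(state): State<Arc<readur::AppState>>,
    ) -> Result<Json<serde_json::Value>, StatusCode> {
        // Degrade gracefully when no collector was configured.
        let collector = state
            .webdav_metrics_collector
            .as_ref()
            .ok_or(StatusCode::SERVICE_UNAVAILABLE)?;
        let m = collector.get_prometheus_metrics().await;
        Ok(Json(serde_json::json!({
            "total_sessions": m.total_sessions,
            "success_rate": m.success_rate,
            "total_http_requests": m.total_http_requests,
            "error_rate_last_hour": m.error_rate_last_hour,
        })))
    }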
@@ -6,7 +6,6 @@ pub mod search;
 pub mod settings;
 pub mod source;
 pub mod source_error;
-pub mod webdav_metrics;
 pub mod responses;

 // Re-export commonly used types
@@ -16,6 +15,5 @@ pub use search::*;
 pub use settings::*;
 pub use source::*;
 pub use source_error::*;
-pub use webdav_metrics::*;

 pub use responses::*;
@@ -1,363 +0,0 @@
use serde::{Deserialize, Serialize};
use sqlx::FromRow;
use std::time::Duration;
use uuid::Uuid;
use chrono::{DateTime, Utc};
use utoipa::{ToSchema, IntoParams};

/// WebDAV operation types for categorizing different kinds of operations
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum WebDAVOperationType {
    Discovery,
    Download,
    MetadataFetch,
    ConnectionTest,
    Validation,
    FullSync,
}

impl std::fmt::Display for WebDAVOperationType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Discovery => write!(f, "discovery"),
            Self::Download => write!(f, "download"),
            Self::MetadataFetch => write!(f, "metadata_fetch"),
            Self::ConnectionTest => write!(f, "connection_test"),
            Self::Validation => write!(f, "validation"),
            Self::FullSync => write!(f, "full_sync"),
        }
    }
}

/// WebDAV request types (HTTP methods)
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "UPPERCASE")]
pub enum WebDAVRequestType {
    #[serde(rename = "PROPFIND")]
    PropFind,
    #[serde(rename = "GET")]
    Get,
    #[serde(rename = "HEAD")]
    Head,
    #[serde(rename = "OPTIONS")]
    Options,
    #[serde(rename = "POST")]
    Post,
    #[serde(rename = "PUT")]
    Put,
    #[serde(rename = "DELETE")]
    Delete,
}

impl std::fmt::Display for WebDAVRequestType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::PropFind => write!(f, "PROPFIND"),
            Self::Get => write!(f, "GET"),
            Self::Head => write!(f, "HEAD"),
            Self::Options => write!(f, "OPTIONS"),
            Self::Post => write!(f, "POST"),
            Self::Put => write!(f, "PUT"),
            Self::Delete => write!(f, "DELETE"),
        }
    }
}
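The #[serde(rename = ...)] attributes and the Display impl above are kept in agreement, so the string a value serializes to is the same one the $5::webdav_request_type cast in record_webdav_request_metric receives. A small sketch of that round trip (serde_json is already a dependency of this crate):

    let propfind = WebDAVRequestType::PropFind;
    assert_eq!(propfind.to_string(), "PROPFIND");
    assert_eq!(serde_json::to_string(&propfind).unwrap(), "\"PROPFIND\"");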

/// Status of a WebDAV sync session
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, ToSchema)]
#[serde(rename_all = "snake_case")]
pub enum WebDAVSyncStatus {
    InProgress,
    Completed,
    Failed,
    Cancelled,
}

impl std::fmt::Display for WebDAVSyncStatus {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::InProgress => write!(f, "in_progress"),
            Self::Completed => write!(f, "completed"),
            Self::Failed => write!(f, "failed"),
            Self::Cancelled => write!(f, "cancelled"),
        }
    }
}

/// Request to create a new WebDAV sync session
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct CreateWebDAVSyncSession {
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub sync_type: String,
    pub root_path: String,
    pub max_depth: Option<i32>,
}

/// WebDAV sync session record
#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
pub struct WebDAVSyncSession {
    pub id: Uuid,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub started_at: DateTime<Utc>,
    pub completed_at: Option<DateTime<Utc>>,
    pub duration_ms: Option<i64>,
    pub sync_type: String,
    pub root_path: String,
    pub max_depth: Option<i32>,
    pub directories_discovered: i32,
    pub directories_processed: i32,
    pub files_discovered: i32,
    pub files_processed: i32,
    pub total_bytes_discovered: i64,
    pub total_bytes_processed: i64,
    pub avg_file_size_bytes: Option<i64>,
    pub processing_rate_files_per_sec: Option<f64>,
    pub total_http_requests: i32,
    pub successful_requests: i32,
    pub failed_requests: i32,
    pub retry_attempts: i32,
    pub directories_skipped: i32,
    pub files_skipped: i32,
    pub skip_reasons: Option<serde_json::Value>,
    pub status: String,
    pub final_error_message: Option<String>,
    pub slowest_operation_ms: Option<i64>,
    pub slowest_operation_path: Option<String>,
    pub network_time_ms: Option<i64>,
    pub processing_time_ms: Option<i64>,
    pub created_at: DateTime<Utc>,
    pub updated_at: DateTime<Utc>,
}

/// Request to create a directory metrics record
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct CreateWebDAVDirectoryMetric {
    pub session_id: Uuid,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub directory_path: String,
    pub directory_depth: i32,
    pub parent_directory_path: Option<String>,
}

/// WebDAV directory scan metrics
#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
pub struct WebDAVDirectoryMetric {
    pub id: Uuid,
    pub session_id: Uuid,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub directory_path: String,
    pub directory_depth: i32,
    pub parent_directory_path: Option<String>,
    pub started_at: DateTime<Utc>,
    pub completed_at: Option<DateTime<Utc>>,
    pub scan_duration_ms: Option<i64>,
    pub files_found: i32,
    pub subdirectories_found: i32,
    pub total_size_bytes: i64,
    pub files_processed: i32,
    pub files_skipped: i32,
    pub files_failed: i32,
    pub http_requests_made: i32,
    pub propfind_requests: i32,
    pub get_requests: i32,
    pub errors_encountered: i32,
    pub error_types: Option<serde_json::Value>,
    pub warnings_count: i32,
    pub avg_response_time_ms: Option<f64>,
    pub slowest_request_ms: Option<i64>,
    pub fastest_request_ms: Option<i64>,
    pub etag_matches: i32,
    pub etag_mismatches: i32,
    pub cache_hits: i32,
    pub cache_misses: i32,
    pub status: String,
    pub skip_reason: Option<String>,
    pub error_message: Option<String>,
    pub created_at: DateTime<Utc>,
}

/// Request to create an HTTP request metric record
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct CreateWebDAVRequestMetric {
    pub session_id: Option<Uuid>,
    pub directory_metric_id: Option<Uuid>,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub request_type: WebDAVRequestType,
    pub operation_type: WebDAVOperationType,
    pub target_path: String,
    pub duration_ms: i64,
    pub request_size_bytes: Option<i64>,
    pub response_size_bytes: Option<i64>,
    pub http_status_code: Option<i32>,
    pub dns_lookup_ms: Option<i64>,
    pub tcp_connect_ms: Option<i64>,
    pub tls_handshake_ms: Option<i64>,
    pub time_to_first_byte_ms: Option<i64>,
    pub success: bool,
    pub retry_attempt: i32,
    pub error_type: Option<String>,
    pub error_message: Option<String>,
    pub server_header: Option<String>,
    pub dav_header: Option<String>,
    pub etag_value: Option<String>,
    pub last_modified: Option<DateTime<Utc>>,
    pub content_type: Option<String>,
    pub remote_ip: Option<String>,
    pub user_agent: Option<String>,
}

/// WebDAV HTTP request metrics
#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
pub struct WebDAVRequestMetric {
    pub id: Uuid,
    pub session_id: Option<Uuid>,
    pub directory_metric_id: Option<Uuid>,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub request_type: String,
    pub operation_type: String,
    pub target_path: String,
    pub started_at: DateTime<Utc>,
    pub completed_at: Option<DateTime<Utc>>,
    pub duration_ms: i64,
    pub request_size_bytes: Option<i64>,
    pub response_size_bytes: Option<i64>,
    pub http_status_code: Option<i32>,
    pub dns_lookup_ms: Option<i64>,
    pub tcp_connect_ms: Option<i64>,
    pub tls_handshake_ms: Option<i64>,
    pub time_to_first_byte_ms: Option<i64>,
    pub success: bool,
    pub retry_attempt: i32,
    pub error_type: Option<String>,
    pub error_message: Option<String>,
    pub server_header: Option<String>,
    pub dav_header: Option<String>,
    pub etag_value: Option<String>,
    pub last_modified: Option<DateTime<Utc>>,
    pub content_type: Option<String>,
    pub remote_ip: Option<String>,
    pub user_agent: Option<String>,
}

/// Summary metrics for WebDAV operations
#[derive(Debug, Clone, FromRow, Serialize, Deserialize, ToSchema)]
pub struct WebDAVMetricsSummary {
    pub total_sessions: i32,
    pub successful_sessions: i32,
    pub failed_sessions: i32,
    pub total_files_processed: i64,
    pub total_bytes_processed: i64,
    pub avg_session_duration_sec: f64,
    pub avg_processing_rate: f64,
    pub total_http_requests: i64,
    pub request_success_rate: f64,
    pub avg_request_duration_ms: f64,
    pub common_error_types: serde_json::Value,
}

/// Request parameters for querying WebDAV metrics
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema, IntoParams)]
pub struct WebDAVMetricsQuery {
    pub user_id: Option<Uuid>,
    pub source_id: Option<Uuid>,
    pub start_time: Option<DateTime<Utc>>,
    pub end_time: Option<DateTime<Utc>>,
    pub limit: Option<i32>,
    pub offset: Option<i32>,
}

/// Performance insights for WebDAV operations
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct WebDAVPerformanceInsights {
    pub session_id: Uuid,
    pub avg_directory_scan_time_ms: f64,
    pub slowest_directories: Vec<SlowDirectoryInfo>,
    pub request_distribution: RequestTypeDistribution,
    pub error_analysis: ErrorAnalysis,
    pub performance_trends: PerformanceTrends,
}

#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct SlowDirectoryInfo {
    pub path: String,
    pub scan_duration_ms: i64,
    pub files_count: i32,
    pub size_bytes: i64,
    pub error_count: i32,
}

#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct RequestTypeDistribution {
    pub propfind_count: i32,
    pub get_count: i32,
    pub head_count: i32,
    pub options_count: i32,
    pub total_count: i32,
    pub avg_propfind_duration_ms: f64,
    pub avg_get_duration_ms: f64,
}

#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct ErrorAnalysis {
    pub total_errors: i32,
    pub network_errors: i32,
    pub auth_errors: i32,
    pub timeout_errors: i32,
    pub server_errors: i32,
    pub most_problematic_paths: Vec<String>,
}

#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct PerformanceTrends {
    pub requests_per_minute: Vec<f64>,
    pub avg_response_time_trend: Vec<f64>,
    pub error_rate_trend: Vec<f64>,
    pub throughput_mbps_trend: Vec<f64>,
}

/// Update request for WebDAV sync session
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct UpdateWebDAVSyncSession {
    pub directories_discovered: Option<i32>,
    pub directories_processed: Option<i32>,
    pub files_discovered: Option<i32>,
    pub files_processed: Option<i32>,
    pub total_bytes_discovered: Option<i64>,
    pub total_bytes_processed: Option<i64>,
    pub directories_skipped: Option<i32>,
    pub files_skipped: Option<i32>,
    pub skip_reasons: Option<serde_json::Value>,
    pub status: Option<WebDAVSyncStatus>,
    pub final_error_message: Option<String>,
}

/// Update request for WebDAV directory metric
#[derive(Debug, Clone, Serialize, Deserialize, ToSchema)]
pub struct UpdateWebDAVDirectoryMetric {
    pub files_found: Option<i32>,
    pub subdirectories_found: Option<i32>,
    pub total_size_bytes: Option<i64>,
    pub files_processed: Option<i32>,
    pub files_skipped: Option<i32>,
    pub files_failed: Option<i32>,
    pub http_requests_made: Option<i32>,
    pub propfind_requests: Option<i32>,
    pub get_requests: Option<i32>,
    pub errors_encountered: Option<i32>,
    pub error_types: Option<serde_json::Value>,
    pub warnings_count: Option<i32>,
    pub etag_matches: Option<i32>,
    pub etag_mismatches: Option<i32>,
    pub cache_hits: Option<i32>,
    pub cache_misses: Option<i32>,
    pub status: Option<String>,
    pub skip_reason: Option<String>,
    pub error_message: Option<String>,
}
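Every field of WebDAVMetricsQuery is optional; the database layer shown earlier supplies the defaults (in list_webdav_sync_sessions: a window of the last 7 days, a limit of 100 capped at 1000, and an offset of 0). A minimal sketch of a query that leans on those defaults, assuming a db handle and a user_id in scope:

    let query = WebDAVMetricsQuery {
        user_id: Some(user_id), // scope results to one user
        source_id: None,        // any source
        start_time: None,       // defaults to now minus 7 days
        end_time: None,         // defaults to now
        limit: None,            // defaults to 100 (capped at 1000)
        offset: None,           // defaults to 0
    };
    let sessions = db.list_webdav_sync_sessions(&query).await?;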
@@ -14,5 +14,4 @@ pub mod source_errors;
 pub mod sources;
 pub mod users;
 pub mod webdav;
-pub mod webdav_metrics;
 pub mod webdav_scan_failures;
@@ -649,120 +649,42 @@ async fn collect_security_metrics(state: &Arc<AppState>) -> Result<SecurityMetri
 }

 async fn collect_webdav_metrics(state: &Arc<AppState>) -> Result<WebDAVMetrics, StatusCode> {
-    // Get WebDAV session metrics for the last 24 hours
-    #[derive(sqlx::FromRow)]
-    struct WebDAVStats {
-        total_sessions: Option<i64>,
-        successful_sessions: Option<i64>,
-        failed_sessions: Option<i64>,
-        total_files_processed: Option<i64>,
-        total_bytes_processed: Option<i64>,
-        avg_session_duration_sec: Option<f64>,
-        avg_processing_rate: Option<f64>,
-        total_http_requests: Option<i64>,
-        request_success_rate: Option<f64>,
-        avg_request_duration_ms: Option<f64>,
-    }
-
-    let webdav_stats = sqlx::query_as::<_, WebDAVStats>(
-        r#"
-        SELECT
-            COUNT(*) as total_sessions,
-            COUNT(*) FILTER (WHERE status = 'completed') as successful_sessions,
-            COUNT(*) FILTER (WHERE status = 'failed') as failed_sessions,
-            COALESCE(SUM(files_processed)::BIGINT, 0) as total_files_processed,
-            COALESCE(SUM(total_bytes_processed)::BIGINT, 0) as total_bytes_processed,
-            COALESCE(AVG(duration_ms / 1000.0)::DOUBLE PRECISION, 0) as avg_session_duration_sec,
-            COALESCE(AVG(processing_rate_files_per_sec)::DOUBLE PRECISION, 0) as avg_processing_rate,
-            COALESCE(SUM(total_http_requests)::BIGINT, 0) as total_http_requests,
-            CASE
-                WHEN SUM(total_http_requests)::BIGINT > 0
-                THEN (SUM(successful_requests)::BIGINT::DECIMAL / SUM(total_http_requests)::BIGINT * 100)::DOUBLE PRECISION
-                ELSE 0::DOUBLE PRECISION
-            END as request_success_rate,
-            COALESCE(
-                (SELECT AVG(duration_ms)::DOUBLE PRECISION FROM webdav_request_metrics
-                 WHERE started_at > NOW() - INTERVAL '24 hours'),
-                0::DOUBLE PRECISION
-            ) as avg_request_duration_ms
-        FROM webdav_sync_sessions
-        WHERE started_at > NOW() - INTERVAL '24 hours'
-        "#
-    )
-    .fetch_one(&state.db.pool)
-    .await
-    .map_err(|e| {
-        tracing::error!("Failed to get WebDAV session metrics: {}", e);
-        StatusCode::INTERNAL_SERVER_ERROR
-    })?;
-
-    // Get sessions active in last hour
-    let sessions_last_hour = sqlx::query_scalar::<_, i64>(
-        "SELECT COUNT(*) FROM webdav_sync_sessions WHERE started_at > NOW() - INTERVAL '1 hour'"
-    )
-    .fetch_one(&state.db.pool)
-    .await
-    .map_err(|e| {
-        tracing::error!("Failed to get recent WebDAV sessions: {}", e);
-        StatusCode::INTERNAL_SERVER_ERROR
-    })?;
-
-    // Calculate error rate for last hour
-    #[derive(sqlx::FromRow)]
-    struct ErrorRate {
-        total_requests: Option<i64>,
-        failed_requests: Option<i64>,
-    }
-
-    let error_stats = sqlx::query_as::<_, ErrorRate>(
-        r#"
-        SELECT
-            COUNT(*) as total_requests,
-            COUNT(*) FILTER (WHERE success = false) as failed_requests
-        FROM webdav_request_metrics
-        WHERE started_at > NOW() - INTERVAL '1 hour'
-        "#
-    )
-    .fetch_one(&state.db.pool)
-    .await
-    .map_err(|e| {
-        tracing::error!("Failed to get WebDAV error rates: {}", e);
-        StatusCode::INTERNAL_SERVER_ERROR
-    })?;
-
-    let error_rate_last_hour = if let (Some(total), Some(failed)) = (error_stats.total_requests, error_stats.failed_requests) {
-        if total > 0 {
-            (failed as f64 / total as f64) * 100.0
-        } else {
-            0.0
-        }
+    // Use the new simplified metrics collector
+    if let Some(ref collector) = state.webdav_metrics_collector {
+        let metrics = collector.get_prometheus_metrics().await;
+
+        // Convert from new PrometheusMetrics to old WebDAVMetrics struct for compatibility
+        Ok(WebDAVMetrics {
+            total_sessions: metrics.total_sessions as i64,
+            successful_sessions: metrics.successful_sessions as i64,
+            failed_sessions: metrics.failed_sessions as i64,
+            success_rate: metrics.success_rate,
+            total_files_processed: metrics.total_files_processed as i64,
+            total_bytes_processed: metrics.total_bytes_processed as i64,
+            avg_session_duration_sec: metrics.avg_session_duration_sec,
+            avg_processing_rate: metrics.avg_processing_rate,
+            total_http_requests: metrics.total_http_requests as i64,
+            request_success_rate: metrics.request_success_rate,
+            avg_request_duration_ms: metrics.avg_request_duration_ms,
+            sessions_last_hour: metrics.sessions_last_hour as i64,
+            error_rate_last_hour: metrics.error_rate_last_hour,
+        })
     } else {
-        0.0
-    };
-
-    let total_sessions = webdav_stats.total_sessions.unwrap_or(0);
-    let successful_sessions = webdav_stats.successful_sessions.unwrap_or(0);
-    let failed_sessions = webdav_stats.failed_sessions.unwrap_or(0);
-
-    let success_rate = if total_sessions > 0 {
-        (successful_sessions as f64 / total_sessions as f64) * 100.0
-    } else {
-        0.0
-    };
-
-    Ok(WebDAVMetrics {
-        total_sessions,
-        successful_sessions,
-        failed_sessions,
-        success_rate,
-        total_files_processed: webdav_stats.total_files_processed.unwrap_or(0),
-        total_bytes_processed: webdav_stats.total_bytes_processed.unwrap_or(0),
-        avg_session_duration_sec: webdav_stats.avg_session_duration_sec.unwrap_or(0.0),
-        avg_processing_rate: webdav_stats.avg_processing_rate.unwrap_or(0.0),
-        total_http_requests: webdav_stats.total_http_requests.unwrap_or(0),
-        request_success_rate: webdav_stats.request_success_rate.unwrap_or(0.0),
-        avg_request_duration_ms: webdav_stats.avg_request_duration_ms.unwrap_or(0.0),
-        sessions_last_hour,
-        error_rate_last_hour,
-    })
+        // Return empty metrics if collector not available
+        Ok(WebDAVMetrics {
+            total_sessions: 0,
+            successful_sessions: 0,
+            failed_sessions: 0,
+            success_rate: 0.0,
+            total_files_processed: 0,
+            total_bytes_processed: 0,
+            avg_session_duration_sec: 0.0,
+            avg_processing_rate: 0.0,
+            total_http_requests: 0,
+            request_success_rate: 0.0,
+            avg_request_duration_ms: 0.0,
+            sessions_last_hour: 0,
+            error_rate_last_hour: 0.0,
+        })
+    }
 }
@@ -1,627 +0,0 @@
use axum::{
    extract::{Path, Query, State},
    http::StatusCode,
    response::Json,
    routing::get,
    Router,
};
use std::sync::Arc;
use serde::{Deserialize, Serialize};
use utoipa::{ToSchema, IntoParams};
use uuid::Uuid;

use crate::{auth::AuthUser, AppState};
use crate::models::webdav_metrics::*;
use crate::services::webdav_metrics_tracker::WebDAVMetricsTracker;

/// Validate and normalize a limit parameter
fn validate_limit(limit: Option<i32>) -> Option<i32> {
    match limit {
        Some(l) if l < 1 => {
            tracing::warn!("Invalid limit parameter: {} (must be at least 1)", l);
            None
        }
        Some(l) if l > 1000 => {
            tracing::warn!("Limit parameter {} exceeds maximum, capping at 1000", l);
            Some(1000)
        }
        Some(l) => Some(l),
        None => None,
    }
}

/// Validate and normalize an offset parameter
fn validate_offset(offset: Option<i32>) -> Option<i32> {
    match offset {
        Some(o) if o < 0 => {
            tracing::warn!("Invalid offset parameter: {} (must be non-negative)", o);
            None
        }
        Some(o) => Some(o),
        None => None,
    }
}
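Unlike ListSessionsQuery::validate below, which rejects bad input with an error, these two helpers fold invalid values to None, presumably leaving the caller to fall back to its defaults (the call sites are outside this excerpt). Concretely:

    assert_eq!(validate_limit(Some(0)), None);          // too small: caller falls back to a default
    assert_eq!(validate_limit(Some(5000)), Some(1000)); // above the maximum: capped
    assert_eq!(validate_limit(Some(50)), Some(50));     // in range: passed through
    assert_eq!(validate_offset(Some(-1)), None);        // negative: caller falls back to a default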

pub fn router() -> Router<Arc<AppState>> {
    Router::new()
        .route("/sessions", get(list_webdav_sessions))
        .route("/sessions/{session_id}", get(get_webdav_session))
        .route("/sessions/{session_id}/insights", get(get_session_performance_insights))
        .route("/sessions/{session_id}/directories", get(get_session_directory_metrics))
        .route("/sessions/{session_id}/requests", get(get_session_request_metrics))
        .route("/summary", get(get_webdav_metrics_summary))
        .route("/performance", get(get_webdav_performance_overview))
}
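This is the router that src/main.rs used to mount at /api/webdav-metrics (the .nest(...) call removed above), so, for example, the first route answered GET /api/webdav-metrics/sessions. A sketch of that composition, assuming an app_state: Arc<AppState> in scope:

    use axum::Router;

    let app: Router = Router::new()
        .nest("/api/webdav-metrics", router())
        .with_state(app_state);
    // GET /api/webdav-metrics/sessions -> list_webdav_sessions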
|
||||
/// Query parameters for listing WebDAV sessions
|
||||
#[derive(Debug, Deserialize, ToSchema, IntoParams)]
|
||||
pub struct ListSessionsQuery {
|
||||
pub source_id: Option<Uuid>,
|
||||
pub start_time: Option<chrono::DateTime<chrono::Utc>>,
|
||||
pub end_time: Option<chrono::DateTime<chrono::Utc>>,
|
||||
pub limit: Option<i32>,
|
||||
pub offset: Option<i32>,
|
||||
}
|
||||
|
||||
impl ListSessionsQuery {
|
||||
/// Validate and normalize query parameters
|
||||
pub fn validate(&self) -> Result<Self, String> {
|
||||
// Validate limit
|
||||
let limit = match self.limit {
|
||||
Some(l) if l < 1 => return Err("limit must be at least 1".to_string()),
|
||||
Some(l) if l > 1000 => return Err("limit cannot exceed 1000".to_string()),
|
||||
Some(l) => Some(l),
|
||||
None => None,
|
||||
};
|
||||
|
||||
// Validate offset
|
||||
let offset = match self.offset {
|
||||
Some(o) if o < 0 => return Err("offset must be non-negative".to_string()),
|
||||
Some(o) => Some(o),
|
||||
None => None,
|
||||
};
|
||||
|
||||
Ok(Self {
|
||||
source_id: self.source_id,
|
||||
start_time: self.start_time,
|
||||
end_time: self.end_time,
|
||||
limit,
|
||||
offset,
|
||||
})
|
||||
}
|
||||
}

/// Query parameters for metrics summary
#[derive(Debug, Deserialize, ToSchema, IntoParams)]
pub struct MetricsSummaryQuery {
    pub source_id: Option<Uuid>,
    pub start_time: Option<chrono::DateTime<chrono::Utc>>,
    pub end_time: Option<chrono::DateTime<chrono::Utc>>,
}

/// Performance overview response
#[derive(Debug, Serialize, ToSchema)]
pub struct WebDAVPerformanceOverview {
    pub recent_sessions: Vec<WebDAVSyncSession>,
    pub summary_stats: WebDAVMetricsSummary,
    pub top_slow_directories: Vec<SlowDirectoryInfo>,
    pub error_trends: ErrorTrendData,
    pub performance_recommendations: Vec<PerformanceRecommendation>,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct ErrorTrendData {
    pub total_errors_last_24h: i32,
    pub error_rate_trend: f64, // Percentage change from the previous period
    pub common_error_types: Vec<ErrorTypeCount>,
    pub most_problematic_sources: Vec<ProblematicSource>,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct ErrorTypeCount {
    pub error_type: String,
    pub count: i32,
    pub percentage: f64,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct ProblematicSource {
    pub source_id: Option<Uuid>,
    pub source_name: Option<String>,
    pub error_count: i32,
    pub last_error: chrono::DateTime<chrono::Utc>,
}

#[derive(Debug, Serialize, ToSchema)]
pub struct PerformanceRecommendation {
    pub category: String,
    pub title: String,
    pub description: String,
    pub priority: String, // "high", "medium", "low"
    pub potential_impact: String,
}

/// List WebDAV sync sessions for the current user
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/sessions",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(ListSessionsQuery),
    responses(
        (status = 200, description = "List of WebDAV sync sessions", body = Vec<WebDAVSyncSession>),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn list_webdav_sessions(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Query(query): Query<ListSessionsQuery>,
) -> Result<Json<Vec<WebDAVSyncSession>>, StatusCode> {
    // Validate query parameters
    let validated_query = query.validate().map_err(|e| {
        tracing::warn!("Invalid query parameters: {}", e);
        StatusCode::BAD_REQUEST
    })?;

    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let metrics_query = WebDAVMetricsQuery {
        user_id: Some(auth_user.user.id),
        source_id: validated_query.source_id,
        start_time: validated_query.start_time,
        end_time: validated_query.end_time,
        limit: validated_query.limit,
        offset: validated_query.offset,
    };

    let sessions = metrics_tracker
        .list_sessions(&metrics_query)
        .await
        .map_err(|e| {
            tracing::error!("Failed to list WebDAV sessions: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    Ok(Json(sessions))
}

/// Get details for a specific WebDAV sync session
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/sessions/{session_id}",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(
        ("session_id" = Uuid, Path, description = "Session ID")
    ),
    responses(
        (status = 200, description = "WebDAV sync session details", body = WebDAVSyncSession),
        (status = 404, description = "Session not found"),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_webdav_session(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Path(session_id): Path<Uuid>,
) -> Result<Json<WebDAVSyncSession>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let session = metrics_tracker
        .get_session_details(session_id, auth_user.user.id)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get WebDAV session {}: {}", session_id, e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
        .ok_or(StatusCode::NOT_FOUND)?;

    Ok(Json(session))
}

/// Get performance insights for a specific session
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/sessions/{session_id}/insights",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(
        ("session_id" = Uuid, Path, description = "Session ID")
    ),
    responses(
        (status = 200, description = "Session performance insights", body = WebDAVPerformanceInsights),
        (status = 404, description = "Session not found"),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_session_performance_insights(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Path(session_id): Path<Uuid>,
) -> Result<Json<WebDAVPerformanceInsights>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let insights = metrics_tracker
        .get_performance_insights(session_id, auth_user.user.id)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get performance insights for session {}: {}", session_id, e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
        .ok_or(StatusCode::NOT_FOUND)?;

    Ok(Json(insights))
}

/// Get directory metrics for a specific session
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/sessions/{session_id}/directories",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(
        ("session_id" = Uuid, Path, description = "Session ID")
    ),
    responses(
        (status = 200, description = "Directory metrics for the session", body = Vec<WebDAVDirectoryMetric>),
        (status = 404, description = "Session not found"),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_session_directory_metrics(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Path(session_id): Path<Uuid>,
) -> Result<Json<Vec<WebDAVDirectoryMetric>>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let directory_metrics = metrics_tracker
        .get_directory_metrics(session_id, auth_user.user.id)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get directory metrics for session {}: {}", session_id, e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    Ok(Json(directory_metrics))
}

/// Get request metrics for a specific session
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/sessions/{session_id}/requests",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(
        ("session_id" = Uuid, Path, description = "Session ID"),
        ("limit" = Option<i32>, Query, description = "Maximum number of requests to return")
    ),
    responses(
        (status = 200, description = "HTTP request metrics for the session", body = Vec<WebDAVRequestMetric>),
        (status = 404, description = "Session not found"),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_session_request_metrics(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Path(session_id): Path<Uuid>,
    Query(query): Query<serde_json::Value>,
) -> Result<Json<Vec<WebDAVRequestMetric>>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let limit = query.get("limit")
        .and_then(|v| v.as_i64())
        .map(|v| v as i32)
        .and_then(|l| {
            if l < 1 {
                tracing::warn!("Invalid limit parameter: {} (must be at least 1)", l);
                None
            } else if l > 1000 {
                tracing::warn!("Invalid limit parameter: {} (cannot exceed 1000)", l);
                Some(1000) // Cap at the maximum allowed
            } else {
                Some(l)
            }
        });

    let request_metrics = metrics_tracker
        .get_request_metrics(Some(session_id), None, auth_user.user.id, limit)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get request metrics for session {}: {}", session_id, e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    Ok(Json(request_metrics))
}

/// Get WebDAV metrics summary
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/summary",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(MetricsSummaryQuery),
    responses(
        (status = 200, description = "WebDAV metrics summary", body = WebDAVMetricsSummary),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_webdav_metrics_summary(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Query(query): Query<MetricsSummaryQuery>,
) -> Result<Json<WebDAVMetricsSummary>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    let metrics_query = WebDAVMetricsQuery {
        user_id: Some(auth_user.user.id),
        source_id: query.source_id,
        start_time: query.start_time,
        end_time: query.end_time,
        limit: None,
        offset: None,
    };

    let summary = metrics_tracker
        .get_metrics_summary(&metrics_query)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get WebDAV metrics summary: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
        .unwrap_or_else(|| {
            // Return an empty summary if no data was found
            WebDAVMetricsSummary {
                total_sessions: 0,
                successful_sessions: 0,
                failed_sessions: 0,
                total_files_processed: 0,
                total_bytes_processed: 0,
                avg_session_duration_sec: 0.0,
                avg_processing_rate: 0.0,
                total_http_requests: 0,
                request_success_rate: 0.0,
                avg_request_duration_ms: 0.0,
                common_error_types: serde_json::json!([]),
            }
        });

    Ok(Json(summary))
}

/// Get a comprehensive WebDAV performance overview
#[utoipa::path(
    get,
    path = "/api/webdav-metrics/performance",
    tag = "webdav-metrics",
    security(("bearer_auth" = [])),
    params(MetricsSummaryQuery),
    responses(
        (status = 200, description = "WebDAV performance overview", body = WebDAVPerformanceOverview),
        (status = 401, description = "Unauthorized"),
        (status = 500, description = "Internal server error")
    )
)]
pub async fn get_webdav_performance_overview(
    State(state): State<Arc<AppState>>,
    auth_user: AuthUser,
    Query(query): Query<MetricsSummaryQuery>,
) -> Result<Json<WebDAVPerformanceOverview>, StatusCode> {
    let metrics_tracker = WebDAVMetricsTracker::new(state.db.clone());

    // Get recent sessions (last 10) - enforce a reasonable limit
    let limited_sessions_limit = Some(10);
    let recent_sessions_query = WebDAVMetricsQuery {
        user_id: Some(auth_user.user.id),
        source_id: query.source_id,
        start_time: query.start_time,
        end_time: query.end_time,
        limit: limited_sessions_limit,
        offset: None,
    };

    let recent_sessions = metrics_tracker
        .list_sessions(&recent_sessions_query)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get recent WebDAV sessions: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?;

    // Get summary stats
    let summary_query = WebDAVMetricsQuery {
        user_id: Some(auth_user.user.id),
        source_id: query.source_id,
        start_time: query.start_time,
        end_time: query.end_time,
        limit: None,
        offset: None,
    };

    let summary_stats = metrics_tracker
        .get_metrics_summary(&summary_query)
        .await
        .map_err(|e| {
            tracing::error!("Failed to get WebDAV metrics summary: {}", e);
            StatusCode::INTERNAL_SERVER_ERROR
        })?
        .unwrap_or_else(|| WebDAVMetricsSummary {
            total_sessions: 0,
            successful_sessions: 0,
            failed_sessions: 0,
            total_files_processed: 0,
            total_bytes_processed: 0,
            avg_session_duration_sec: 0.0,
            avg_processing_rate: 0.0,
            total_http_requests: 0,
            request_success_rate: 0.0,
            avg_request_duration_ms: 0.0,
            common_error_types: serde_json::json!([]),
        });

    // Analyze performance and generate recommendations
    let top_slow_directories = get_slow_directories_for_user(&recent_sessions, &metrics_tracker, auth_user.user.id).await;
    let error_trends = analyze_error_trends(&summary_stats);
    let performance_recommendations = generate_performance_recommendations(&summary_stats, &recent_sessions);

    let overview = WebDAVPerformanceOverview {
        recent_sessions,
        summary_stats,
        top_slow_directories,
        error_trends,
        performance_recommendations,
    };

    Ok(Json(overview))
}

/// Helper function to collect slow directories across recent sessions
async fn get_slow_directories_for_user(
    sessions: &[WebDAVSyncSession],
    metrics_tracker: &WebDAVMetricsTracker,
    user_id: Uuid,
) -> Vec<SlowDirectoryInfo> {
    let mut all_slow_directories = Vec::new();

    for session in sessions.iter().take(5) { // Check the last 5 sessions
        if let Ok(Some(insights)) = metrics_tracker
            .get_performance_insights(session.id, user_id)
            .await
        {
            all_slow_directories.extend(insights.slowest_directories);
        }
    }

    // Sort by scan duration and take the top 10
    all_slow_directories.sort_by(|a, b| b.scan_duration_ms.cmp(&a.scan_duration_ms));
    all_slow_directories.into_iter().take(10).collect()
}

/// Analyze error trends from summary stats
fn analyze_error_trends(summary: &WebDAVMetricsSummary) -> ErrorTrendData {
    let total_requests = summary.total_http_requests as f64;
    let failed_requests = total_requests - (total_requests * summary.request_success_rate / 100.0);

    let common_error_types = if let Some(error_array) = summary.common_error_types.as_array() {
        error_array
            .iter()
            .filter_map(|v| {
                let obj = v.as_object()?;
                let error_type = obj.get("error_type")?.as_str()?.to_string();
                let count = obj.get("count")?.as_i64()? as i32;
                let percentage = if failed_requests > 0.0 {
                    (count as f64 / failed_requests) * 100.0
                } else {
                    0.0
                };
                Some(ErrorTypeCount {
                    error_type,
                    count,
                    percentage,
                })
            })
            .collect()
    } else {
        Vec::new()
    };

    ErrorTrendData {
        total_errors_last_24h: failed_requests as i32,
        error_rate_trend: 0.0, // Would be calculated from historical data
        common_error_types,
        most_problematic_sources: Vec::new(), // Would be analyzed per source
    }
}

/// Generate performance recommendations based on metrics
fn generate_performance_recommendations(
    summary: &WebDAVMetricsSummary,
    sessions: &[WebDAVSyncSession],
) -> Vec<PerformanceRecommendation> {
    let mut recommendations = Vec::new();

    // Analyze the success rate
    let success_rate = summary.request_success_rate;
    if success_rate < 90.0 {
        recommendations.push(PerformanceRecommendation {
            category: "reliability".to_string(),
            title: "Low Success Rate Detected".to_string(),
            description: format!(
                "Your WebDAV requests have a {:.1}% success rate. Consider checking network connectivity and server configuration.",
                success_rate
            ),
            priority: if success_rate < 70.0 { "high" } else { "medium" }.to_string(),
            potential_impact: "Improved sync reliability and reduced failures".to_string(),
        });
    }

    // Analyze response times
    let avg_response_time = summary.avg_request_duration_ms;
    if avg_response_time > 2000.0 {
        recommendations.push(PerformanceRecommendation {
            category: "performance".to_string(),
            title: "Slow Response Times".to_string(),
            description: format!(
                "Average request time is {:.0}ms. Consider checking network conditions or server performance.",
                avg_response_time
            ),
            priority: if avg_response_time > 5000.0 { "high" } else { "medium" }.to_string(),
            potential_impact: "Faster sync operations and improved user experience".to_string(),
        });
    }

    // Analyze session patterns
    let recent_failed_sessions = sessions.iter()
        .filter(|s| s.status == "failed")
        .count();

    if recent_failed_sessions > sessions.len() / 4 {
        recommendations.push(PerformanceRecommendation {
            category: "reliability".to_string(),
            title: "Frequent Sync Failures".to_string(),
            description: format!(
                "{} of your last {} sync sessions failed. Review error logs and server connectivity.",
                recent_failed_sessions, sessions.len()
            ),
            priority: "high".to_string(),
            potential_impact: "More reliable syncing and data consistency".to_string(),
        });
    }

    // Processing rate analysis
    let avg_processing_rate = summary.avg_processing_rate;
    if avg_processing_rate < 1.0 && summary.total_files_processed > 0 {
        recommendations.push(PerformanceRecommendation {
            category: "performance".to_string(),
            title: "Low Processing Rate".to_string(),
            description: format!(
                "Processing rate is {:.2} files/second. Consider optimizing file selection or increasing concurrency.",
                avg_processing_rate
            ),
            priority: "medium".to_string(),
            potential_impact: "Faster sync completion times".to_string(),
        });
    }

    // If there are no recommendations, add a positive note
    if recommendations.is_empty() {
        recommendations.push(PerformanceRecommendation {
            category: "general".to_string(),
            title: "Good Performance".to_string(),
            description: "Your WebDAV sync operations are performing well with good success rates and response times.".to_string(),
            priority: "low".to_string(),
            potential_impact: "Continue monitoring for optimal performance".to_string(),
        });
    }

    recommendations
}
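To make the thresholds in this helper concrete, here is a small test sketch (illustrative, not from the original file; the summary values are invented, and an empty session slice is passed so only the request-level checks fire):

#[cfg(test)]
mod recommendation_tests {
    use super::*;

    #[test]
    fn flags_low_success_rate_and_slow_requests() {
        let summary = WebDAVMetricsSummary {
            total_sessions: 4,
            successful_sessions: 2,
            failed_sessions: 2,
            total_files_processed: 0,
            total_bytes_processed: 0,
            avg_session_duration_sec: 12.0,
            avg_processing_rate: 0.0,
            total_http_requests: 200,
            request_success_rate: 55.0,      // below the 90% threshold (and the 70% "high" cutoff)
            avg_request_duration_ms: 3500.0, // above the 2000ms threshold
            common_error_types: serde_json::json!([]),
        };

        let recs = generate_performance_recommendations(&summary, &[]);
        assert!(recs.iter().any(|r| r.category == "reliability" && r.priority == "high"));
        assert!(recs.iter().any(|r| r.category == "performance"));
    }
}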
@@ -9,4 +9,5 @@ pub mod source_error_tracker;
pub mod sync_progress_tracker;
pub mod user_watch_service;
pub mod webdav;
pub mod webdav_metrics_tracker;
pub mod webdav_metrics_simple;
pub mod webdav_metrics_integration;
@@ -1,455 +0,0 @@
use anyhow::Result;
use std::collections::HashMap;
use std::time::Duration;

use crate::models::{
    ErrorSourceType, SourceErrorType, SourceErrorSeverity, SourceErrorClassifier,
    ErrorContext, ErrorClassification, SourceScanFailure, RetryStrategy,
};
use crate::models::source::{
    WebDAVScanFailureType, WebDAVScanFailureSeverity,
};

/// WebDAV-specific error classifier that maps WebDAV errors to the generic system
pub struct WebDAVErrorClassifier;

impl WebDAVErrorClassifier {
    pub fn new() -> Self {
        Self
    }

    /// Map WebDAV-specific error types to generic error types
    fn map_webdav_error_type(webdav_type: &WebDAVScanFailureType) -> SourceErrorType {
        match webdav_type {
            WebDAVScanFailureType::Timeout => SourceErrorType::Timeout,
            WebDAVScanFailureType::PathTooLong => SourceErrorType::PathTooLong,
            WebDAVScanFailureType::PermissionDenied => SourceErrorType::PermissionDenied,
            WebDAVScanFailureType::InvalidCharacters => SourceErrorType::InvalidCharacters,
            WebDAVScanFailureType::NetworkError => SourceErrorType::NetworkError,
            WebDAVScanFailureType::ServerError => SourceErrorType::ServerError,
            WebDAVScanFailureType::XmlParseError => SourceErrorType::XmlParseError,
            WebDAVScanFailureType::TooManyItems => SourceErrorType::TooManyItems,
            WebDAVScanFailureType::DepthLimit => SourceErrorType::DepthLimit,
            WebDAVScanFailureType::SizeLimit => SourceErrorType::SizeLimit,
            WebDAVScanFailureType::Unknown => SourceErrorType::Unknown,
        }
    }

    /// Map WebDAV-specific severity to generic severity
    fn map_webdav_severity(webdav_severity: &WebDAVScanFailureSeverity) -> SourceErrorSeverity {
        match webdav_severity {
            WebDAVScanFailureSeverity::Low => SourceErrorSeverity::Low,
            WebDAVScanFailureSeverity::Medium => SourceErrorSeverity::Medium,
            WebDAVScanFailureSeverity::High => SourceErrorSeverity::High,
            WebDAVScanFailureSeverity::Critical => SourceErrorSeverity::Critical,
        }
    }

    /// Classify a WebDAV error using the original logic from error_tracking.rs
    fn classify_webdav_error_type(&self, error: &anyhow::Error) -> WebDAVScanFailureType {
        let error_str = error.to_string().to_lowercase();

        // Check for specific error patterns (from the original WebDAV error tracking)
        if error_str.contains("timeout") || error_str.contains("timed out") {
            WebDAVScanFailureType::Timeout
        } else if error_str.contains("name too long") || error_str.contains("path too long") {
            WebDAVScanFailureType::PathTooLong
        } else if error_str.contains("permission denied") || error_str.contains("forbidden") || error_str.contains("401") || error_str.contains("403") {
            WebDAVScanFailureType::PermissionDenied
        } else if error_str.contains("invalid character") || error_str.contains("illegal character") {
            WebDAVScanFailureType::InvalidCharacters
        } else if error_str.contains("connection refused") || error_str.contains("network") || error_str.contains("dns") {
            WebDAVScanFailureType::NetworkError
        } else if error_str.contains("500") || error_str.contains("502") || error_str.contains("503") || error_str.contains("504") {
            WebDAVScanFailureType::ServerError
        } else if error_str.contains("xml") || error_str.contains("parse") || error_str.contains("malformed") {
            WebDAVScanFailureType::XmlParseError
        } else if error_str.contains("too many") || error_str.contains("limit exceeded") {
            WebDAVScanFailureType::TooManyItems
        } else if error_str.contains("depth") || error_str.contains("nested") {
            WebDAVScanFailureType::DepthLimit
        } else if error_str.contains("size") || error_str.contains("too large") || error_str.contains("507") || error_str.contains("insufficient storage") || error_str.contains("quota exceeded") {
            WebDAVScanFailureType::SizeLimit
        } else if error_str.contains("404") || error_str.contains("not found") {
            WebDAVScanFailureType::ServerError // Will be further classified by HTTP status
        } else if error_str.contains("405") || error_str.contains("method not allowed") || error_str.contains("propfind not allowed") {
            WebDAVScanFailureType::ServerError // Method not allowed - likely PROPFIND disabled
        } else if error_str.contains("423") || error_str.contains("locked") || error_str.contains("lock") {
            WebDAVScanFailureType::ServerError // Resource locked
        } else {
            WebDAVScanFailureType::Unknown
        }
    }

    /// Classify WebDAV error severity using the original logic
    fn classify_webdav_severity(&self,
        webdav_type: &WebDAVScanFailureType,
        http_status: Option<i32>,
        failure_count: i32,
    ) -> WebDAVScanFailureSeverity {
        match webdav_type {
            WebDAVScanFailureType::PathTooLong |
            WebDAVScanFailureType::InvalidCharacters => WebDAVScanFailureSeverity::Critical,

            WebDAVScanFailureType::PermissionDenied |
            WebDAVScanFailureType::XmlParseError |
            WebDAVScanFailureType::TooManyItems |
            WebDAVScanFailureType::DepthLimit |
            WebDAVScanFailureType::SizeLimit => WebDAVScanFailureSeverity::High,

            WebDAVScanFailureType::Timeout |
            WebDAVScanFailureType::ServerError => {
                // A 404 means the path is gone and retrying cannot help; every
                // other status (or a missing status) is treated the same way.
                if http_status == Some(404) {
                    WebDAVScanFailureSeverity::Critical
                } else {
                    WebDAVScanFailureSeverity::Medium
                }
            },

            WebDAVScanFailureType::NetworkError => WebDAVScanFailureSeverity::Low,

            WebDAVScanFailureType::Unknown => {
                // Escalate severity based on failure count for unknown errors
                if failure_count > 5 {
                    WebDAVScanFailureSeverity::High
                } else {
                    WebDAVScanFailureSeverity::Medium
                }
            }
        }
    }

    /// Extract an HTTP status code from the error (from the original WebDAV error tracking)
    fn extract_http_status(&self, error: &anyhow::Error) -> Option<i32> {
        let error_str = error.to_string();

        // Look for common HTTP status code patterns, including WebDAV-specific codes
        if error_str.contains("404") {
            Some(404)
        } else if error_str.contains("401") {
            Some(401)
        } else if error_str.contains("403") {
            Some(403)
        } else if error_str.contains("405") {
            Some(405) // Method Not Allowed (PROPFIND disabled)
        } else if error_str.contains("423") {
            Some(423) // Locked
        } else if error_str.contains("500") {
            Some(500)
        } else if error_str.contains("502") {
            Some(502)
        } else if error_str.contains("503") {
            Some(503)
        } else if error_str.contains("504") {
            Some(504)
        } else if error_str.contains("507") {
            Some(507) // Insufficient Storage
        } else {
            // Try to extract any 3-digit number that looks like an HTTP status
            let re = regex::Regex::new(r"\b([4-5]\d{2})\b").ok()?;
            re.captures(&error_str)
                .and_then(|cap| cap.get(1))
                .and_then(|m| m.as_str().parse::<i32>().ok())
        }
    }

    /// Extract an error code if present (from the original WebDAV error tracking)
    fn extract_error_code(&self, error: &anyhow::Error) -> Option<String> {
        let error_str = error.to_string();

        // Look for common error code patterns
        if let Some(caps) = regex::Regex::new(r"(?i)error[:\s]+([A-Z0-9_]+)")
            .ok()
            .and_then(|re| re.captures(&error_str))
        {
            return caps.get(1).map(|m| m.as_str().to_string());
        }

        // Look for OS error codes
        if let Some(caps) = regex::Regex::new(r"(?i)os error (\d+)")
            .ok()
            .and_then(|re| re.captures(&error_str))
        {
            return caps.get(1).map(|m| format!("OS_{}", m.as_str()));
        }

        None
    }

    /// Try to estimate an item count from the error message (from the original WebDAV error tracking)
    fn estimate_item_count_from_error(&self, error: &anyhow::Error) -> Option<i32> {
        let error_str = error.to_string();

        // Look for patterns like "1000 items", "contains 500 files", etc.
        if let Some(caps) = regex::Regex::new(r"(\d+)\s*(?:items?|files?|directories|folders?|entries)")
            .ok()
            .and_then(|re| re.captures(&error_str))
        {
            return caps.get(1)
                .and_then(|m| m.as_str().parse::<i32>().ok());
        }

        None
    }

    /// Build WebDAV-specific diagnostic data
    fn build_webdav_diagnostics(&self, error: &anyhow::Error, context: &ErrorContext) -> serde_json::Value {
        let mut diagnostics = serde_json::json!({
            "error_chain": format!("{:?}", error),
            "timestamp": chrono::Utc::now().to_rfc3339(),
            "webdav_specific": true,
        });

        // Add a stack trace if available
        let backtrace = error.backtrace().to_string();
        if !backtrace.is_empty() && backtrace != "disabled backtrace" {
            diagnostics["backtrace"] = serde_json::json!(backtrace);
        }

        // Add WebDAV-specific context
        if let Some(server_type) = &context.server_type {
            diagnostics["server_type"] = serde_json::json!(server_type);
        }
        if let Some(server_version) = &context.server_version {
            diagnostics["server_version"] = serde_json::json!(server_version);
        }

        // Add the estimated item count if available
        if let Some(item_count) = self.estimate_item_count_from_error(error) {
            diagnostics["estimated_item_count"] = serde_json::json!(item_count);
        }

        // Add path analysis
        let path_depth = context.resource_path.matches('/').count();
        diagnostics["path_length"] = serde_json::json!(context.resource_path.len());
        diagnostics["path_depth"] = serde_json::json!(path_depth);

        // Add response metrics
        if let Some(response_time) = context.response_time {
            diagnostics["response_time_ms"] = serde_json::json!(response_time.as_millis());
        }
        if let Some(response_size) = context.response_size {
            diagnostics["response_size_bytes"] = serde_json::json!(response_size);
        }

        // Add any additional context
        for (key, value) in &context.additional_context {
            diagnostics[key] = value.clone();
        }

        diagnostics
    }
}

impl SourceErrorClassifier for WebDAVErrorClassifier {
    fn classify_error(&self, error: &anyhow::Error, context: &ErrorContext) -> ErrorClassification {
        // Use the original WebDAV classification logic
        let webdav_type = self.classify_webdav_error_type(error);
        let http_status = self.extract_http_status(error);
        let webdav_severity = self.classify_webdav_severity(&webdav_type, http_status, 1);

        // Map to generic types
        let error_type = Self::map_webdav_error_type(&webdav_type);
        let severity = Self::map_webdav_severity(&webdav_severity);

        // Determine the retry strategy based on the error type
        let retry_strategy = match webdav_type {
            WebDAVScanFailureType::NetworkError => RetryStrategy::Exponential,
            WebDAVScanFailureType::Timeout => RetryStrategy::Exponential,
            WebDAVScanFailureType::ServerError => RetryStrategy::Exponential,
            WebDAVScanFailureType::XmlParseError => RetryStrategy::Linear,
            _ => RetryStrategy::Exponential,
        };

        // Set the retry delay based on the error type
        let retry_delay_seconds = match webdav_type {
            WebDAVScanFailureType::NetworkError => 60,   // 1 minute
            WebDAVScanFailureType::Timeout => 900,       // 15 minutes
            WebDAVScanFailureType::ServerError => 300,   // 5 minutes
            WebDAVScanFailureType::XmlParseError => 600, // 10 minutes
            _ => 300, // 5 minutes default
        };

        // Set max retries based on severity
        let max_retries = match webdav_severity {
            WebDAVScanFailureSeverity::Critical => 1,
            WebDAVScanFailureSeverity::High => 3,
            WebDAVScanFailureSeverity::Medium => 5,
            WebDAVScanFailureSeverity::Low => 10,
        };

        // Build the user-friendly message
        let user_friendly_message = self.build_webdav_user_message(&webdav_type, &context.resource_path, http_status);
        let recommended_action = self.build_webdav_recommended_action(&webdav_type, &webdav_severity);

        // Build diagnostic data
        let diagnostic_data = self.build_webdav_diagnostics(error, context);

        ErrorClassification {
            error_type,
            severity,
            retry_strategy,
            retry_delay_seconds,
            max_retries,
            user_friendly_message,
            recommended_action,
            diagnostic_data,
        }
    }

    fn extract_diagnostics(&self, error: &anyhow::Error, context: &ErrorContext) -> serde_json::Value {
        self.build_webdav_diagnostics(error, context)
    }

    fn build_user_friendly_message(&self, failure: &SourceScanFailure) -> String {
        // Convert the generic failure back to WebDAV-specific types for message building
        let webdav_type = match failure.error_type {
            SourceErrorType::Timeout => WebDAVScanFailureType::Timeout,
            SourceErrorType::PathTooLong => WebDAVScanFailureType::PathTooLong,
            SourceErrorType::PermissionDenied => WebDAVScanFailureType::PermissionDenied,
            SourceErrorType::InvalidCharacters => WebDAVScanFailureType::InvalidCharacters,
            SourceErrorType::NetworkError => WebDAVScanFailureType::NetworkError,
            SourceErrorType::ServerError => WebDAVScanFailureType::ServerError,
            SourceErrorType::XmlParseError => WebDAVScanFailureType::XmlParseError,
            SourceErrorType::TooManyItems => WebDAVScanFailureType::TooManyItems,
            SourceErrorType::DepthLimit => WebDAVScanFailureType::DepthLimit,
            SourceErrorType::SizeLimit => WebDAVScanFailureType::SizeLimit,
            _ => WebDAVScanFailureType::Unknown,
        };

        self.build_webdav_user_message(&webdav_type, &failure.resource_path, failure.http_status_code)
    }

    fn should_retry(&self, failure: &SourceScanFailure) -> bool {
        match failure.error_severity {
            SourceErrorSeverity::Critical => false,
            SourceErrorSeverity::High => failure.failure_count < 3,
            SourceErrorSeverity::Medium => failure.failure_count < 5,
            SourceErrorSeverity::Low => failure.failure_count < 10,
        }
    }

    fn source_type(&self) -> ErrorSourceType {
        ErrorSourceType::WebDAV
    }
}

impl WebDAVErrorClassifier {
    /// Build the WebDAV-specific user message (from the original error tracking logic)
    fn build_webdav_user_message(&self,
        failure_type: &WebDAVScanFailureType,
        directory_path: &str,
        http_status: Option<i32>,
    ) -> String {
        match failure_type {
            WebDAVScanFailureType::Timeout => {
                format!(
                    "The WebDAV directory '{}' is taking too long to scan. This might be due to a large number of files or slow server response.",
                    directory_path
                )
            }
            WebDAVScanFailureType::PathTooLong => {
                format!(
                    "The WebDAV path '{}' exceeds system limits. Consider shortening directory names.",
                    directory_path
                )
            }
            WebDAVScanFailureType::PermissionDenied => {
                format!(
                    "Access denied to WebDAV directory '{}'. Please check your WebDAV permissions.",
                    directory_path
                )
            }
            WebDAVScanFailureType::TooManyItems => {
                format!(
                    "WebDAV directory '{}' contains too many items. Consider organizing into subdirectories.",
                    directory_path
                )
            }
            WebDAVScanFailureType::ServerError if http_status == Some(404) => {
                format!(
                    "WebDAV directory '{}' was not found on the server. It may have been deleted or moved.",
                    directory_path
                )
            }
            WebDAVScanFailureType::ServerError if http_status == Some(405) => {
                format!(
                    "WebDAV PROPFIND method is not allowed for '{}'. The server may not support WebDAV or it's disabled for this path.",
                    directory_path
                )
            }
            WebDAVScanFailureType::ServerError if http_status == Some(423) => {
                format!(
                    "WebDAV resource '{}' is locked. Another process may be using it.",
                    directory_path
                )
            }
            WebDAVScanFailureType::SizeLimit if http_status == Some(507) => {
                format!(
                    "Insufficient storage quota for WebDAV path '{}'. The server has run out of space.",
                    directory_path
                )
            }
            WebDAVScanFailureType::XmlParseError => {
                format!(
                    "Malformed XML response from the WebDAV server for directory '{}'. The server may be incompatible.",
                    directory_path
                )
            }
            WebDAVScanFailureType::NetworkError => {
                format!(
                    "Network error accessing WebDAV directory '{}'. Check your connection.",
                    directory_path
                )
            }
            _ => {
                format!(
                    "Failed to scan WebDAV directory '{}'. The error will be retried automatically.",
                    directory_path
                )
            }
        }
    }

    /// Build the WebDAV-specific recommended action
    fn build_webdav_recommended_action(&self,
        failure_type: &WebDAVScanFailureType,
        severity: &WebDAVScanFailureSeverity,
    ) -> String {
        match (failure_type, severity) {
            (WebDAVScanFailureType::PathTooLong, _) => {
                "Shorten directory names or reorganize the directory structure.".to_string()
            }
            (WebDAVScanFailureType::InvalidCharacters, _) => {
                "Remove or rename directories with invalid characters.".to_string()
            }
            (WebDAVScanFailureType::PermissionDenied, _) => {
                "Check WebDAV server permissions and authentication credentials.".to_string()
            }
            (WebDAVScanFailureType::TooManyItems, _) => {
                "Split large directories into smaller subdirectories.".to_string()
            }
            (WebDAVScanFailureType::XmlParseError, _) => {
                "Check WebDAV server compatibility or contact the server administrator.".to_string()
            }
            (WebDAVScanFailureType::Timeout, WebDAVScanFailureSeverity::High) => {
                "Consider excluding this directory from scanning due to repeated timeouts.".to_string()
            }
            (WebDAVScanFailureType::NetworkError, _) => {
                "Check network connectivity to the WebDAV server.".to_string()
            }
            (_, WebDAVScanFailureSeverity::Critical) => {
                "Manual intervention required. This error cannot be resolved automatically.".to_string()
            }
            _ => {
                "The system will retry this operation automatically with increasing delays.".to_string()
            }
        }
    }
}
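A minimal sketch of how this classifier behaves on typical error strings (added for illustration; it relies only on the private helpers defined above, which a child test module can reach):

#[cfg(test)]
mod classifier_tests {
    use super::*;

    #[test]
    fn maps_common_error_strings() {
        let classifier = WebDAVErrorClassifier::new();

        // Substring matching is effectively case-insensitive because the input is lowercased.
        let timeout = anyhow::anyhow!("PROPFIND request timed out after 30s");
        assert!(matches!(
            classifier.classify_webdav_error_type(&timeout),
            WebDAVScanFailureType::Timeout
        ));

        // A 5xx in the message is picked up both as a type and as a status code.
        let gateway = anyhow::anyhow!("unexpected response: 502 Bad Gateway");
        assert!(matches!(
            classifier.classify_webdav_error_type(&gateway),
            WebDAVScanFailureType::ServerError
        ));
        assert_eq!(classifier.extract_http_status(&gateway), Some(502));
    }
}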
@@ -1,504 +0,0 @@
use anyhow::Result;
use std::time::{Duration, Instant};
use uuid::Uuid;
use tracing::{debug, warn};

use crate::models::webdav_metrics::*;
use crate::services::webdav_metrics_tracker::WebDAVMetricsTracker;
use super::{WebDAVService, WebDAVDiscoveryResult};

/// Extension trait that adds metrics tracking to WebDAV operations
pub trait WebDAVServiceWithMetrics {
    /// Discover files and directories with metrics tracking
    async fn discover_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        session_id: Uuid,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        path: &str,
        depth: Option<i32>,
        file_extensions: &[String],
    ) -> Result<WebDAVDiscoveryResult>;

    /// Download a file with metrics tracking
    async fn download_file_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        session_id: Uuid,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        file_url: &str,
        expected_size: Option<u64>,
    ) -> Result<super::WebDAVDownloadResult>;

    /// Test the connection with metrics tracking
    async fn test_connection_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        user_id: Uuid,
        source_id: Option<Uuid>,
    ) -> Result<super::HealthStatus>;
}

impl WebDAVServiceWithMetrics for WebDAVService {
    async fn discover_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        session_id: Uuid,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        path: &str,
        depth: Option<i32>,
        file_extensions: &[String],
    ) -> Result<WebDAVDiscoveryResult> {
        let start_time = Instant::now();

        // Start directory scan metrics if not provided
        let dir_metric_id = if let Some(id) = directory_metric_id {
            id
        } else {
            let path_depth = path.matches('/').count() as i32;
            let parent_path = if path == "/" {
                None
            } else {
                path.rfind('/').map(|pos| path[..pos].to_string())
            };

            metrics_tracker
                .start_directory_scan(
                    session_id,
                    user_id,
                    source_id,
                    path.to_string(),
                    path_depth,
                    parent_path,
                )
                .await?
        };

        // Record the discovery request
        let discovery_start = Instant::now();
        let discovery_result = self.discover_files_and_directories(path, depth.is_some()).await;
        let discovery_duration = discovery_start.elapsed();

        // Record an HTTP request metric for the discovery operation
        let (success, error_type, error_message) = match &discovery_result {
            Ok(_) => (true, None, None),
            Err(e) => (false, Some("discovery_error".to_string()), Some(e.to_string())),
        };

        let _request_metric_id = metrics_tracker
            .record_http_request(
                Some(session_id),
                Some(dir_metric_id),
                user_id,
                source_id,
                WebDAVRequestType::PropFind,
                WebDAVOperationType::Discovery,
                path.to_string(),
                discovery_duration,
                None, // request_size_bytes
                None, // response_size_bytes (would need to track this in the discover method)
                None, // http_status_code (would need to extract from discovery)
                success,
                0, // retry_attempt
                error_type,
                error_message,
                None, // server_headers (would need to pass through from discover)
                None, // remote_ip
            )
            .await
            .unwrap_or_else(|e| {
                warn!("Failed to record discovery request metric: {}", e);
                Uuid::new_v4() // Return a dummy ID if metrics recording fails
            });

        match discovery_result {
            Ok(result) => {
                // Update directory metrics with the discovery results
                let files_count = result.files.len() as i32;
                let dirs_count = result.directories.len() as i32;
                let total_size: u64 = result.files.iter()
                    .map(|f| f.size as u64)
                    .sum();

                metrics_tracker
                    .update_directory_counters(
                        dir_metric_id,
                        files_count,
                        dirs_count,
                        total_size as i64,
                        0, // files_processed (will be updated later)
                        0, // files_skipped
                        0, // files_failed
                    )
                    .await
                    .unwrap_or_else(|e| {
                        warn!("Failed to update directory counters: {}", e);
                    });

                // Update session counters
                metrics_tracker
                    .update_session_counters(
                        session_id,
                        dirs_count,
                        0, // directories_processed (will be updated later)
                        files_count,
                        0, // files_processed (will be updated later)
                        total_size as i64,
                        0, // bytes_processed (will be updated later)
                    )
                    .await
                    .unwrap_or_else(|e| {
                        warn!("Failed to update session counters: {}", e);
                    });

                debug!(
                    "Discovery completed for '{}': {} files, {} directories, {} bytes ({}ms)",
                    path, files_count, dirs_count, total_size, discovery_duration.as_millis()
                );

                Ok(result)
            }
            Err(e) => {
                // Record the error in directory metrics
                metrics_tracker
                    .record_directory_error(dir_metric_id, "discovery_failed", false)
                    .await
                    .unwrap_or_else(|err| {
                        warn!("Failed to record directory error: {}", err);
                    });

                // Finish the directory scan with an error status
                metrics_tracker
                    .finish_directory_scan(
                        dir_metric_id,
                        "failed",
                        None,
                        Some(e.to_string()),
                    )
                    .await
                    .unwrap_or_else(|err| {
                        warn!("Failed to finish directory scan: {}", err);
                    });

                Err(e)
            }
        }
    }

    async fn download_file_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        session_id: Uuid,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        file_url: &str,
        expected_size: Option<u64>,
    ) -> Result<super::WebDAVDownloadResult> {
        let download_start = Instant::now();
        // Create a temporary FileIngestionInfo for download with mime detection
        let temp_file_info = crate::models::FileIngestionInfo {
            relative_path: file_url.to_string(),
            full_path: file_url.to_string(),
            path: file_url.to_string(),
            name: file_url.split('/').last().unwrap_or("unknown").to_string(),
            size: expected_size.unwrap_or(0) as i64,
            mime_type: "application/octet-stream".to_string(),
            last_modified: Some(chrono::Utc::now()),
            etag: "".to_string(),
            is_directory: false,
            created_at: None,
            permissions: None,
            owner: None,
            group: None,
            metadata: None,
        };
        let download_result = self.download_file_with_mime_detection(&temp_file_info).await;
        let download_duration = download_start.elapsed();

        let (success, error_type, error_message, response_size) = match &download_result {
            Ok(result) => (
                true,
                None,
                None,
                Some(result.content.len() as i64),
            ),
            Err(e) => (
                false,
                Some("download_error".to_string()),
                Some(e.to_string()),
                None,
            ),
        };

        // Record an HTTP request metric for the download operation
        let _request_metric_id = metrics_tracker
            .record_http_request(
                Some(session_id),
                directory_metric_id,
                user_id,
                source_id,
                WebDAVRequestType::Get,
                WebDAVOperationType::Download,
                file_url.to_string(),
                download_duration,
                None, // request_size_bytes
                response_size,
                None, // http_status_code (would need to extract from the download method)
                success,
                0, // retry_attempt
                error_type,
                error_message,
                None, // server_headers (would need to pass through)
                None, // remote_ip
            )
            .await
            .unwrap_or_else(|e| {
                warn!("Failed to record download request metric: {}", e);
                Uuid::new_v4()
            });

        match download_result {
            Ok(result) => {
                let file_size = result.content.len() as i64;

                // Update directory metrics if provided
                if let Some(dir_metric_id) = directory_metric_id {
                    metrics_tracker
                        .update_directory_counters(
                            dir_metric_id,
                            0, // files_found
                            0, // subdirectories_found
                            0, // size_bytes_delta (already counted in discovery)
                            1, // files_processed
                            0, // files_skipped
                            0, // files_failed
                        )
                        .await
                        .unwrap_or_else(|e| {
                            warn!("Failed to update directory counters for download: {}", e);
                        });
                }

                // Update session counters
                metrics_tracker
                    .update_session_counters(
                        session_id,
                        0, // directories_discovered
                        0, // directories_processed
                        0, // files_discovered
                        1, // files_processed
                        0, // bytes_discovered
                        file_size, // bytes_processed
                    )
                    .await
                    .unwrap_or_else(|e| {
                        warn!("Failed to update session counters for download: {}", e);
                    });

                debug!(
                    "Download completed for '{}': {} bytes ({}ms)",
                    file_url, file_size, download_duration.as_millis()
                );

                Ok(result)
            }
            Err(e) => {
                // Record the failed download in directory metrics
                if let Some(dir_metric_id) = directory_metric_id {
                    metrics_tracker
                        .update_directory_counters(
                            dir_metric_id,
                            0, // files_found
                            0, // subdirectories_found
                            0, // size_bytes_delta
                            0, // files_processed
                            0, // files_skipped
                            1, // files_failed
                        )
                        .await
                        .unwrap_or_else(|err| {
                            warn!("Failed to update directory counters for failed download: {}", err);
                        });

                    metrics_tracker
                        .record_directory_error(dir_metric_id, "download_failed", false)
                        .await
                        .unwrap_or_else(|err| {
                            warn!("Failed to record directory error for download: {}", err);
                        });
                }

                Err(e)
            }
        }
    }

    async fn test_connection_with_metrics(
        &self,
        metrics_tracker: &WebDAVMetricsTracker,
        user_id: Uuid,
        source_id: Option<Uuid>,
    ) -> Result<super::HealthStatus> {
        let test_start = Instant::now();
        let test_result = self.test_connection().await;
        let test_duration = test_start.elapsed();

        let (success, error_type, error_message) = match &test_result {
            Ok(status) => (status.success, None, if status.success { None } else { Some(status.message.clone()) }),
            Err(e) => (false, Some("connection_test_error".to_string()), Some(e.to_string())),
        };

        // Record an HTTP request metric for the connection test
        let _request_metric_id = metrics_tracker
            .record_http_request(
                None, // session_id (connection tests are not part of a sync session)
                None, // directory_metric_id
                user_id,
                source_id,
                WebDAVRequestType::Options,
                WebDAVOperationType::ConnectionTest,
                "/".to_string(), // Root path for the connection test
                test_duration,
                None, // request_size_bytes
                None, // response_size_bytes
                None, // http_status_code
                success,
                0, // retry_attempt
                error_type,
                error_message,
                None, // server_headers
                None, // remote_ip
            )
            .await
            .unwrap_or_else(|e| {
                warn!("Failed to record connection test metric: {}", e);
                Uuid::new_v4()
            });

        debug!(
            "Connection test completed: success={}, duration={}ms",
            success, test_duration.as_millis()
        );

        // Convert WebDAVConnectionResult to HealthStatus
        match test_result {
            Ok(conn_result) => Ok(super::HealthStatus {
                healthy: conn_result.success,
                message: conn_result.message,
                response_time_ms: test_duration.as_millis() as u64,
                details: Some(serde_json::json!({
                    "server_version": conn_result.server_version,
                    "server_type": conn_result.server_type
                })),
            }),
            Err(e) => Ok(super::HealthStatus {
                healthy: false,
                message: e.to_string(),
                response_time_ms: test_duration.as_millis() as u64,
                details: None,
            }),
        }
    }
}

/// Helper struct for managing metrics during a complete sync operation
pub struct SyncWithMetrics<'a> {
    pub metrics_tracker: &'a WebDAVMetricsTracker,
    pub session_id: Uuid,
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    current_directory_metric: Option<Uuid>,
}

impl<'a> SyncWithMetrics<'a> {
    pub fn new(
        metrics_tracker: &'a WebDAVMetricsTracker,
        session_id: Uuid,
        user_id: Uuid,
        source_id: Option<Uuid>,
    ) -> Self {
        Self {
            metrics_tracker,
            session_id,
            user_id,
            source_id,
            current_directory_metric: None,
        }
    }

    /// Start tracking a new directory
    pub async fn start_directory(&mut self, directory_path: &str, depth: i32) -> Result<()> {
        let parent_path = if directory_path == "/" {
            None
        } else {
            directory_path.rfind('/').map(|pos| directory_path[..pos].to_string())
        };

        let metric_id = self.metrics_tracker
            .start_directory_scan(
                self.session_id,
                self.user_id,
                self.source_id,
                directory_path.to_string(),
                depth,
                parent_path,
            )
            .await?;

        self.current_directory_metric = Some(metric_id);
        Ok(())
    }

    /// Finish tracking the current directory
    pub async fn finish_directory(&mut self, status: &str, error_message: Option<String>) -> Result<()> {
        if let Some(metric_id) = self.current_directory_metric.take() {
            self.metrics_tracker
                .finish_directory_scan(metric_id, status, None, error_message)
                .await?;
        }
        Ok(())
    }

    /// Record a skipped item
    pub async fn record_skipped(&self, is_directory: bool, reason: &str) -> Result<()> {
        let (dirs_skipped, files_skipped) = if is_directory { (1, 0) } else { (0, 1) };

        self.metrics_tracker
            .record_skipped_items(self.session_id, dirs_skipped, files_skipped, reason)
            .await
    }

    /// Record an error
    pub async fn record_error(&self, error_type: &str, is_warning: bool) -> Result<()> {
        if let Some(metric_id) = self.current_directory_metric {
            self.metrics_tracker
                .record_directory_error(metric_id, error_type, is_warning)
                .await
        } else {
            Ok(())
        }
    }

    /// Record an ETag comparison result
    pub async fn record_etag_result(&self, etag_matched: bool, cache_hit: bool) -> Result<()> {
        if let Some(metric_id) = self.current_directory_metric {
            self.metrics_tracker
                .record_etag_result(metric_id, etag_matched, cache_hit)
                .await
        } else {
            Ok(())
        }
    }

    /// Get the current directory metric ID
    pub fn current_directory_metric_id(&self) -> Option<Uuid> {
        self.current_directory_metric
    }
}
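For orientation, a sketch of how a sync loop might have driven this helper (illustrative only — the path, depth, and skip reason are invented, and a `WebDAVMetricsTracker` is assumed to be available from the caller):

async fn sync_one_directory_sketch(
    tracker: &WebDAVMetricsTracker,
    session_id: Uuid,
    user_id: Uuid,
) -> Result<()> {
    let mut sync = SyncWithMetrics::new(tracker, session_id, user_id, None);

    // Open a per-directory metric before scanning.
    sync.start_directory("/Documents", 1).await?;

    // ... scan the directory here, recording events as they happen ...
    sync.record_skipped(false, "unsupported_extension").await?;
    sync.record_etag_result(true, true).await?;

    // Close the metric with a terminal status.
    sync.finish_directory("completed", None).await?;
    Ok(())
}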
@@ -5,8 +5,6 @@ pub mod config;
pub mod service;
pub mod smart_sync;
pub mod progress_shim; // Backward compatibility shim for simplified progress tracking
pub mod error_classifier; // WebDAV error classification for generic error tracking
pub mod metrics_integration; // WebDAV metrics collection integration

// Re-export main types for convenience
pub use common::build_user_agent;
@@ -17,7 +15,6 @@ pub use service::{
    ValidationRecommendation, ValidationAction, ValidationSummary
};
pub use smart_sync::{SmartSyncService, SmartSyncDecision, SmartSyncStrategy, SmartSyncResult};
pub use metrics_integration::{WebDAVServiceWithMetrics, SyncWithMetrics};

// Backward compatibility exports for progress tracking (simplified)
pub use progress_shim::{SyncProgress, SyncPhase, ProgressStats};
259
src/services/webdav_metrics_integration.rs
Normal file
@@ -0,0 +1,259 @@
use std::sync::Arc;
use std::time::Instant;
use uuid::Uuid;
use tracing::{debug, warn};
use crate::services::webdav_metrics_simple::WebDAVMetrics;

/// Integration layer for WebDAV metrics
/// This provides a clean interface for WebDAV services to record metrics
/// without coupling them tightly to the metrics implementation
pub struct WebDAVMetricsCollector {
    metrics: Arc<WebDAVMetrics>,
}

impl WebDAVMetricsCollector {
    pub fn new(metrics: Arc<WebDAVMetrics>) -> Self {
        Self { metrics }
    }

    /// Create a new session tracker
    pub fn start_session(&self, user_id: Uuid, source_id: Option<Uuid>) -> SessionTracker {
        SessionTracker {
            metrics: Arc::clone(&self.metrics),
            user_id,
            source_id,
            start_time: Instant::now(),
            files_processed: 0,
            bytes_processed: 0,
            requests_made: 0,
            successful_requests: 0,
        }
    }

    /// Record a standalone request (not part of a session)
    pub async fn record_standalone_request(&self, success: bool, duration_ms: u64) {
        self.metrics.record_request(success, duration_ms).await;
    }

    /// Get metrics for Prometheus export
    pub async fn get_prometheus_metrics(&self) -> crate::services::webdav_metrics_simple::PrometheusMetrics {
        self.metrics.get_prometheus_metrics().await
    }
}
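For one-off probes the collector can also be used without a session; a sketch (the surrounding request is elided and its outcome stubbed):

async fn probe_sketch(collector: &WebDAVMetricsCollector) {
    let started = Instant::now();
    // ... perform the actual OPTIONS/PROPFIND request here ...
    let success = true; // stand-in for the real outcome

    collector
        .record_standalone_request(success, started.elapsed().as_millis() as u64)
        .await;
}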
|
||||
|
||||
/// Tracks metrics for a single WebDAV sync session
/// This replaces the complex database-backed session tracking
pub struct SessionTracker {
    metrics: Arc<WebDAVMetrics>,
    user_id: Uuid,
    source_id: Option<Uuid>,
    start_time: Instant,
    files_processed: u32,
    bytes_processed: u64,
    requests_made: u32,
    successful_requests: u32,
}

impl SessionTracker {
    /// Record that files were processed in this session
    pub fn record_files_processed(&mut self, count: u32, bytes: u64) {
        self.files_processed += count;
        self.bytes_processed += bytes;

        debug!("Session {}: processed {} files, {} bytes total",
            self.user_id, self.files_processed, self.bytes_processed);
    }

    /// Record an HTTP request made during this session
    pub async fn record_request(&mut self, success: bool, duration_ms: u64) {
        self.requests_made += 1;
        if success {
            self.successful_requests += 1;
        }

        // Record in global metrics
        self.metrics.record_request(success, duration_ms).await;

        debug!("Session {}: request {} (success: {}, duration: {}ms)",
            self.user_id, self.requests_made, success, duration_ms);
    }

    /// Complete the session successfully
    pub async fn complete_success(self) {
        let duration_ms = self.start_time.elapsed().as_millis() as u64;

        self.metrics.record_session(
            true,
            duration_ms,
            self.files_processed,
            self.bytes_processed,
        ).await;

        debug!("Session {} completed successfully: {}ms, {} files, {} bytes, {}/{} requests successful",
            self.user_id, duration_ms, self.files_processed, self.bytes_processed,
            self.successful_requests, self.requests_made);
    }

    /// Complete the session with failure
    pub async fn complete_failure(self, _error: &str) {
        let duration_ms = self.start_time.elapsed().as_millis() as u64;

        self.metrics.record_session(
            false,
            duration_ms,
            self.files_processed,
            self.bytes_processed,
        ).await;

        warn!("Session {} failed after {}ms: {} files, {} bytes, {}/{} requests successful",
            self.user_id, duration_ms, self.files_processed, self.bytes_processed,
            self.successful_requests, self.requests_made);
    }

    /// Get current session stats (for debugging/logging)
    pub fn current_stats(&self) -> SessionStats {
        SessionStats {
            user_id: self.user_id,
            source_id: self.source_id,
            duration_ms: self.start_time.elapsed().as_millis() as u64,
            files_processed: self.files_processed,
            bytes_processed: self.bytes_processed,
            requests_made: self.requests_made,
            successful_requests: self.successful_requests,
        }
    }
}

/// Simple session statistics for logging/debugging
#[derive(Debug, Clone)]
pub struct SessionStats {
    pub user_id: Uuid,
    pub source_id: Option<Uuid>,
    pub duration_ms: u64,
    pub files_processed: u32,
    pub bytes_processed: u64,
    pub requests_made: u32,
    pub successful_requests: u32,
}

/// Request timing helper for easy request measurement
pub struct RequestTimer {
    start_time: Instant,
}

impl RequestTimer {
    pub fn new() -> Self {
        Self {
            start_time: Instant::now(),
        }
    }

    pub fn elapsed_ms(&self) -> u64 {
        self.start_time.elapsed().as_millis() as u64
    }

    /// Complete and record the request
    pub async fn complete(self, session: &mut SessionTracker, success: bool) {
        let duration_ms = self.elapsed_ms();
        session.record_request(success, duration_ms).await;
    }
}

impl Default for RequestTimer {
    fn default() -> Self {
        Self::new()
    }
}

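// Usage sketch for `RequestTimer` (illustration only; `do_propfind` is a
// hypothetical WebDAV call, not something this module defines):
//
//     let timer = RequestTimer::new();
//     let ok = do_propfind(&path).await.is_ok();
//     timer.complete(&mut session, ok).await; // records duration + outcome once
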
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use tokio::time::{sleep, Duration};

    #[tokio::test]
    async fn test_session_tracking() {
        let metrics = Arc::new(WebDAVMetrics::new());
        let collector = WebDAVMetricsCollector::new(metrics);

        let user_id = Uuid::new_v4();
        let source_id = Some(Uuid::new_v4());

        // Start session
        let mut session = collector.start_session(user_id, source_id);

        // Simulate processing files
        session.record_files_processed(5, 1024000);
        session.record_files_processed(3, 512000);

        // Simulate HTTP requests
        session.record_request(true, 100).await;
        session.record_request(true, 150).await;
        session.record_request(false, 5000).await; // Failed request

        let stats = session.current_stats();
        assert_eq!(stats.files_processed, 8);
        assert_eq!(stats.bytes_processed, 1536000);
        assert_eq!(stats.requests_made, 3);
        assert_eq!(stats.successful_requests, 2);

        // Complete successfully
        session.complete_success().await;

        // Check metrics
        let prometheus_metrics = collector.get_prometheus_metrics().await;
        assert_eq!(prometheus_metrics.total_sessions, 1);
        assert_eq!(prometheus_metrics.successful_sessions, 1);
        assert_eq!(prometheus_metrics.total_files_processed, 8);
        assert_eq!(prometheus_metrics.total_bytes_processed, 1536000);
        assert_eq!(prometheus_metrics.total_http_requests, 3);
    }

    #[tokio::test]
    async fn test_request_timer() {
        let metrics = Arc::new(WebDAVMetrics::new());
        let collector = WebDAVMetricsCollector::new(metrics);

        let user_id = Uuid::new_v4();
        let mut session = collector.start_session(user_id, None);

        // Test request timing
        let timer = RequestTimer::new();
        sleep(Duration::from_millis(10)).await; // Simulate work
        let duration_before = timer.elapsed_ms();

        timer.complete(&mut session, true).await;

        // Should have recorded a request with reasonable duration
        assert!(duration_before >= 10);

        let stats = session.current_stats();
        assert_eq!(stats.requests_made, 1);
        assert_eq!(stats.successful_requests, 1);
    }

    #[tokio::test]
    async fn test_failed_session() {
        let metrics = Arc::new(WebDAVMetrics::new());
        let collector = WebDAVMetricsCollector::new(metrics);

        let user_id = Uuid::new_v4();
        let mut session = collector.start_session(user_id, None);

        // Process some data before failure
        session.record_files_processed(2, 100000);
        session.record_request(true, 100).await;
        session.record_request(false, 200).await;

        // Complete with failure
        session.complete_failure("Connection error").await;

        // Check metrics
        let prometheus_metrics = collector.get_prometheus_metrics().await;
        assert_eq!(prometheus_metrics.total_sessions, 1);
        assert_eq!(prometheus_metrics.successful_sessions, 0);
        assert_eq!(prometheus_metrics.failed_sessions, 1);
        assert_eq!(prometheus_metrics.total_files_processed, 2);
        assert_eq!(prometheus_metrics.total_http_requests, 2);
    }
}
462
src/services/webdav_metrics_simple.rs
Normal file
@@ -0,0 +1,462 @@
use std::sync::atomic::{AtomicU64, Ordering};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;

use serde::Serialize;

/// Simplified WebDAV metrics using atomic counters
/// Replaces the complex database-backed metrics system with in-memory tracking
#[derive(Clone)]
pub struct WebDAVMetrics {
    // Primary counters - these map directly to Prometheus metrics
    sessions_total: Arc<AtomicU64>,
    sessions_successful: Arc<AtomicU64>,
    sessions_failed: Arc<AtomicU64>,

    files_processed: Arc<AtomicU64>,
    bytes_processed: Arc<AtomicU64>,
    http_requests_total: Arc<AtomicU64>,
    http_requests_successful: Arc<AtomicU64>,

    // Time-windowed data for calculating rates and recent activity
    recent_sessions: Arc<RwLock<CircularBuffer<SessionEvent>>>,
    recent_requests: Arc<RwLock<CircularBuffer<RequestEvent>>>,

    // Cached calculations to avoid recomputing on every metrics request
    cached_calculations: Arc<RwLock<CachedCalculations>>,
    cache_timestamp: Arc<AtomicU64>,
}

/// Minimal session event for time-window calculations
#[derive(Debug, Clone)]
struct SessionEvent {
    timestamp: u64,
    success: bool,
    duration_ms: u64,
    files_count: u32,
    bytes_count: u64,
}

/// Minimal request event for time-window calculations
#[derive(Debug, Clone)]
struct RequestEvent {
    timestamp: u64,
    success: bool,
    duration_ms: u64,
}

/// Cached calculated metrics to avoid recomputation
#[derive(Debug, Clone)]
struct CachedCalculations {
    success_rate: f64,
    avg_session_duration_sec: f64,
    avg_processing_rate: f64,
    request_success_rate: f64,
    avg_request_duration_ms: f64,
    sessions_last_hour: u64,
    error_rate_last_hour: f64,
}

/// Prometheus metrics structure matching the current API
#[derive(Debug, Serialize)]
pub struct PrometheusMetrics {
    pub total_sessions: u64,
    pub successful_sessions: u64,
    pub failed_sessions: u64,
    pub success_rate: f64,
    pub total_files_processed: u64,
    pub total_bytes_processed: u64,
    pub avg_session_duration_sec: f64,
    pub avg_processing_rate: f64,
    pub total_http_requests: u64,
    pub request_success_rate: f64,
    pub avg_request_duration_ms: f64,
    pub sessions_last_hour: u64,
    pub error_rate_last_hour: f64,
}

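// Illustrative only: one way these fields could be rendered in the Prometheus
// text exposition format. The metric names below are assumptions for the
// sketch, not necessarily the names the repo's /metrics route uses.
impl PrometheusMetrics {
    pub fn to_exposition_text(&self) -> String {
        format!(
            "# TYPE readur_webdav_sessions_total counter\n\
             readur_webdav_sessions_total {}\n\
             # TYPE readur_webdav_files_processed_total counter\n\
             readur_webdav_files_processed_total {}\n\
             # TYPE readur_webdav_request_success_rate gauge\n\
             readur_webdav_request_success_rate {}\n",
            self.total_sessions, self.total_files_processed, self.request_success_rate,
        )
    }
}
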
/// Circular buffer for efficient time-window tracking
#[derive(Debug)]
struct CircularBuffer<T> {
    data: Vec<Option<T>>,
    head: usize,
    size: usize,
    capacity: usize,
}

impl<T> CircularBuffer<T> {
    fn new(capacity: usize) -> Self {
        Self {
            data: (0..capacity).map(|_| None).collect(),
            head: 0,
            size: 0,
            capacity,
        }
    }

    fn push(&mut self, item: T) {
        self.data[self.head] = Some(item);
        self.head = (self.head + 1) % self.capacity;
        if self.size < self.capacity {
            self.size += 1;
        }
    }
}

// Implement specific iterator methods for each type
impl CircularBuffer<SessionEvent> {
    fn iter_recent(&self, cutoff_timestamp: u64) -> impl Iterator<Item = &SessionEvent> {
        self.data.iter()
            .filter_map(|opt| opt.as_ref())
            .filter(move |item| item.timestamp >= cutoff_timestamp)
    }
}

impl CircularBuffer<RequestEvent> {
    fn iter_recent(&self, cutoff_timestamp: u64) -> impl Iterator<Item = &RequestEvent> {
        self.data.iter()
            .filter_map(|opt| opt.as_ref())
            .filter(move |item| item.timestamp >= cutoff_timestamp)
    }
}

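// Design note: the two `iter_recent` impls above are identical except for the
// element type. A generic alternative (sketch only, not what this commit does)
// would hang the filter off a small trait:
//
//     trait Timestamped { fn timestamp(&self) -> u64; }
//
//     impl<T: Timestamped> CircularBuffer<T> {
//         fn iter_recent(&self, cutoff: u64) -> impl Iterator<Item = &T> {
//             self.data.iter()
//                 .filter_map(|opt| opt.as_ref())
//                 .filter(move |item| item.timestamp() >= cutoff)
//         }
//     }
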
impl WebDAVMetrics {
    /// Create a new metrics instance
    pub fn new() -> Self {
        Self {
            sessions_total: Arc::new(AtomicU64::new(0)),
            sessions_successful: Arc::new(AtomicU64::new(0)),
            sessions_failed: Arc::new(AtomicU64::new(0)),
            files_processed: Arc::new(AtomicU64::new(0)),
            bytes_processed: Arc::new(AtomicU64::new(0)),
            http_requests_total: Arc::new(AtomicU64::new(0)),
            http_requests_successful: Arc::new(AtomicU64::new(0)),

            // Buffers sized for 1 hour of data at reasonable rates
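            // (Assumed arithmetic behind that sizing: 10_000 request slots covers
            // roughly 2.8 requests/sec sustained for an hour; 1_000 session slots
            // is ample for an hourly window.)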
            recent_sessions: Arc::new(RwLock::new(CircularBuffer::new(1000))),
            recent_requests: Arc::new(RwLock::new(CircularBuffer::new(10000))),

            cached_calculations: Arc::new(RwLock::new(CachedCalculations {
                success_rate: 0.0,
                avg_session_duration_sec: 0.0,
                avg_processing_rate: 0.0,
                request_success_rate: 0.0,
                avg_request_duration_ms: 0.0,
                sessions_last_hour: 0,
                error_rate_last_hour: 0.0,
            })),
            cache_timestamp: Arc::new(AtomicU64::new(0)),
        }
    }

    /// Record a completed WebDAV sync session
    pub async fn record_session(&self, success: bool, duration_ms: u64, files_count: u32, bytes_count: u64) {
        let timestamp = current_timestamp();

        // Update atomic counters
        self.sessions_total.fetch_add(1, Ordering::Relaxed);
        if success {
            self.sessions_successful.fetch_add(1, Ordering::Relaxed);
        } else {
            self.sessions_failed.fetch_add(1, Ordering::Relaxed);
        }

        self.files_processed.fetch_add(files_count as u64, Ordering::Relaxed);
        self.bytes_processed.fetch_add(bytes_count, Ordering::Relaxed);

        // Add to time-windowed data
        let mut recent = self.recent_sessions.write().await;
        recent.push(SessionEvent {
            timestamp,
            success,
            duration_ms,
            files_count,
            bytes_count,
        });

        // Invalidate cache
        self.cache_timestamp.store(0, Ordering::Relaxed);
    }

    /// Record a WebDAV HTTP request
    pub async fn record_request(&self, success: bool, duration_ms: u64) {
        let timestamp = current_timestamp();

        // Update atomic counters
        self.http_requests_total.fetch_add(1, Ordering::Relaxed);
        if success {
            self.http_requests_successful.fetch_add(1, Ordering::Relaxed);
        }

        // Add to time-windowed data
        let mut recent = self.recent_requests.write().await;
        recent.push(RequestEvent {
            timestamp,
            success,
            duration_ms,
        });

        // Invalidate cache
        self.cache_timestamp.store(0, Ordering::Relaxed);
    }

    /// Get all metrics for Prometheus export
    pub async fn get_prometheus_metrics(&self) -> PrometheusMetrics {
        const CACHE_DURATION_SECS: u64 = 30; // Cache for 30 seconds

        let now = current_timestamp();
        let last_cache = self.cache_timestamp.load(Ordering::Relaxed);

        // Use cache if still valid
        if now - last_cache < CACHE_DURATION_SECS {
            let cached = self.cached_calculations.read().await;
            return self.build_prometheus_metrics(&cached).await;
        }

        // Recalculate metrics
        let calculations = self.calculate_derived_metrics(now).await;

        // Update cache
        {
            let mut cached = self.cached_calculations.write().await;
            *cached = calculations.clone();
        }
        self.cache_timestamp.store(now, Ordering::Relaxed);

        self.build_prometheus_metrics(&calculations).await
    }

    /// Calculate all derived metrics that require time-window analysis
    async fn calculate_derived_metrics(&self, now: u64) -> CachedCalculations {
        let one_hour_ago = now.saturating_sub(3600);

        // Get recent data
        let sessions = self.recent_sessions.read().await;
        let requests = self.recent_requests.read().await;

        // Calculate session metrics
        let recent_session_events: Vec<&SessionEvent> = sessions.iter_recent(one_hour_ago).collect();

        let total_sessions = self.sessions_total.load(Ordering::Relaxed);
        let successful_sessions = self.sessions_successful.load(Ordering::Relaxed);

        let success_rate = if total_sessions > 0 {
            (successful_sessions as f64 / total_sessions as f64) * 100.0
        } else {
            0.0
        };

        let (avg_session_duration_sec, avg_processing_rate) = if !recent_session_events.is_empty() {
            let total_duration: u64 = recent_session_events.iter().map(|e| e.duration_ms).sum();
            let total_files: u32 = recent_session_events.iter().map(|e| e.files_count).sum();

            let avg_duration = total_duration as f64 / recent_session_events.len() as f64 / 1000.0;
            let avg_rate = if total_duration > 0 {
                total_files as f64 / (total_duration as f64 / 1000.0)
            } else {
                0.0
            };

            (avg_duration, avg_rate)
        } else {
            (0.0, 0.0)
        };

        // Calculate request metrics
        let recent_request_events: Vec<&RequestEvent> = requests.iter_recent(one_hour_ago).collect();

        let total_requests = self.http_requests_total.load(Ordering::Relaxed);
        let successful_requests = self.http_requests_successful.load(Ordering::Relaxed);

        let request_success_rate = if total_requests > 0 {
            (successful_requests as f64 / total_requests as f64) * 100.0
        } else {
            0.0
        };

        let avg_request_duration_ms = if !recent_request_events.is_empty() {
            let total_duration: u64 = recent_request_events.iter().map(|e| e.duration_ms).sum();
            total_duration as f64 / recent_request_events.len() as f64
        } else {
            0.0
        };

        // Last hour metrics
        let sessions_last_hour = recent_session_events.len() as u64;
        let failed_sessions_last_hour = recent_session_events.iter()
            .filter(|e| !e.success)
            .count() as u64;

        let error_rate_last_hour = if sessions_last_hour > 0 {
            (failed_sessions_last_hour as f64 / sessions_last_hour as f64) * 100.0
        } else {
            0.0
        };

        CachedCalculations {
            success_rate,
            avg_session_duration_sec,
            avg_processing_rate,
            request_success_rate,
            avg_request_duration_ms,
            sessions_last_hour,
            error_rate_last_hour,
        }
    }

    /// Build the final Prometheus metrics structure
    async fn build_prometheus_metrics(&self, calculations: &CachedCalculations) -> PrometheusMetrics {
        PrometheusMetrics {
            total_sessions: self.sessions_total.load(Ordering::Relaxed),
            successful_sessions: self.sessions_successful.load(Ordering::Relaxed),
            failed_sessions: self.sessions_failed.load(Ordering::Relaxed),
            success_rate: calculations.success_rate,
            total_files_processed: self.files_processed.load(Ordering::Relaxed),
            total_bytes_processed: self.bytes_processed.load(Ordering::Relaxed),
            avg_session_duration_sec: calculations.avg_session_duration_sec,
            avg_processing_rate: calculations.avg_processing_rate,
            total_http_requests: self.http_requests_total.load(Ordering::Relaxed),
            request_success_rate: calculations.request_success_rate,
            avg_request_duration_ms: calculations.avg_request_duration_ms,
            sessions_last_hour: calculations.sessions_last_hour,
            error_rate_last_hour: calculations.error_rate_last_hour,
        }
    }

    /// Get simple session counter for basic tracking
    pub fn get_total_sessions(&self) -> u64 {
        self.sessions_total.load(Ordering::Relaxed)
    }
}

fn current_timestamp() -> u64 {
    SystemTime::now()
        .duration_since(UNIX_EPOCH)
        .unwrap_or_default()
        .as_secs()
}
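// Note on the `unwrap_or_default()` above: a system clock set before the Unix
// epoch yields timestamp 0 instead of panicking, so such events simply age out
// of every time window rather than crashing the metrics path.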

impl Default for WebDAVMetrics {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use tokio::time::{sleep, Duration};

    #[tokio::test]
    async fn test_basic_session_recording() {
        let metrics = WebDAVMetrics::new();

        // Record successful session
        metrics.record_session(true, 5000, 10, 1024000).await;

        // Record failed session
        metrics.record_session(false, 2000, 0, 0).await;

        let prometheus_metrics = metrics.get_prometheus_metrics().await;

        assert_eq!(prometheus_metrics.total_sessions, 2);
        assert_eq!(prometheus_metrics.successful_sessions, 1);
        assert_eq!(prometheus_metrics.failed_sessions, 1);
        assert_eq!(prometheus_metrics.success_rate, 50.0);
        assert_eq!(prometheus_metrics.total_files_processed, 10);
        assert_eq!(prometheus_metrics.total_bytes_processed, 1024000);
    }

    #[tokio::test]
    async fn test_request_recording() {
        let metrics = WebDAVMetrics::new();

        // Record successful requests
        metrics.record_request(true, 100).await;
        metrics.record_request(true, 200).await;

        // Record failed request
        metrics.record_request(false, 5000).await;

        let prometheus_metrics = metrics.get_prometheus_metrics().await;

        assert_eq!(prometheus_metrics.total_http_requests, 3);
        assert!((prometheus_metrics.request_success_rate - 66.67).abs() < 0.1);
        assert!((prometheus_metrics.avg_request_duration_ms - 1766.67).abs() < 0.1);
    }

    #[tokio::test]
    async fn test_caching() {
        let metrics = WebDAVMetrics::new();

        metrics.record_session(true, 1000, 5, 512000).await;

        // First call should calculate
        let start = std::time::Instant::now();
        let metrics1 = metrics.get_prometheus_metrics().await;
        let first_duration = start.elapsed();

        // Second call should use cache
        let start = std::time::Instant::now();
        let metrics2 = metrics.get_prometheus_metrics().await;
        let second_duration = start.elapsed();

        // Results should be identical (from cache)
        assert_eq!(metrics1.total_sessions, metrics2.total_sessions);
        assert_eq!(metrics1.success_rate, metrics2.success_rate);

        // Second call should be faster (cached)
        assert!(second_duration < first_duration);
    }

    #[tokio::test]
    async fn test_circular_buffer() {
        let mut buffer = CircularBuffer::new(3);

        buffer.push(SessionEvent {
            timestamp: 100,
            success: true,
            duration_ms: 1000,
            files_count: 1,
            bytes_count: 100,
        });

        buffer.push(SessionEvent {
            timestamp: 200,
            success: false,
            duration_ms: 2000,
            files_count: 0,
            bytes_count: 0,
        });

        // Should have 2 items
        let recent: Vec<_> = buffer.iter_recent(50).collect();
        assert_eq!(recent.len(), 2);

        // Add more items than capacity
        buffer.push(SessionEvent {
            timestamp: 300,
            success: true,
            duration_ms: 3000,
            files_count: 2,
            bytes_count: 200,
        });

        buffer.push(SessionEvent {
            timestamp: 400,
            success: true,
            duration_ms: 4000,
            files_count: 3,
            bytes_count: 300,
        });

        // Should still have only 3 items (capacity limit)
        let recent: Vec<_> = buffer.iter_recent(50).collect();
        assert_eq!(recent.len(), 3);

        // Should not include the first item (timestamp 100) as it was overwritten
        assert!(recent.iter().all(|e| e.timestamp >= 200));
    }
}
@@ -1,821 +0,0 @@
use anyhow::Result;
use std::sync::Arc;
use std::time::{Duration, Instant};
use std::collections::{HashMap, VecDeque};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
use uuid::Uuid;
use reqwest::header::HeaderMap;

use crate::db::Database;
use crate::models::webdav_metrics::*;
use crate::services::webdav::build_user_agent;

/// Maximum number of response times to keep in memory to prevent unbounded growth
const MAX_RESPONSE_TIMES: usize = 1000;

/// Duration after which inactive sessions are considered stale and cleaned up
const SESSION_TIMEOUT_MINUTES: u64 = 60;

/// Duration after which inactive directory scans are considered stale and cleaned up
const DIRECTORY_TIMEOUT_MINUTES: u64 = 30;

/// WebDAV metrics collector that tracks performance and operations
///
/// This service collects detailed metrics about WebDAV sync operations including:
/// - Overall sync session metrics (files processed, time taken, etc.)
/// - Per-directory scan metrics (discovery time, file counts, errors)
/// - Individual HTTP request metrics (response times, success/failure rates)
///
/// The metrics are stored in the database for analysis and can be used to:
/// - Identify performance bottlenecks
/// - Track sync operation success rates
/// - Analyze network performance patterns
/// - Generate insights for optimization
#[derive(Clone)]
pub struct WebDAVMetricsTracker {
    db: Database,
    /// Active sessions being tracked
    active_sessions: Arc<RwLock<HashMap<Uuid, ActiveSession>>>,
    /// Active directory scans being tracked
    active_directories: Arc<RwLock<HashMap<Uuid, ActiveDirectoryScan>>>,
}

/// Represents an active sync session being tracked
struct ActiveSession {
    session_id: Uuid,
    user_id: Uuid,
    source_id: Option<Uuid>,
    started_at: Instant,
    last_activity: Instant,
    counters: SessionCounters,
}

/// Session-level counters that are updated during the sync
#[derive(Default)]
struct SessionCounters {
    directories_discovered: i32,
    directories_processed: i32,
    files_discovered: i32,
    files_processed: i32,
    total_bytes_discovered: i64,
    total_bytes_processed: i64,
    directories_skipped: i32,
    files_skipped: i32,
    skip_reasons: HashMap<String, i32>,
}

/// Represents an active directory scan being tracked
struct ActiveDirectoryScan {
    metric_id: Uuid,
    session_id: Uuid,
    directory_path: String,
    started_at: Instant,
    last_activity: Instant,
    counters: DirectoryCounters,
}

/// Directory-level counters
#[derive(Default)]
struct DirectoryCounters {
    files_found: i32,
    subdirectories_found: i32,
    total_size_bytes: i64,
    files_processed: i32,
    files_skipped: i32,
    files_failed: i32,
    http_requests_made: i32,
    propfind_requests: i32,
    get_requests: i32,
    errors_encountered: i32,
    error_types: Vec<String>,
    warnings_count: i32,
    response_times: VecDeque<i64>, // Use VecDeque for O(1) front removal
    etag_matches: i32,
    etag_mismatches: i32,
    cache_hits: i32,
    cache_misses: i32,
}

impl WebDAVMetricsTracker {
    /// Create a new WebDAV metrics tracker
    pub fn new(db: Database) -> Self {
        Self {
            db,
            active_sessions: Arc::new(RwLock::new(HashMap::new())),
            active_directories: Arc::new(RwLock::new(HashMap::new())),
        }
    }

    /// Start tracking a new WebDAV sync session
    pub async fn start_session(
        &self,
        user_id: Uuid,
        source_id: Option<Uuid>,
        sync_type: String,
        root_path: String,
        max_depth: Option<i32>,
    ) -> Result<Uuid> {
        let create_session = CreateWebDAVSyncSession {
            user_id,
            source_id,
            sync_type,
            root_path,
            max_depth,
        };

        let session_id = self.db.create_webdav_sync_session(&create_session).await?;

        let now = Instant::now();
        let active_session = ActiveSession {
            session_id,
            user_id,
            source_id,
            started_at: now,
            last_activity: now,
            counters: SessionCounters::default(),
        };

        self.active_sessions.write().await.insert(session_id, active_session);

        info!(
            "Started WebDAV metrics tracking for session {} (user: {}, source: {:?})",
            session_id, user_id, source_id
        );

        Ok(session_id)
    }

    /// Update session counters
    pub async fn update_session_counters(
        &self,
        session_id: Uuid,
        directories_discovered_delta: i32,
        directories_processed_delta: i32,
        files_discovered_delta: i32,
        files_processed_delta: i32,
        bytes_discovered_delta: i64,
        bytes_processed_delta: i64,
    ) -> Result<()> {
        let mut sessions = self.active_sessions.write().await;
        if let Some(session) = sessions.get_mut(&session_id) {
            session.last_activity = Instant::now();
            session.counters.directories_discovered += directories_discovered_delta;
            session.counters.directories_processed += directories_processed_delta;
            session.counters.files_discovered += files_discovered_delta;
            session.counters.files_processed += files_processed_delta;
            session.counters.total_bytes_discovered += bytes_discovered_delta;
            session.counters.total_bytes_processed += bytes_processed_delta;

            debug!(
                "Updated session {} counters: +{} dirs, +{} files, +{} bytes",
                session_id, directories_processed_delta, files_processed_delta, bytes_processed_delta
            );
        }
        Ok(())
    }

    /// Record skipped items with reasons
    pub async fn record_skipped_items(
        &self,
        session_id: Uuid,
        directories_skipped: i32,
        files_skipped: i32,
        skip_reason: &str,
    ) -> Result<()> {
        let mut sessions = self.active_sessions.write().await;
        if let Some(session) = sessions.get_mut(&session_id) {
            session.last_activity = Instant::now();
            session.counters.directories_skipped += directories_skipped;
            session.counters.files_skipped += files_skipped;
            *session.counters.skip_reasons.entry(skip_reason.to_string()).or_insert(0) +=
                directories_skipped + files_skipped;
        }
        Ok(())
    }

    /// Finish a sync session and calculate final metrics
    pub async fn finish_session(
        &self,
        session_id: Uuid,
        final_status: WebDAVSyncStatus,
        error_message: Option<String>,
    ) -> Result<()> {
        let session = {
            let mut sessions = self.active_sessions.write().await;
            sessions.remove(&session_id)
        };

        if let Some(session) = session {
            // Convert skip_reasons to JSON
            let skip_reasons_json = if session.counters.skip_reasons.is_empty() {
                None
            } else {
                Some(serde_json::to_value(&session.counters.skip_reasons)?)
            };

            let update = UpdateWebDAVSyncSession {
                directories_discovered: Some(session.counters.directories_discovered),
                directories_processed: Some(session.counters.directories_processed),
                files_discovered: Some(session.counters.files_discovered),
                files_processed: Some(session.counters.files_processed),
                total_bytes_discovered: Some(session.counters.total_bytes_discovered),
                total_bytes_processed: Some(session.counters.total_bytes_processed),
                directories_skipped: Some(session.counters.directories_skipped),
                files_skipped: Some(session.counters.files_skipped),
                skip_reasons: skip_reasons_json,
                status: Some(final_status),
                final_error_message: error_message,
            };

            self.db.update_webdav_sync_session(session_id, &update).await?;

            // Small delay to ensure all previous HTTP request inserts are committed
            // This addresses a transaction isolation issue where the finalize function
            // can't see the requests that were just inserted
            tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;

            self.db.finalize_webdav_sync_session(session_id).await?;

            info!(
                "Finished WebDAV session {} - processed {} files ({} bytes) in {} directories",
                session_id,
                session.counters.files_processed,
                session.counters.total_bytes_processed,
                session.counters.directories_processed
            );
        }

        Ok(())
    }

    /// Start tracking a directory scan
    pub async fn start_directory_scan(
        &self,
        session_id: Uuid,
        user_id: Uuid,
        source_id: Option<Uuid>,
        directory_path: String,
        directory_depth: i32,
        parent_directory_path: Option<String>,
    ) -> Result<Uuid> {
        let create_metric = CreateWebDAVDirectoryMetric {
            session_id,
            user_id,
            source_id,
            directory_path: directory_path.clone(),
            directory_depth,
            parent_directory_path,
        };

        let metric_id = self.db.create_webdav_directory_metric(&create_metric).await?;

        let now = Instant::now();
        let active_scan = ActiveDirectoryScan {
            metric_id,
            session_id,
            directory_path: directory_path.clone(),
            started_at: now,
            last_activity: now,
            counters: DirectoryCounters::default(),
        };

        self.active_directories.write().await.insert(metric_id, active_scan);

        debug!(
            "Started directory scan tracking for '{}' (metric: {}, session: {})",
            directory_path, metric_id, session_id
        );

        Ok(metric_id)
    }

    /// Update directory scan counters
    pub async fn update_directory_counters(
        &self,
        metric_id: Uuid,
        files_found_delta: i32,
        subdirectories_found_delta: i32,
        size_bytes_delta: i64,
        files_processed_delta: i32,
        files_skipped_delta: i32,
        files_failed_delta: i32,
    ) -> Result<()> {
        let mut directories = self.active_directories.write().await;
        if let Some(scan) = directories.get_mut(&metric_id) {
            scan.last_activity = Instant::now();
            scan.counters.files_found += files_found_delta;
            scan.counters.subdirectories_found += subdirectories_found_delta;
            scan.counters.total_size_bytes += size_bytes_delta;
            scan.counters.files_processed += files_processed_delta;
            scan.counters.files_skipped += files_skipped_delta;
            scan.counters.files_failed += files_failed_delta;
        }
        Ok(())
    }

    /// Record directory scan error
    pub async fn record_directory_error(
        &self,
        metric_id: Uuid,
        error_type: &str,
        is_warning: bool,
    ) -> Result<()> {
        let mut directories = self.active_directories.write().await;
        if let Some(scan) = directories.get_mut(&metric_id) {
            scan.last_activity = Instant::now();
            if is_warning {
                scan.counters.warnings_count += 1;
            } else {
                scan.counters.errors_encountered += 1;
                scan.counters.error_types.push(error_type.to_string());
            }
        }
        Ok(())
    }

    /// Record ETag comparison result
    pub async fn record_etag_result(
        &self,
        metric_id: Uuid,
        etag_matched: bool,
        cache_hit: bool,
    ) -> Result<()> {
        let mut directories = self.active_directories.write().await;
        if let Some(scan) = directories.get_mut(&metric_id) {
            scan.last_activity = Instant::now();
            if etag_matched {
                scan.counters.etag_matches += 1;
            } else {
                scan.counters.etag_mismatches += 1;
            }

            if cache_hit {
                scan.counters.cache_hits += 1;
            } else {
                scan.counters.cache_misses += 1;
            }
        }
        Ok(())
    }

    /// Finish a directory scan
    pub async fn finish_directory_scan(
        &self,
        metric_id: Uuid,
        status: &str,
        skip_reason: Option<String>,
        error_message: Option<String>,
    ) -> Result<()> {
        let scan = {
            let mut directories = self.active_directories.write().await;
            directories.remove(&metric_id)
        };

        if let Some(scan) = scan {
            // Convert error types to JSON
            let error_types_json = if scan.counters.error_types.is_empty() {
                None
            } else {
                Some(serde_json::to_value(&scan.counters.error_types)?)
            };

            let update = UpdateWebDAVDirectoryMetric {
                files_found: Some(scan.counters.files_found),
                subdirectories_found: Some(scan.counters.subdirectories_found),
                total_size_bytes: Some(scan.counters.total_size_bytes),
                files_processed: Some(scan.counters.files_processed),
                files_skipped: Some(scan.counters.files_skipped),
                files_failed: Some(scan.counters.files_failed),
                http_requests_made: Some(scan.counters.http_requests_made),
                propfind_requests: Some(scan.counters.propfind_requests),
                get_requests: Some(scan.counters.get_requests),
                errors_encountered: Some(scan.counters.errors_encountered),
                error_types: error_types_json,
                warnings_count: Some(scan.counters.warnings_count),
                etag_matches: Some(scan.counters.etag_matches),
                etag_mismatches: Some(scan.counters.etag_mismatches),
                cache_hits: Some(scan.counters.cache_hits),
                cache_misses: Some(scan.counters.cache_misses),
                status: Some(status.to_string()),
                skip_reason,
                error_message,
            };

            self.db.update_webdav_directory_metric(metric_id, &update).await?;

            debug!(
                "Finished directory scan '{}' - found {} files, processed {} files, {} errors",
                scan.directory_path,
                scan.counters.files_found,
                scan.counters.files_processed,
                scan.counters.errors_encountered
            );
        }

        Ok(())
    }

    /// Record an HTTP request metric
    pub async fn record_http_request(
        &self,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        request_type: WebDAVRequestType,
        operation_type: WebDAVOperationType,
        target_path: String,
        duration: Duration,
        request_size_bytes: Option<i64>,
        response_size_bytes: Option<i64>,
        http_status_code: Option<i32>,
        success: bool,
        retry_attempt: i32,
        error_type: Option<String>,
        error_message: Option<String>,
        server_headers: Option<&HeaderMap>,
        remote_ip: Option<String>,
    ) -> Result<Uuid> {
        // Extract server information from headers
        let server_header = server_headers
            .and_then(|h| h.get("server"))
            .and_then(|v| v.to_str().ok())
            .map(|s| s.to_string());

        let dav_header = server_headers
            .and_then(|h| h.get("dav"))
            .and_then(|v| v.to_str().ok())
            .map(|s| s.to_string());

        let etag_value = server_headers
            .and_then(|h| h.get("etag"))
            .and_then(|v| v.to_str().ok())
            .map(|s| s.to_string());

        let content_type = server_headers
            .and_then(|h| h.get("content-type"))
            .and_then(|v| v.to_str().ok())
            .map(|s| s.to_string());

        let last_modified = server_headers
            .and_then(|h| h.get("last-modified"))
            .and_then(|v| v.to_str().ok())
            .and_then(|s| chrono::DateTime::parse_from_rfc2822(s).ok())
            .map(|dt| dt.with_timezone(&chrono::Utc));

        let metric = CreateWebDAVRequestMetric {
            session_id,
            directory_metric_id,
            user_id,
            source_id,
            request_type,
            operation_type,
            target_path: target_path.clone(),
            duration_ms: duration.as_millis() as i64,
            request_size_bytes,
            response_size_bytes,
            http_status_code,
            dns_lookup_ms: None, // Could be enhanced with detailed timing
            tcp_connect_ms: None, // Could be enhanced with detailed timing
            tls_handshake_ms: None, // Could be enhanced with detailed timing
            time_to_first_byte_ms: None, // Could be enhanced with detailed timing
            success,
            retry_attempt,
            error_type: error_type.clone(),
            error_message,
            server_header,
            dav_header,
            etag_value,
            last_modified,
            content_type,
            remote_ip,
            user_agent: Some(build_user_agent()),
        };

        tracing::debug!("Recording request with session_id: {:?}", session_id);
        let request_id = self.db.record_webdav_request_metric(&metric).await?;

        // Update active directory counters if applicable
        if let Some(dir_metric_id) = directory_metric_id {
            let mut directories = self.active_directories.write().await;
            if let Some(scan) = directories.get_mut(&dir_metric_id) {
                scan.last_activity = Instant::now();
                scan.counters.http_requests_made += 1;

                // Implement bounded circular buffer for response times using VecDeque for O(1) operations
                scan.counters.response_times.push_back(duration.as_millis() as i64);
                if scan.counters.response_times.len() > MAX_RESPONSE_TIMES {
                    scan.counters.response_times.pop_front(); // O(1) removal of oldest entry
                }

                match request_type {
                    WebDAVRequestType::PropFind => scan.counters.propfind_requests += 1,
                    WebDAVRequestType::Get => scan.counters.get_requests += 1,
                    _ => {}
                }

                if !success {
                    scan.counters.errors_encountered += 1;
                    if let Some(err_type) = &error_type {
                        scan.counters.error_types.push(err_type.clone());
                    }
                }
            }
        }

        debug!(
            "Recorded HTTP request: {} {} -> {} ({}ms, success: {})",
            request_type, target_path,
            http_status_code.map(|c| c.to_string()).unwrap_or_else(|| "N/A".to_string()),
            duration.as_millis(), success
        );

        Ok(request_id)
    }

    /// Get metrics summary for a user or source
    pub async fn get_metrics_summary(
        &self,
        query: &WebDAVMetricsQuery,
    ) -> Result<Option<WebDAVMetricsSummary>> {
        self.db.get_webdav_metrics_summary(query).await
    }

    /// Get performance insights for a session
    pub async fn get_performance_insights(
        &self,
        session_id: Uuid,
        user_id: Uuid,
    ) -> Result<Option<WebDAVPerformanceInsights>> {
        self.db.get_webdav_performance_insights(session_id, user_id).await
    }

    /// List recent sessions for a user
    pub async fn list_sessions(
        &self,
        query: &WebDAVMetricsQuery,
    ) -> Result<Vec<WebDAVSyncSession>> {
        self.db.list_webdav_sync_sessions(query).await
    }

    /// Get detailed session information
    pub async fn get_session_details(
        &self,
        session_id: Uuid,
        user_id: Uuid,
    ) -> Result<Option<WebDAVSyncSession>> {
        self.db.get_webdav_sync_session(session_id, user_id).await
    }

    /// Get directory metrics for a session
    pub async fn get_directory_metrics(
        &self,
        session_id: Uuid,
        user_id: Uuid,
    ) -> Result<Vec<WebDAVDirectoryMetric>> {
        self.db.get_webdav_directory_metrics(session_id, user_id).await
    }

    /// Get request metrics for analysis
    pub async fn get_request_metrics(
        &self,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        limit: Option<i32>,
    ) -> Result<Vec<WebDAVRequestMetric>> {
        self.db.get_webdav_request_metrics(session_id, directory_metric_id, user_id, limit).await
    }

    /// Clean up old metrics (should be called periodically)
    pub async fn cleanup_old_metrics(&self, days_to_keep: i32) -> Result<u64> {
        self.db.cleanup_old_webdav_metrics(days_to_keep).await
    }

    /// Utility method to record a simple operation timing
    pub async fn time_operation<T, F, Fut>(
        &self,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        request_type: WebDAVRequestType,
        operation_type: WebDAVOperationType,
        target_path: String,
        operation: F,
    ) -> Result<T>
    where
        F: FnOnce() -> Fut,
        Fut: std::future::Future<Output = Result<T>>,
    {
        let start_time = Instant::now();
        let result = operation().await;
        let duration = start_time.elapsed();

        let (success, error_type, error_message) = match &result {
            Ok(_) => (true, None, None),
            Err(e) => (false, Some("operation_error".to_string()), Some(e.to_string())),
        };

        // Record the request metric (ignore errors in metrics recording)
        let _ = self.record_http_request(
            session_id,
            directory_metric_id,
            user_id,
            source_id,
            request_type,
            operation_type,
            target_path,
            duration,
            None, // request_size_bytes
            None, // response_size_bytes
            None, // http_status_code
            success,
            0, // retry_attempt
            error_type,
            error_message,
            None, // server_headers
            None, // remote_ip
        ).await;

        result
    }

    /// Clean up stale sessions and directories to prevent memory leaks
    /// This should be called periodically (e.g., every 15-30 minutes)
    pub async fn cleanup_stale_sessions(&self) -> Result<(usize, usize)> {
        let now = Instant::now();
        let session_timeout = Duration::from_secs(SESSION_TIMEOUT_MINUTES * 60);
        let directory_timeout = Duration::from_secs(DIRECTORY_TIMEOUT_MINUTES * 60);

        let mut sessions_cleaned = 0;
        let mut directories_cleaned = 0;

        // Cleanup stale sessions
        {
            let mut sessions = self.active_sessions.write().await;
            let stale_sessions: Vec<Uuid> = sessions
                .iter()
                .filter(|(_, session)| {
                    now.duration_since(session.last_activity) > session_timeout
                })
                .map(|(session_id, _)| *session_id)
                .collect();

            for session_id in &stale_sessions {
                if let Some(session) = sessions.remove(session_id) {
                    sessions_cleaned += 1;
                    warn!(
                        "🧹 Cleaned up stale WebDAV session {} after {} minutes of inactivity",
                        session_id,
                        now.duration_since(session.last_activity).as_secs() / 60
                    );

                    // Try to finalize the session in the database
                    let _ = self.finish_session(
                        *session_id,
                        WebDAVSyncStatus::Failed,
                        Some("Session timed out due to inactivity".to_string()),
                    ).await;
                }
            }
        }

        // Cleanup stale directory scans
        {
            let mut directories = self.active_directories.write().await;
            let stale_directories: Vec<Uuid> = directories
                .iter()
                .filter(|(_, scan)| {
                    now.duration_since(scan.last_activity) > directory_timeout
                })
                .map(|(metric_id, _)| *metric_id)
                .collect();

            for metric_id in &stale_directories {
                if let Some(scan) = directories.remove(metric_id) {
                    directories_cleaned += 1;
                    warn!(
                        "🧹 Cleaned up stale directory scan {} for path '{}' after {} minutes of inactivity",
                        metric_id,
                        scan.directory_path,
                        now.duration_since(scan.last_activity).as_secs() / 60
                    );

                    // Try to finalize the directory scan in the database
                    let _ = self.finish_directory_scan(
                        *metric_id,
                        "timeout",
                        Some("Scan timed out due to inactivity".to_string()),
                        Some("Directory scan exceeded maximum time limit".to_string()),
                    ).await;
                }
            }
        }

        if sessions_cleaned > 0 || directories_cleaned > 0 {
            info!(
                "🧹 Cleanup completed: {} stale sessions and {} stale directory scans removed",
                sessions_cleaned, directories_cleaned
            );
        }

        Ok((sessions_cleaned, directories_cleaned))
    }

    /// Get the number of active sessions and directories currently being tracked
    pub async fn get_active_counts(&self) -> (usize, usize) {
        let sessions_count = self.active_sessions.read().await.len();
        let directories_count = self.active_directories.read().await.len();
        (sessions_count, directories_count)
    }

    /// Manually cleanup all active sessions and directories (useful for testing)
    pub async fn cleanup_all(&self) -> Result<(usize, usize)> {
        // Cleanup all sessions
        let sessions_cleaned = {
            let mut sessions = self.active_sessions.write().await;
            let count = sessions.len();
            for (session_id, _) in sessions.drain() {
                let _ = self.finish_session(
                    session_id,
                    WebDAVSyncStatus::Failed,
                    Some("Manually cleaned up".to_string()),
                ).await;
            }
            count
        };

        // Cleanup all directories
        let directories_cleaned = {
            let mut directories = self.active_directories.write().await;
            let count = directories.len();
            for (metric_id, _) in directories.drain() {
                let _ = self.finish_directory_scan(
                    metric_id,
                    "cleanup",
                    Some("Manually cleaned up".to_string()),
                    Some("Manual cleanup operation".to_string()),
                ).await;
            }
            count
        };

        info!(
            "🧹 Manual cleanup completed: {} sessions and {} directories removed",
            sessions_cleaned, directories_cleaned
        );

        Ok((sessions_cleaned, directories_cleaned))
    }
}

/// Extension trait to add metrics tracking to any operation
pub trait WebDAVMetricsExt {
    async fn with_metrics<T, F, Fut>(
        self,
        tracker: &WebDAVMetricsTracker,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        request_type: WebDAVRequestType,
        operation_type: WebDAVOperationType,
        target_path: String,
        operation: F,
    ) -> Result<T>
    where
        F: FnOnce() -> Fut,
        Fut: std::future::Future<Output = Result<T>>;
}

impl<S> WebDAVMetricsExt for S {
    async fn with_metrics<T, F, Fut>(
        self,
        tracker: &WebDAVMetricsTracker,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        source_id: Option<Uuid>,
        request_type: WebDAVRequestType,
        operation_type: WebDAVOperationType,
        target_path: String,
        operation: F,
    ) -> Result<T>
    where
        F: FnOnce() -> Fut,
        Fut: std::future::Future<Output = Result<T>>,
    {
        tracker.time_operation(
            session_id,
            directory_metric_id,
            user_id,
            source_id,
            request_type,
            operation_type,
            target_path,
            operation,
        ).await
    }
}
@@ -363,6 +363,7 @@ pub async fn create_test_app_state_with_config(config: Config) -> Result<Arc<App
        source_scheduler: None,
        queue_service,
        oidc_client: None,
        webdav_metrics_collector: None,
        sync_progress_tracker,
        user_watch_service: None,
    }))

@@ -399,6 +400,7 @@ pub async fn create_test_app_state_with_options(options: TestAppStateOptions) ->
        webdav_scheduler: None,
        source_scheduler: None,
        queue_service,
        webdav_metrics_collector: None,
        oidc_client: None,
        sync_progress_tracker,
        user_watch_service,

@@ -307,6 +307,7 @@ impl TestContext {
        oidc_client: None,
        sync_progress_tracker: Arc::new(crate::services::sync_progress_tracker::SyncProgressTracker::new()),
        user_watch_service,
        webdav_metrics_collector: None,
    });

    let app = Router::new()

@@ -54,7 +54,6 @@ mod tests {
        let _sources_router = crate::routes::sources::router();
        let _users_router = crate::routes::users::router();
        let _webdav_router = crate::routes::webdav::router();
        let _webdav_metrics_router = crate::routes::webdav_metrics::router();
        let _ignored_files_router = crate::routes::ignored_files::ignored_files_routes();
    }

@@ -1,801 +0,0 @@
|
||||
use anyhow::Result;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use uuid::Uuid;
|
||||
|
||||
use readur::{
|
||||
db::Database,
|
||||
models::webdav_metrics::*,
|
||||
models::{CreateUser, UserRole},
|
||||
services::webdav_metrics_tracker::WebDAVMetricsTracker,
|
||||
test_helpers::create_test_app_state,
|
||||
};
|
||||
|
||||
/// Helper to create a test user using the proper models
|
||||
async fn create_test_user(db: &Database) -> Result<Uuid> {
|
||||
let user_suffix = Uuid::new_v4().simple().to_string();
|
||||
let create_user = CreateUser {
|
||||
username: format!("testuser_{}", user_suffix),
|
||||
email: format!("test_{}@example.com", user_suffix),
|
||||
password: "test_password".to_string(),
|
||||
role: Some(UserRole::User),
|
||||
};
|
||||
|
||||
let created_user = db.create_user(create_user).await?;
|
||||
Ok(created_user.id)
|
||||
}
|
||||
|
||||
/// Helper to create a test WebDAV source
|
||||
async fn create_test_source(db: &Database, user_id: Uuid) -> Result<Uuid> {
|
||||
let source_id = Uuid::new_v4();
|
||||
sqlx::query(
|
||||
"INSERT INTO sources (id, user_id, name, source_type, config, enabled, created_at, updated_at)
|
||||
VALUES ($1, $2, $3, 'webdav', $4, true, NOW(), NOW())"
|
||||
)
|
||||
.bind(source_id)
|
||||
.bind(user_id)
|
||||
.bind(format!("Test WebDAV Source {}", source_id))
|
||||
.bind(serde_json::json!({
|
||||
"server_url": "https://example.com/webdav",
|
||||
"username": "testuser",
|
||||
"password": "testpass",
|
||||
"watch_folders": ["/Documents"],
|
||||
"file_extensions": ["pdf", "txt", "doc", "docx"],
|
||||
"auto_sync": true,
|
||||
"sync_interval_minutes": 60
|
||||
}))
|
||||
.execute(&db.pool)
|
||||
.await?;
|
||||
|
||||
Ok(source_id)
|
||||
}
|
||||
|
||||
/// Test basic session creation and management
|
||||
#[tokio::test]
|
||||
async fn test_webdav_session_lifecycle() -> Result<()> {
|
||||
let app_state = create_test_app_state().await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
|
||||
let user_id = create_test_user(&app_state.db).await?;
|
||||
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
|
||||
|
||||
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
|
||||
|
||||
// Start a sync session
|
||||
let session_id = metrics_tracker
|
||||
.start_session(
|
||||
user_id,
|
||||
source_id,
|
||||
"full_sync".to_string(),
|
||||
"/Documents".to_string(),
|
||||
Some(10),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Update session counters
|
||||
metrics_tracker
|
||||
.update_session_counters(
|
||||
session_id,
|
||||
5, // directories_discovered
|
||||
3, // directories_processed
|
||||
20, // files_discovered
|
||||
15, // files_processed
|
||||
1024 * 1024, // bytes_discovered (1MB)
|
||||
512 * 1024, // bytes_processed (512KB)
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Record some skipped items
|
||||
metrics_tracker
|
||||
.record_skipped_items(
|
||||
session_id,
|
||||
1, // directories_skipped
|
||||
2, // files_skipped
|
||||
"permission_denied",
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Finish the session
|
||||
metrics_tracker
|
||||
.finish_session(
|
||||
session_id,
|
||||
WebDAVSyncStatus::Completed,
|
||||
None,
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Verify session was recorded correctly
|
||||
let session = metrics_tracker
|
||||
.get_session_details(session_id, user_id)
|
||||
.await?
|
||||
.expect("Session should exist");
|
||||
|
||||
assert_eq!(session.user_id, user_id);
|
||||
assert_eq!(session.source_id, source_id);
|
||||
assert_eq!(session.sync_type, "full_sync");
|
||||
assert_eq!(session.root_path, "/Documents");
|
||||
assert_eq!(session.directories_discovered, 5);
|
||||
assert_eq!(session.directories_processed, 3);
|
||||
assert_eq!(session.files_discovered, 20);
|
||||
assert_eq!(session.files_processed, 15);
|
||||
assert_eq!(session.directories_skipped, 1);
|
||||
assert_eq!(session.files_skipped, 2);
|
||||
assert_eq!(session.status, "completed");
|
||||
assert!(session.duration_ms.is_some());
|
||||
|
||||
println!("✅ Session lifecycle test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test directory metrics tracking
|
||||
#[tokio::test]
|
||||
async fn test_directory_metrics_tracking() -> Result<()> {
|
||||
let app_state = create_test_app_state().await
|
||||
.map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
|
||||
let user_id = create_test_user(&app_state.db).await?;
|
||||
let source_id = Some(create_test_source(&app_state.db, user_id).await?);
|
||||
|
||||
let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());
|
||||
|
||||
// Start session
|
||||
let session_id = metrics_tracker
|
||||
.start_session(
|
||||
user_id,
|
||||
source_id,
|
||||
"incremental_sync".to_string(),
|
||||
"/Photos".to_string(),
|
||||
Some(5),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Start directory scan
|
||||
let dir_metric_id = metrics_tracker
|
||||
.start_directory_scan(
|
||||
session_id,
|
||||
user_id,
|
||||
source_id,
|
||||
"/Photos/2023".to_string(),
|
||||
2,
|
||||
Some("/Photos".to_string()),
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Update directory counters
|
||||
metrics_tracker
|
||||
.update_directory_counters(
|
||||
dir_metric_id,
|
||||
10, // files_found
|
||||
2, // subdirectories_found
|
||||
5 * 1024 * 1024, // size_bytes (5MB)
|
||||
8, // files_processed
|
||||
1, // files_skipped
|
||||
1, // files_failed
|
||||
)
|
||||
.await?;
|
||||
|
||||
// Record some errors and warnings
|
||||
metrics_tracker
|
||||
.record_directory_error(dir_metric_id, "timeout", false)
|
||||
.await?;
|
||||
|
||||
metrics_tracker
|
||||
.record_directory_error(dir_metric_id, "large_file", true)
|
||||
.await?;
|
||||
|
||||
// Record ETag results
|
||||
metrics_tracker
|
||||
.record_etag_result(dir_metric_id, true, true)
|
||||
.await?;
|
||||
|
||||
metrics_tracker
|
||||
.record_etag_result(dir_metric_id, false, false)
|
||||
.await?;
|
||||
|
||||
    // Finish directory scan
    metrics_tracker
        .finish_directory_scan(dir_metric_id, "completed", None, None)
        .await?;

    // Finish session
    metrics_tracker
        .finish_session(session_id, WebDAVSyncStatus::Completed, None)
        .await?;

    // Verify directory metrics
    let dir_metrics = metrics_tracker
        .get_directory_metrics(session_id, user_id)
        .await?;

    assert_eq!(dir_metrics.len(), 1);
    let dir_metric = &dir_metrics[0];

    assert_eq!(dir_metric.directory_path, "/Photos/2023");
    assert_eq!(dir_metric.directory_depth, 2);
    assert_eq!(dir_metric.files_found, 10);
    assert_eq!(dir_metric.subdirectories_found, 2);
    assert_eq!(dir_metric.files_processed, 8);
    assert_eq!(dir_metric.files_skipped, 1);
    assert_eq!(dir_metric.files_failed, 1);
    assert_eq!(dir_metric.errors_encountered, 1);
    assert_eq!(dir_metric.warnings_count, 1);
    assert_eq!(dir_metric.etag_matches, 1);
    assert_eq!(dir_metric.etag_mismatches, 1);
    assert_eq!(dir_metric.cache_hits, 1);
    assert_eq!(dir_metric.cache_misses, 1);
    assert!(dir_metric.scan_duration_ms.is_some());

    println!("✅ Directory metrics test passed");
    Ok(())
}

/// Test HTTP request metrics recording
#[tokio::test]
async fn test_http_request_metrics() -> Result<()> {
    let app_state = create_test_app_state().await
        .map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
    let user_id = create_test_user(&app_state.db).await?;
    let source_id = Some(create_test_source(&app_state.db, user_id).await?);

    let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());

    // Start session and directory
    let session_id = metrics_tracker
        .start_session(user_id, source_id, "test_sync".to_string(), "/".to_string(), None)
        .await?;

    let dir_metric_id = metrics_tracker
        .start_directory_scan(session_id, user_id, source_id, "/test".to_string(), 1, None)
        .await?;
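
    // record_http_request takes a long positional argument list; the apparent
    // order, read off the calls and assertions in this test, is:
    //   session_id, directory_metric_id, user_id, source_id, request_type,
    //   operation_type, target_path, duration, request_size_bytes,
    //   response_size_bytes, http_status_code, success, retry_attempt,
    //   error_type, error_message, an Option that is always None in this
    //   file, and the server address.
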
    // Record successful PROPFIND request
    let _request_id_1 = metrics_tracker
        .record_http_request(
            Some(session_id),
            Some(dir_metric_id),
            user_id,
            source_id,
            WebDAVRequestType::PropFind,
            WebDAVOperationType::Discovery,
            "/test".to_string(),
            Duration::from_millis(250),
            Some(512),
            Some(2048),
            Some(207), // Multi-Status
            true,
            0,
            None,
            None,
            None,
            Some("192.168.1.100".to_string()),
        )
        .await?;

    // Record failed GET request
    let _request_id_2 = metrics_tracker
        .record_http_request(
            Some(session_id),
            Some(dir_metric_id),
            user_id,
            source_id,
            WebDAVRequestType::Get,
            WebDAVOperationType::Download,
            "/test/file.pdf".to_string(),
            Duration::from_millis(5000),
            None,
            None,
            Some(404),
            false,
            1, // retry attempt
            Some("not_found".to_string()),
            Some("File not found".to_string()),
            None,
            Some("192.168.1.100".to_string()),
        )
        .await?;

    // Finish directory and session
    metrics_tracker
        .finish_directory_scan(dir_metric_id, "completed", None, None)
        .await?;

    metrics_tracker
        .finish_session(session_id, WebDAVSyncStatus::Completed, None)
        .await?;

    // Verify request metrics
    let request_metrics = metrics_tracker
        .get_request_metrics(Some(session_id), None, user_id, Some(10))
        .await?;

    assert_eq!(request_metrics.len(), 2);

    // Find the PROPFIND request
    let propfind_request = request_metrics
        .iter()
        .find(|r| r.request_type == "PROPFIND")
        .expect("Should find PROPFIND request");

    assert_eq!(propfind_request.operation_type, "discovery");
    assert_eq!(propfind_request.target_path, "/test");
    assert_eq!(propfind_request.duration_ms, 250);
    assert_eq!(propfind_request.request_size_bytes, Some(512));
    assert_eq!(propfind_request.response_size_bytes, Some(2048));
    assert_eq!(propfind_request.http_status_code, Some(207));
    assert!(propfind_request.success);
    assert_eq!(propfind_request.retry_attempt, 0);

    // Find the GET request
    let get_request = request_metrics
        .iter()
        .find(|r| r.request_type == "GET")
        .expect("Should find GET request");

    assert_eq!(get_request.operation_type, "download");
    assert_eq!(get_request.target_path, "/test/file.pdf");
    assert_eq!(get_request.duration_ms, 5000);
    assert_eq!(get_request.http_status_code, Some(404));
    assert!(!get_request.success);
    assert_eq!(get_request.retry_attempt, 1);
    assert_eq!(get_request.error_type, Some("not_found".to_string()));

    println!("✅ HTTP request metrics test passed");
    Ok(())
}
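
// The two summary tests below filter with a WebDAVMetricsQuery covering a
// one-hour window ending now; every field other than user/source and the
// window is left as None so the query matches everything the test created.
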
/// Test metrics summary generation
#[tokio::test]
async fn test_metrics_summary() -> Result<()> {
    let app_state = create_test_app_state().await
        .map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
    let user_id = create_test_user(&app_state.db).await?;
    let source_id = Some(create_test_source(&app_state.db, user_id).await?);

    let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());

    // Create multiple sessions with various outcomes
    for i in 0..3 {
        let session_id = metrics_tracker
            .start_session(
                user_id,
                source_id,
                format!("test_sync_{}", i),
                format!("/test_{}", i),
                None,
            )
            .await?;

        // Update counters
        metrics_tracker
            .update_session_counters(
                session_id,
                5, 5, 10, 10,
                1024 * (i + 1) as i64, // different sizes for each session
                512 * (i + 1) as i64,
            )
            .await?;

        // Record some requests
        for j in 0..5 {
            let success = i != 2 || j < 3; // make the last session partially fail
            let status_code = if success { Some(200) } else { Some(500) };

            metrics_tracker
                .record_http_request(
                    Some(session_id),
                    None,
                    user_id,
                    source_id,
                    WebDAVRequestType::Get,
                    WebDAVOperationType::Download,
                    format!("/test_{}/file_{}", i, j),
                    Duration::from_millis(100 * (j + 1) as u64),
                    None,
                    Some(1024),
                    status_code,
                    success,
                    0,
                    if !success { Some("server_error".to_string()) } else { None },
                    if !success { Some("Internal server error".to_string()) } else { None },
                    None,
                    None,
                )
                .await?;
        }

        let status = if i == 2 { WebDAVSyncStatus::Failed } else { WebDAVSyncStatus::Completed };
        metrics_tracker
            .finish_session(session_id, status, None)
            .await?;
    }
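
    // Expected totals: 3 sessions × 10 files = 30 files processed and
    // 3 × 5 = 15 HTTP requests. Sessions 0 and 1 succeed entirely, while
    // session 2 fails 2 of its 5 requests, so 13/15 ≈ 86.7% of requests
    // succeed.
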
    // Get metrics summary
    let query = WebDAVMetricsQuery {
        user_id: Some(user_id),
        source_id,
        start_time: Some(chrono::Utc::now() - chrono::Duration::hours(1)),
        end_time: Some(chrono::Utc::now()),
        limit: None,
        offset: None,
    };

    let summary = metrics_tracker
        .get_metrics_summary(&query)
        .await?
        .expect("Should have summary data");

    assert_eq!(summary.total_sessions, 3);
    assert_eq!(summary.successful_sessions, 2);
    assert_eq!(summary.failed_sessions, 1);
    assert_eq!(summary.total_files_processed, 30); // 10 files per session
    assert_eq!(summary.total_http_requests, 15); // 5 requests per session
    assert!(summary.request_success_rate > 0.0);
    assert!(summary.avg_request_duration_ms > 0.0);

    println!("✅ Metrics summary test passed");
    println!(
        "Summary: {} total sessions, {} successful, {} failed",
        summary.total_sessions, summary.successful_sessions, summary.failed_sessions
    );
    println!(
        "Success rate: {:.1}%, Avg request time: {:.0}ms",
        summary.request_success_rate,
        summary.avg_request_duration_ms
    );

    Ok(())
}

/// Test performance insights generation
#[tokio::test]
async fn test_performance_insights() -> Result<()> {
    let app_state = create_test_app_state().await
        .map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
    let user_id = create_test_user(&app_state.db).await?;
    let source_id = Some(create_test_source(&app_state.db, user_id).await?);

    let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());

    // Create a session with detailed metrics
    let session_id = metrics_tracker
        .start_session(user_id, source_id, "performance_test".to_string(), "/perf".to_string(), None)
        .await?;

    // Create multiple directories with different performance characteristics
    let dir_paths = ["/perf/fast", "/perf/slow", "/perf/medium"];
    let scan_times = [100, 5000, 1000]; // milliseconds

    for (path, scan_time) in dir_paths.iter().zip(scan_times.iter()) {
        let dir_metric_id = metrics_tracker
            .start_directory_scan(session_id, user_id, source_id, path.to_string(), 2, Some("/perf".to_string()))
            .await?;

        // Simulate directory processing (scaled down to keep the test fast)
        tokio::time::sleep(Duration::from_millis(*scan_time as u64 / 10)).await;

        metrics_tracker
            .update_directory_counters(dir_metric_id, 5, 1, 2048, 5, 0, 0)
            .await?;

        // Record some requests for this directory
        for i in 0..3 {
            metrics_tracker
                .record_http_request(
                    Some(session_id),
                    Some(dir_metric_id),
                    user_id,
                    source_id,
                    if i == 0 { WebDAVRequestType::PropFind } else { WebDAVRequestType::Get },
                    if i == 0 { WebDAVOperationType::Discovery } else { WebDAVOperationType::Download },
                    format!("{}/file_{}", path, i),
                    Duration::from_millis(*scan_time as u64 / 3),
                    None,
                    Some(1024),
                    Some(200),
                    true,
                    0,
                    None,
                    None,
                    None,
                    None,
                )
                .await?;
        }

        metrics_tracker
            .finish_directory_scan(dir_metric_id, "completed", None, None)
            .await?;
    }

    metrics_tracker
        .finish_session(session_id, WebDAVSyncStatus::Completed, None)
        .await?;
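
    // Expected shape of the insights: 3 directories × 3 requests = 9 total,
    // one PROPFIND per directory (3) and two GETs per directory (6), with
    // /perf/slow ranked slowest since it was given the largest simulated
    // scan time.
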
    // Get performance insights
    let insights = metrics_tracker
        .get_performance_insights(session_id, user_id)
        .await?
        .expect("Should have performance insights");

    assert_eq!(insights.session_id, session_id);
    assert!(insights.avg_directory_scan_time_ms > 0.0);
    assert_eq!(insights.slowest_directories.len(), 3);

    // Verify the slowest directory is ranked first
    let slowest = &insights.slowest_directories[0];
    assert_eq!(slowest.path, "/perf/slow");

    // Verify request distribution
    assert_eq!(insights.request_distribution.total_count, 9); // 3 requests per directory
    assert_eq!(insights.request_distribution.propfind_count, 3); // 1 per directory
    assert_eq!(insights.request_distribution.get_count, 6); // 2 per directory

    println!("✅ Performance insights test passed");
    println!("Avg directory scan time: {:.1}ms", insights.avg_directory_scan_time_ms);
    println!(
        "Slowest directory: {} ({}ms)",
        slowest.path, slowest.scan_duration_ms
    );

    Ok(())
}

/// Integration test demonstrating the complete metrics collection workflow
#[tokio::test]
async fn test_complete_metrics_workflow() -> Result<()> {
    let app_state = create_test_app_state().await
        .map_err(|e| anyhow::anyhow!("Failed to create test app state: {}", e))?;
    let user_id = create_test_user(&app_state.db).await?;
    let source_id = Some(create_test_source(&app_state.db, user_id).await?);

    let metrics_tracker = WebDAVMetricsTracker::new(app_state.db.clone());

    println!("🚀 Starting complete WebDAV metrics workflow test");

    // Step 1: Start sync session
    let session_id = metrics_tracker
        .start_session(
            user_id,
            source_id,
            "complete_test".to_string(),
            "/Documents".to_string(),
            Some(10),
        )
        .await?;

    println!("📊 Session {} started", session_id);

    // Step 2: Simulate directory discovery and processing
    let directories = [
        ("/Documents", 0),
        ("/Documents/2023", 1),
        ("/Documents/2023/Reports", 2),
        ("/Documents/2024", 1),
    ];

    let mut total_files = 0;
    let mut total_bytes = 0i64;
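
    // Each directory yields 3 + depth files (depths 0, 1, 2, 1 give
    // 3 + 4 + 5 + 4 = 16 discovered files, 16 × 256KB = 4MB). The last file
    // in every directory is made to fail, so 4 downloads fail and 12 succeed.
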
    for (dir_path, depth) in directories.iter() {
        let parent = if *depth == 0 {
            None
        } else {
            dir_path.rfind('/').map(|pos| dir_path[..pos].to_string())
        };

        let dir_metric_id = metrics_tracker
            .start_directory_scan(
                session_id,
                user_id,
                source_id,
                dir_path.to_string(),
                *depth,
                parent,
            )
            .await?;

        // Simulate discovery request
        let discovery_duration = Duration::from_millis(150 + *depth as u64 * 50);
        let files_in_dir = 3 + *depth;
        let bytes_in_dir = (files_in_dir as i64) * 1024 * 256; // 256KB per file

        // Track discovered totals here; failed downloads are subtracted once
        // when the session counters are finalized below.
        total_files += files_in_dir;
        total_bytes += bytes_in_dir;

        metrics_tracker
            .record_http_request(
                Some(session_id),
                Some(dir_metric_id),
                user_id,
                source_id,
                WebDAVRequestType::PropFind,
                WebDAVOperationType::Discovery,
                dir_path.to_string(),
                discovery_duration,
                Some(512),
                Some(2048),
                Some(207),
                true,
                0,
                None,
                None,
                None,
                None,
            )
            .await?;

        // Update directory counters with discovery results
        metrics_tracker
            .update_directory_counters(
                dir_metric_id,
                files_in_dir,
                1, // subdirectories
                bytes_in_dir,
                0, // files_processed (will update later)
                0, // files_skipped
                0, // files_failed
            )
            .await?;

        // Simulate file downloads
        for file_idx in 0..files_in_dir {
            let file_path = format!("{}/file_{}.pdf", dir_path, file_idx);
            let download_duration = Duration::from_millis(200 + file_idx as u64 * 100);
            let file_size = 256 * 1024; // 256KB

            let success = file_idx < files_in_dir - 1; // the last file fails
            let status_code = if success { Some(200) } else { Some(404) };

            metrics_tracker
                .record_http_request(
                    Some(session_id),
                    Some(dir_metric_id),
                    user_id,
                    source_id,
                    WebDAVRequestType::Get,
                    WebDAVOperationType::Download,
                    file_path,
                    download_duration,
                    None,
                    if success { Some(file_size) } else { None },
                    status_code,
                    success,
                    0,
                    if !success { Some("not_found".to_string()) } else { None },
                    if !success { Some("File not found".to_string()) } else { None },
                    None,
                    None,
                )
                .await?;

            if success {
                // Update counters for the successful download
                metrics_tracker
                    .update_directory_counters(
                        dir_metric_id,
                        0, 0, 0, // no change to discovery counts
                        1, // files_processed
                        0, // files_skipped
                        0, // files_failed
                    )
                    .await?;
            } else {
                // Update counters for the failed download
                metrics_tracker
                    .update_directory_counters(
                        dir_metric_id,
                        0, 0, 0, // no change to discovery counts
                        0, // files_processed
                        0, // files_skipped
                        1, // files_failed
                    )
                    .await?;

                metrics_tracker
                    .record_directory_error(dir_metric_id, "file_not_found", false)
                    .await?;
            }
        }

        // Record ETag activity
        metrics_tracker
            .record_etag_result(dir_metric_id, true, true)
            .await?;

        // Finish directory scan
        metrics_tracker
            .finish_directory_scan(dir_metric_id, "completed", None, None)
            .await?;

        println!("📁 Processed directory {} with {} files", dir_path, files_in_dir);
    }

    // Step 3: Update session with final counts
    metrics_tracker
        .update_session_counters(
            session_id,
            directories.len() as i32,
            directories.len() as i32,
            total_files,
            total_files - 4, // subtract the failed files
            total_bytes,
            total_bytes - (4 * 256 * 1024), // subtract the failed files' bytes
        )
        .await?;

    // Step 4: Finish session
    metrics_tracker
        .finish_session(session_id, WebDAVSyncStatus::Completed, None)
        .await?;

    println!("✅ Session completed successfully");
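
    // Expected request mix at this point: 4 PROPFINDs (one per directory)
    // and 16 GETs (one per discovered file), of which 4 fail with 404.
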
    // Step 5: Verify all metrics were recorded correctly

    // Check session details
    let session = metrics_tracker
        .get_session_details(session_id, user_id)
        .await?
        .expect("Session should exist");

    assert_eq!(session.status, "completed");
    assert!(session.duration_ms.is_some());
    assert!(session.total_http_requests > 0);
    assert!(session.successful_requests > 0);
    assert!(session.failed_requests > 0);

    // Check directory metrics
    let dir_metrics = metrics_tracker
        .get_directory_metrics(session_id, user_id)
        .await?;

    assert_eq!(dir_metrics.len(), directories.len());

    // Check request metrics
    let request_metrics = metrics_tracker
        .get_request_metrics(Some(session_id), None, user_id, None)
        .await?;

    assert!(!request_metrics.is_empty());
    let propfind_count = request_metrics.iter().filter(|r| r.request_type == "PROPFIND").count();
    let get_count = request_metrics.iter().filter(|r| r.request_type == "GET").count();
    assert_eq!(propfind_count, directories.len());
    assert!(get_count > 0);

    // Check performance insights
    let insights = metrics_tracker
        .get_performance_insights(session_id, user_id)
        .await?
        .expect("Should have insights");

    assert_eq!(insights.slowest_directories.len(), directories.len());
    assert!(insights.request_distribution.total_count > 0);
    assert!(insights.error_analysis.total_errors > 0);

    // Check summary metrics
    let query = WebDAVMetricsQuery {
        user_id: Some(user_id),
        source_id,
        start_time: Some(chrono::Utc::now() - chrono::Duration::hours(1)),
        end_time: Some(chrono::Utc::now()),
        limit: None,
        offset: None,
    };

    let summary = metrics_tracker
        .get_metrics_summary(&query)
        .await?
        .expect("Should have summary");

    assert_eq!(summary.total_sessions, 1);
    assert_eq!(summary.successful_sessions, 1);

    println!("📈 Metrics Summary:");
    println!(" - Sessions: {} total, {} successful", summary.total_sessions, summary.successful_sessions);
    println!(" - Files: {} processed", summary.total_files_processed);
    println!(
        " - Requests: {} total, {:.1}% success rate",
        summary.total_http_requests, summary.request_success_rate
    );
    println!(
        " - Performance: {:.0}ms avg request time",
        summary.avg_request_duration_ms
    );

    println!("🎉 Complete metrics workflow test passed!");

    Ok(())
}