use anyhow::Result;
use chrono::Utc;
use sqlx::Row;
use uuid::Uuid;

use crate::db::Database;
use crate::models::webdav_metrics::*;

impl Database {
    /// Create a new WebDAV sync session
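    ///
    /// A minimal usage sketch (hedged: assumes a `CreateWebDAVSyncSession` value built
    /// elsewhere and a `Database` handle named `db`; not a doctest):
    ///
    /// ```ignore
    /// // Returns the UUID of the newly inserted webdav_sync_sessions row.
    /// let session_id = db.create_webdav_sync_session(&new_session).await?;
    /// ```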
    pub async fn create_webdav_sync_session(&self, session: &CreateWebDAVSyncSession) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_sync_sessions (
                    user_id, source_id, sync_type, root_path, max_depth
                ) VALUES ($1, $2, $3, $4, $5)
                RETURNING id
                "#
            )
            .bind(session.user_id)
            .bind(session.source_id)
            .bind(&session.sync_type)
            .bind(&session.root_path)
            .bind(session.max_depth)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Update a WebDAV sync session with new metrics
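    ///
    /// Only fields set to `Some(..)` on the update struct overwrite existing values
    /// (each column falls back to itself via `COALESCE`). Illustrative sketch, assuming
    /// `UpdateWebDAVSyncSession` implements `Default`:
    ///
    /// ```ignore
    /// let update = UpdateWebDAVSyncSession {
    ///     files_processed: Some(42),
    ///     ..Default::default()
    /// };
    /// let updated = db.update_webdav_sync_session(session_id, &update).await?;
    /// assert!(updated); // false would mean no row matched the session id
    /// ```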
    pub async fn update_webdav_sync_session(
        &self,
        session_id: Uuid,
        update: &UpdateWebDAVSyncSession
    ) -> Result<bool> {
        self.with_retry(|| async {
            let rows_affected = sqlx::query(
                r#"
                UPDATE webdav_sync_sessions SET
                    directories_discovered = COALESCE($2, directories_discovered),
                    directories_processed = COALESCE($3, directories_processed),
                    files_discovered = COALESCE($4, files_discovered),
                    files_processed = COALESCE($5, files_processed),
                    total_bytes_discovered = COALESCE($6, total_bytes_discovered),
                    total_bytes_processed = COALESCE($7, total_bytes_processed),
                    directories_skipped = COALESCE($8, directories_skipped),
                    files_skipped = COALESCE($9, files_skipped),
                    skip_reasons = COALESCE($10, skip_reasons),
                    status = COALESCE($11, status),
                    final_error_message = COALESCE($12, final_error_message),
                    updated_at = NOW()
                WHERE id = $1
                "#
            )
            .bind(session_id)
            .bind(update.directories_discovered)
            .bind(update.directories_processed)
            .bind(update.files_discovered)
            .bind(update.files_processed)
            .bind(update.total_bytes_discovered)
            .bind(update.total_bytes_processed)
            .bind(update.directories_skipped)
            .bind(update.files_skipped)
            .bind(&update.skip_reasons)
            .bind(update.status.as_ref().map(|s| s.to_string()))
            .bind(&update.final_error_message)
            .execute(&self.pool)
            .await?;

            Ok(rows_affected.rows_affected() > 0)
        }).await
    }

    /// Finalize a WebDAV sync session (calculate final metrics)
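    ///
    /// Typically called once at the end of a sync, after all request metrics have been
    /// recorded (a sketch of the assumed call order, not prescribed by this module):
    ///
    /// ```ignore
    /// let session_id = db.create_webdav_sync_session(&new_session).await?;
    /// // ... record per-directory and per-request metrics during the sync ...
    /// db.finalize_webdav_sync_session(session_id).await?;
    /// ```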
    pub async fn finalize_webdav_sync_session(&self, session_id: Uuid) -> Result<()> {
        self.with_retry(|| async {
            // Debug: check how many request metrics exist for this session before finalizing
            let request_count: (i64,) = sqlx::query_as(
                "SELECT COUNT(*) FROM webdav_request_metrics WHERE session_id = $1"
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("Finalizing session {}: found {} HTTP requests", session_id, request_count.0);

            // Aggregate the request metrics with a direct query instead of the PostgreSQL
            // helper function, to avoid transaction isolation issues
            let (successful_requests, failed_requests, total_requests, network_time_ms): (i64, i64, i64, i64) = sqlx::query_as(
                r#"
                SELECT
                    COUNT(*) FILTER (WHERE success = true),
                    COUNT(*) FILTER (WHERE success = false),
                    COUNT(*),
                    CAST(COALESCE(SUM(duration_ms), 0) AS BIGINT)
                FROM webdav_request_metrics
                WHERE session_id = $1
                "#
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("Direct aggregation - total: {}, successful: {}, failed: {}", total_requests, successful_requests, failed_requests);

            // Get the slowest operation
            let slowest_operation: Option<(i64, String)> = sqlx::query_as(
                "SELECT duration_ms, target_path FROM webdav_request_metrics WHERE session_id = $1 ORDER BY duration_ms DESC LIMIT 1"
            )
            .bind(session_id)
            .fetch_optional(&self.pool)
            .await?;

            // Update the session directly with the aggregated values
            sqlx::query(
                r#"
                UPDATE webdav_sync_sessions SET
                    completed_at = NOW(),
                    duration_ms = EXTRACT(EPOCH FROM (NOW() - started_at)) * 1000,
                    total_http_requests = $2,
                    successful_requests = $3,
                    failed_requests = $4,
                    retry_attempts = 0,
                    network_time_ms = $5,
                    slowest_operation_ms = $6,
                    slowest_operation_path = $7,
                    processing_rate_files_per_sec = CASE
                        WHEN files_processed > 0 AND EXTRACT(EPOCH FROM (NOW() - started_at)) > 0
                        THEN files_processed / EXTRACT(EPOCH FROM (NOW() - started_at))
                        ELSE 0
                    END,
                    avg_file_size_bytes = CASE
                        WHEN files_processed > 0
                        THEN total_bytes_processed / files_processed
                        ELSE 0
                    END,
                    status = CASE
                        WHEN status = 'in_progress' THEN 'completed'
                        ELSE status
                    END,
                    updated_at = NOW()
                WHERE id = $1
                "#
            )
            .bind(session_id)
            .bind(total_requests as i32)
            .bind(successful_requests as i32)
            .bind(failed_requests as i32)
            .bind(network_time_ms)
            .bind(slowest_operation.as_ref().map(|(ms, _)| *ms))
            .bind(slowest_operation.as_ref().map(|(_, path)| path.as_str()))
            .execute(&self.pool)
            .await?;

            // Check the session after finalization
            let session_after: (i32, i32, i32) = sqlx::query_as(
                "SELECT total_http_requests, successful_requests, failed_requests FROM webdav_sync_sessions WHERE id = $1"
            )
            .bind(session_id)
            .fetch_one(&self.pool)
            .await?;

            tracing::debug!("After finalization - total: {}, successful: {}, failed: {}", session_after.0, session_after.1, session_after.2);

            Ok(())
        }).await
    }

    /// Get a WebDAV sync session by ID
    pub async fn get_webdav_sync_session(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Option<WebDAVSyncSession>> {
        self.with_retry(|| async {
            let session = sqlx::query_as::<_, WebDAVSyncSession>(
                "SELECT * FROM webdav_sync_sessions WHERE id = $1 AND user_id = $2"
            )
            .bind(session_id)
            .bind(user_id)
            .fetch_optional(&self.pool)
            .await?;

            Ok(session)
        }).await
    }

    /// List WebDAV sync sessions with optional filtering
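    ///
    /// Example filter (hedged: the field set of `WebDAVMetricsQuery` is assumed from how
    /// it is read below; unset times default to the last 7 days and the limit is capped
    /// at 1000):
    ///
    /// ```ignore
    /// let query = WebDAVMetricsQuery {
    ///     user_id: Some(user_id),
    ///     source_id: None,
    ///     start_time: None,
    ///     end_time: None,
    ///     limit: Some(50),
    ///     offset: Some(0),
    /// };
    /// let sessions = db.list_webdav_sync_sessions(&query).await?;
    /// ```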
    pub async fn list_webdav_sync_sessions(
        &self,
        query: &WebDAVMetricsQuery
    ) -> Result<Vec<WebDAVSyncSession>> {
        self.with_retry(|| async {
            let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(7));
            let end_time = query.end_time.unwrap_or_else(|| Utc::now());
            let limit = query.limit.unwrap_or(100).min(1000); // Cap at 1000
            let offset = query.offset.unwrap_or(0);

            let sessions = sqlx::query_as::<_, WebDAVSyncSession>(
                r#"
                SELECT * FROM webdav_sync_sessions
                WHERE started_at BETWEEN $1 AND $2
                    AND ($3::UUID IS NULL OR user_id = $3)
                    AND ($4::UUID IS NULL OR source_id = $4)
                ORDER BY started_at DESC
                LIMIT $5 OFFSET $6
                "#
            )
            .bind(start_time)
            .bind(end_time)
            .bind(query.user_id)
            .bind(query.source_id)
            .bind(limit as i64)
            .bind(offset as i64)
            .fetch_all(&self.pool)
            .await?;

            Ok(sessions)
        }).await
    }

    /// Create a new WebDAV directory metric
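    ///
    /// Usually called when directory traversal starts, so later per-request metrics can
    /// reference the returned id via `directory_metric_id` (sketch; the metric value is
    /// assumed to be built by the sync worker):
    ///
    /// ```ignore
    /// let dir_metric_id = db.create_webdav_directory_metric(&dir_metric).await?;
    /// ```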
    pub async fn create_webdav_directory_metric(
        &self,
        metric: &CreateWebDAVDirectoryMetric
    ) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_directory_metrics (
                    session_id, user_id, source_id, directory_path,
                    directory_depth, parent_directory_path
                ) VALUES ($1, $2, $3, $4, $5, $6)
                RETURNING id
                "#
            )
            .bind(metric.session_id)
            .bind(metric.user_id)
            .bind(metric.source_id)
            .bind(&metric.directory_path)
            .bind(metric.directory_depth)
            .bind(&metric.parent_directory_path)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Update a WebDAV directory metric
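    ///
    /// As with the session update, unset (`None`) fields are left untouched via `COALESCE`;
    /// the first update after the scan also stamps `completed_at` and `scan_duration_ms`.
    /// Sketch, assuming `UpdateWebDAVDirectoryMetric` implements `Default`:
    ///
    /// ```ignore
    /// let update = UpdateWebDAVDirectoryMetric {
    ///     files_found: Some(128),
    ///     ..Default::default()
    /// };
    /// db.update_webdav_directory_metric(metric_id, &update).await?;
    /// ```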
    pub async fn update_webdav_directory_metric(
        &self,
        metric_id: Uuid,
        update: &UpdateWebDAVDirectoryMetric
    ) -> Result<bool> {
        self.with_retry(|| async {
            let rows_affected = sqlx::query(
                r#"
                UPDATE webdav_directory_metrics SET
                    completed_at = CASE
                        WHEN completed_at IS NULL THEN NOW()
                        ELSE completed_at
                    END,
                    scan_duration_ms = CASE
                        WHEN completed_at IS NULL THEN EXTRACT(EPOCH FROM (NOW() - started_at)) * 1000
                        ELSE scan_duration_ms
                    END,
                    files_found = COALESCE($2, files_found),
                    subdirectories_found = COALESCE($3, subdirectories_found),
                    total_size_bytes = COALESCE($4, total_size_bytes),
                    files_processed = COALESCE($5, files_processed),
                    files_skipped = COALESCE($6, files_skipped),
                    files_failed = COALESCE($7, files_failed),
                    http_requests_made = COALESCE($8, http_requests_made),
                    propfind_requests = COALESCE($9, propfind_requests),
                    get_requests = COALESCE($10, get_requests),
                    errors_encountered = COALESCE($11, errors_encountered),
                    error_types = COALESCE($12, error_types),
                    warnings_count = COALESCE($13, warnings_count),
                    etag_matches = COALESCE($14, etag_matches),
                    etag_mismatches = COALESCE($15, etag_mismatches),
                    cache_hits = COALESCE($16, cache_hits),
                    cache_misses = COALESCE($17, cache_misses),
                    status = COALESCE($18, status),
                    skip_reason = COALESCE($19, skip_reason),
                    error_message = COALESCE($20, error_message)
                WHERE id = $1
                "#
            )
            .bind(metric_id)
            .bind(update.files_found)
            .bind(update.subdirectories_found)
            .bind(update.total_size_bytes)
            .bind(update.files_processed)
            .bind(update.files_skipped)
            .bind(update.files_failed)
            .bind(update.http_requests_made)
            .bind(update.propfind_requests)
            .bind(update.get_requests)
            .bind(update.errors_encountered)
            .bind(&update.error_types)
            .bind(update.warnings_count)
            .bind(update.etag_matches)
            .bind(update.etag_mismatches)
            .bind(update.cache_hits)
            .bind(update.cache_misses)
            .bind(&update.status)
            .bind(&update.skip_reason)
            .bind(&update.error_message)
            .execute(&self.pool)
            .await?;

            Ok(rows_affected.rows_affected() > 0)
        }).await
    }

    /// Get directory metrics for a session
    pub async fn get_webdav_directory_metrics(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Vec<WebDAVDirectoryMetric>> {
        self.with_retry(|| async {
            let metrics = sqlx::query_as::<_, WebDAVDirectoryMetric>(
                r#"
                SELECT * FROM webdav_directory_metrics
                WHERE session_id = $1 AND user_id = $2
                ORDER BY started_at ASC
                "#
            )
            .bind(session_id)
            .bind(user_id)
            .fetch_all(&self.pool)
            .await?;

            Ok(metrics)
        }).await
    }

    /// Record a WebDAV HTTP request metric
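    ///
    /// Intended to be called once per WebDAV HTTP request (PROPFIND, GET, ...) so that
    /// `finalize_webdav_sync_session` can aggregate the rows later. Sketch, assuming a
    /// `CreateWebDAVRequestMetric` value built by the sync worker:
    ///
    /// ```ignore
    /// let metric_id = db.record_webdav_request_metric(&request_metric).await?;
    /// ```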
    pub async fn record_webdav_request_metric(
        &self,
        metric: &CreateWebDAVRequestMetric
    ) -> Result<Uuid> {
        self.with_retry(|| async {
            let row = sqlx::query(
                r#"
                INSERT INTO webdav_request_metrics (
                    session_id, directory_metric_id, user_id, source_id,
                    request_type, operation_type, target_path, duration_ms,
                    request_size_bytes, response_size_bytes, http_status_code,
                    dns_lookup_ms, tcp_connect_ms, tls_handshake_ms, time_to_first_byte_ms,
                    success, retry_attempt, error_type, error_message,
                    server_header, dav_header, etag_value, last_modified,
                    content_type, remote_ip, user_agent,
                    completed_at
                ) VALUES (
                    $1, $2, $3, $4, $5::webdav_request_type, $6::webdav_operation_type, $7, $8, $9, $10, $11, $12, $13, $14, $15,
                    $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, NOW()
                )
                RETURNING id
                "#
            )
            .bind(metric.session_id)
            .bind(metric.directory_metric_id)
            .bind(metric.user_id)
            .bind(metric.source_id)
            .bind(metric.request_type.to_string().as_str())
            .bind(metric.operation_type.to_string().as_str())
            .bind(&metric.target_path)
            .bind(metric.duration_ms)
            .bind(metric.request_size_bytes)
            .bind(metric.response_size_bytes)
            .bind(metric.http_status_code)
            .bind(metric.dns_lookup_ms)
            .bind(metric.tcp_connect_ms)
            .bind(metric.tls_handshake_ms)
            .bind(metric.time_to_first_byte_ms)
            .bind(metric.success)
            .bind(metric.retry_attempt)
            .bind(&metric.error_type)
            .bind(&metric.error_message)
            .bind(&metric.server_header)
            .bind(&metric.dav_header)
            .bind(&metric.etag_value)
            .bind(metric.last_modified)
            .bind(&metric.content_type)
            .bind(&metric.remote_ip)
            .bind(&metric.user_agent)
            .fetch_one(&self.pool)
            .await?;

            Ok(row.get::<Uuid, _>("id"))
        }).await
    }

    /// Get request metrics for a session or directory
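    ///
    /// Both `session_id` and `directory_metric_id` are optional filters; passing `None`
    /// for both returns the user's most recent requests up to the limit (capped at 10,000):
    ///
    /// ```ignore
    /// // All request metrics for one session, newest first.
    /// let metrics = db.get_webdav_request_metrics(Some(session_id), None, user_id, Some(500)).await?;
    /// ```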
    pub async fn get_webdav_request_metrics(
        &self,
        session_id: Option<Uuid>,
        directory_metric_id: Option<Uuid>,
        user_id: Uuid,
        limit: Option<i32>
    ) -> Result<Vec<WebDAVRequestMetric>> {
        self.with_retry(|| async {
            let limit = limit.unwrap_or(1000).min(10000); // Cap at 10k

            let metrics = sqlx::query_as::<_, WebDAVRequestMetric>(
                r#"
                SELECT
                    id, session_id, directory_metric_id, user_id, source_id,
                    request_type::TEXT as request_type,
                    operation_type::TEXT as operation_type,
                    target_path, started_at, completed_at, duration_ms,
                    request_size_bytes, response_size_bytes, http_status_code,
                    dns_lookup_ms, tcp_connect_ms, tls_handshake_ms, time_to_first_byte_ms,
                    success, retry_attempt, error_type, error_message,
                    server_header, dav_header, etag_value, last_modified,
                    content_type, remote_ip, user_agent
                FROM webdav_request_metrics
                WHERE user_id = $1
                    AND ($2::UUID IS NULL OR session_id = $2)
                    AND ($3::UUID IS NULL OR directory_metric_id = $3)
                ORDER BY started_at DESC
                LIMIT $4
                "#
            )
            .bind(user_id)
            .bind(session_id)
            .bind(directory_metric_id)
            .bind(limit as i64)
            .fetch_all(&self.pool)
            .await?;

            Ok(metrics)
        }).await
    }

    /// Get WebDAV metrics summary for a time period
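    ///
    /// Illustrative sketch (defaults to the last 24 hours when no time range is given;
    /// the field name follows the SELECT list below):
    ///
    /// ```ignore
    /// if let Some(summary) = db.get_webdav_metrics_summary(&query).await? {
    ///     println!("request success rate: {:.1}%", summary.request_success_rate);
    /// }
    /// ```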
    pub async fn get_webdav_metrics_summary(
        &self,
        query: &WebDAVMetricsQuery
    ) -> Result<Option<WebDAVMetricsSummary>> {
        self.with_retry(|| async {
            let start_time = query.start_time.unwrap_or_else(|| Utc::now() - chrono::Duration::days(1));
            let end_time = query.end_time.unwrap_or_else(|| Utc::now());

            // Try the get_webdav_metrics_summary() database function first; fall back to a
            // manual aggregate query if the call fails
            let summary = match sqlx::query_as::<_, WebDAVMetricsSummary>(
                r#"
                SELECT
                    total_sessions,
                    successful_sessions,
                    failed_sessions,
                    total_files_processed,
                    total_bytes_processed,
                    avg_session_duration_sec,
                    avg_processing_rate,
                    total_http_requests,
                    request_success_rate,
                    avg_request_duration_ms,
                    common_error_types
                FROM get_webdav_metrics_summary($1, $2, $3, $4)
                "#
            )
            .bind(query.user_id)
            .bind(query.source_id)
            .bind(start_time)
            .bind(end_time)
            .fetch_optional(&self.pool)
            .await {
                Ok(result) => result,
                Err(e) => {
                    tracing::error!("Failed to call get_webdav_metrics_summary function: {}", e);
                    // Fall back to a manual aggregate query if the function fails
                    sqlx::query_as::<_, WebDAVMetricsSummary>(
                        r#"
                        SELECT
                            COALESCE(COUNT(*)::INTEGER, 0) as total_sessions,
                            COALESCE(COUNT(*) FILTER (WHERE s.status = 'completed')::INTEGER, 0) as successful_sessions,
                            COALESCE(COUNT(*) FILTER (WHERE s.status = 'failed')::INTEGER, 0) as failed_sessions,
                            COALESCE(SUM(s.files_processed), 0)::BIGINT as total_files_processed,
                            COALESCE(SUM(s.total_bytes_processed), 0)::BIGINT as total_bytes_processed,
                            COALESCE(AVG(s.duration_ms / 1000.0), 0.0)::DOUBLE PRECISION as avg_session_duration_sec,
                            COALESCE(AVG(s.processing_rate_files_per_sec), 0.0)::DOUBLE PRECISION as avg_processing_rate,
                            COALESCE(SUM(s.total_http_requests), 0)::BIGINT as total_http_requests,
                            CASE
                                WHEN SUM(s.total_http_requests) > 0
                                THEN (SUM(s.successful_requests)::DOUBLE PRECISION / SUM(s.total_http_requests) * 100.0)
                                ELSE 0.0
                            END as request_success_rate,
                            COALESCE((SELECT AVG(duration_ms) FROM webdav_request_metrics r
                                      WHERE r.started_at BETWEEN $3 AND $4
                                        AND ($1 IS NULL OR r.user_id = $1)
                                        AND ($2 IS NULL OR r.source_id = $2)), 0.0)::DOUBLE PRECISION as avg_request_duration_ms,
                            COALESCE((SELECT jsonb_agg(jsonb_build_object('error_type', error_type, 'count', error_count))
                                      FROM (
                                          SELECT error_type, COUNT(*) as error_count
                                          FROM webdav_request_metrics r
                                          WHERE r.started_at BETWEEN $3 AND $4
                                            AND r.success = false
                                            AND r.error_type IS NOT NULL
                                            AND ($1 IS NULL OR r.user_id = $1)
                                            AND ($2 IS NULL OR r.source_id = $2)
                                          GROUP BY error_type
                                          ORDER BY error_count DESC
                                          LIMIT 10
                                      ) error_summary), '[]'::jsonb) as common_error_types
                        FROM webdav_sync_sessions s
                        WHERE s.started_at BETWEEN $3 AND $4
                            AND ($1 IS NULL OR s.user_id = $1)
                            AND ($2 IS NULL OR s.source_id = $2)
                        "#
                    )
                    .bind(query.user_id)
                    .bind(query.source_id)
                    .bind(start_time)
                    .bind(end_time)
                    .fetch_optional(&self.pool)
                    .await?
                }
            };

            Ok(summary)
        }).await
    }

    /// Get performance insights for a specific session
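    ///
    /// Sketch of reading the computed insights (returns `None` when the session does not
    /// exist or belongs to another user):
    ///
    /// ```ignore
    /// if let Some(insights) = db.get_webdav_performance_insights(session_id, user_id).await? {
    ///     for dir in &insights.slowest_directories {
    ///         println!("{}: {} ms", dir.path, dir.scan_duration_ms);
    ///     }
    /// }
    /// ```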
    pub async fn get_webdav_performance_insights(
        &self,
        session_id: Uuid,
        user_id: Uuid
    ) -> Result<Option<WebDAVPerformanceInsights>> {
        self.with_retry(|| async {
            // Get session info
            let session = self.get_webdav_sync_session(session_id, user_id).await?;
            if session.is_none() {
                return Ok(None);
            }

            // Get directory metrics
            let directory_metrics = self.get_webdav_directory_metrics(session_id, user_id).await?;

            // Calculate the average scan time over directories that reported a duration
            let scan_durations: Vec<i64> = directory_metrics.iter()
                .filter_map(|d| d.scan_duration_ms)
                .collect();
            let avg_directory_scan_time_ms = if !scan_durations.is_empty() {
                scan_durations.iter().sum::<i64>() as f64 / scan_durations.len() as f64
            } else {
                0.0
            };

            // Find slowest directories
            let mut slowest_directories: Vec<SlowDirectoryInfo> = directory_metrics.iter()
                .filter_map(|d| {
                    d.scan_duration_ms.map(|duration| SlowDirectoryInfo {
                        path: d.directory_path.clone(),
                        scan_duration_ms: duration,
                        files_count: d.files_found,
                        size_bytes: d.total_size_bytes,
                        error_count: d.errors_encountered,
                    })
                })
                .collect();
            slowest_directories.sort_by(|a, b| b.scan_duration_ms.cmp(&a.scan_duration_ms));
            slowest_directories.truncate(10); // Top 10

            // Get request metrics for analysis
            let request_metrics = self.get_webdav_request_metrics(
                Some(session_id),
                None,
                user_id,
                Some(10000)
            ).await?;

            // Calculate request type distribution
            let propfind_requests: Vec<_> = request_metrics.iter()
                .filter(|r| r.request_type == "PROPFIND")
                .collect();
            let get_requests: Vec<_> = request_metrics.iter()
                .filter(|r| r.request_type == "GET")
                .collect();

            let request_distribution = RequestTypeDistribution {
                propfind_count: propfind_requests.len() as i32,
                get_count: get_requests.len() as i32,
                head_count: request_metrics.iter().filter(|r| r.request_type == "HEAD").count() as i32,
                options_count: request_metrics.iter().filter(|r| r.request_type == "OPTIONS").count() as i32,
                total_count: request_metrics.len() as i32,
                avg_propfind_duration_ms: if !propfind_requests.is_empty() {
                    propfind_requests.iter().map(|r| r.duration_ms).sum::<i64>() as f64 / propfind_requests.len() as f64
                } else { 0.0 },
                avg_get_duration_ms: if !get_requests.is_empty() {
                    get_requests.iter().map(|r| r.duration_ms).sum::<i64>() as f64 / get_requests.len() as f64
                } else { 0.0 },
            };

            // Analyze errors
            let total_errors = request_metrics.iter().filter(|r| !r.success).count() as i32;
            let network_errors = request_metrics.iter()
                .filter(|r| !r.success && r.error_type.as_ref().map_or(false, |e| e.contains("network") || e.contains("timeout")))
                .count() as i32;
            let auth_errors = request_metrics.iter()
                .filter(|r| !r.success && r.http_status_code.map_or(false, |s| s == 401 || s == 403))
                .count() as i32;
            let timeout_errors = request_metrics.iter()
                .filter(|r| !r.success && r.error_type.as_ref().map_or(false, |e| e.contains("timeout")))
                .count() as i32;
            let server_errors = request_metrics.iter()
                .filter(|r| !r.success && r.http_status_code.map_or(false, |s| s >= 500))
                .count() as i32;

            // Find the most problematic paths
            let mut path_errors: std::collections::HashMap<String, i32> = std::collections::HashMap::new();
            for metric in &request_metrics {
                if !metric.success {
                    *path_errors.entry(metric.target_path.clone()).or_insert(0) += 1;
                }
            }
            let mut most_problematic_paths: Vec<_> = path_errors.into_iter().collect();
            most_problematic_paths.sort_by(|a, b| b.1.cmp(&a.1));
            let most_problematic_paths: Vec<String> = most_problematic_paths.into_iter()
                .take(5)
                .map(|(path, _)| path)
                .collect();

            let error_analysis = ErrorAnalysis {
                total_errors,
                network_errors,
                auth_errors,
                timeout_errors,
                server_errors,
                most_problematic_paths,
            };

            // Create simple performance trends (would be more sophisticated in production)
            let performance_trends = PerformanceTrends {
                requests_per_minute: vec![], // Would calculate from time-series data
                avg_response_time_trend: vec![],
                error_rate_trend: vec![],
                throughput_mbps_trend: vec![],
            };

            Ok(Some(WebDAVPerformanceInsights {
                session_id,
                avg_directory_scan_time_ms,
                slowest_directories,
                request_distribution,
                error_analysis,
                performance_trends,
            }))
        }).await
    }

    /// Clean up old WebDAV metrics (for maintenance)
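    ///
    /// Intended for periodic maintenance; directory and request metrics are expected to
    /// be removed along with their sessions (assuming `ON DELETE CASCADE` foreign keys,
    /// which this module does not define):
    ///
    /// ```ignore
    /// // Keep 90 days of finished sessions; returns the number of sessions deleted.
    /// let deleted = db.cleanup_old_webdav_metrics(90).await?;
    /// ```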
    pub async fn cleanup_old_webdav_metrics(&self, days_to_keep: i32) -> Result<u64> {
        self.with_retry(|| async {
            let cutoff_date = Utc::now() - chrono::Duration::days(days_to_keep as i64);

            let result = sqlx::query(
                r#"
                DELETE FROM webdav_sync_sessions
                WHERE created_at < $1
                    AND status IN ('completed', 'failed', 'cancelled')
                "#
            )
            .bind(cutoff_date)
            .execute(&self.pool)
            .await?;

            Ok(result.rows_affected())
        }).await
    }
}