From 389e868580d55867475f5677b8618b3854248c32 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Thu, 4 Sep 2025 20:46:22 -0600 Subject: [PATCH 01/71] Activate github/workflow --- .env.sample | 105 +++++++----------- .../ci.yml | 0 .../deploy-production.yml | 0 .../deploy-staging.yml | 0 ENVIRONMENT_SETUP.md | 10 +- README.md | 87 +++++++++------ S3_SETUP.md | 2 +- aws-secret-example.json | 5 +- aws-secret-production.json | 4 +- aws-secret-staging.json | 4 +- config-examples.md | 24 ++-- src/config/constants.rs | 7 +- src/config/secrets.rs | 26 ++++- src/config/settings.rs | 26 ++++- src/handlers/file/exists.rs | 4 + src/main.rs | 14 ++- src/server/app_state.rs | 15 +-- src/server/service.rs | 13 +-- src/server/startup.rs | 11 +- src/utils/auth.rs | 12 +- src/utils/validator.rs | 52 ++++++++- 21 files changed, 261 insertions(+), 160 deletions(-) rename .github/{workflows.deactivated => workflows}/ci.yml (100%) rename .github/{workflows.deactivated => workflows}/deploy-production.yml (100%) rename .github/{workflows.deactivated => workflows}/deploy-staging.yml (100%) diff --git a/.env.sample b/.env.sample index 90074c3..a6813bd 100644 --- a/.env.sample +++ b/.env.sample @@ -1,88 +1,67 @@ -# COSMIC Sync Server - Sample Environment Variables +# COSMIC Sync Server - .env.sample -# Server configuration +# Environment +ENVIRONMENT=development + +# Server SERVER_HOST=0.0.0.0 -SERVER_PORT=50051 +GRPC_PORT=50051 WORKER_THREADS=4 - -# Authentication AUTH_TOKEN_EXPIRY_HOURS=24 - -# Limits MAX_CONCURRENT_REQUESTS=100 MAX_FILE_SIZE=52428800 +HEARTBEAT_INTERVAL_SECS=10 -# Database +# Database (MySQL) +DB_HOST=localhost +DB_PORT=3306 +DB_NAME=cosmic_sync DB_USER=root DB_PASS=recognizer -DB_NAME=cosmic_sync -DB_HOST=127.0.0.1 -DB_PORT=3306 -DB_POOL=10 +DB_POOL=5 DATABASE_CONNECTION_TIMEOUT=30 DATABASE_LOG_QUERIES=false -# OAuth -OAUTH_CLIENT_ID= -OAUTH_CLIENT_SECRET= -OAUTH_REDIRECT_URI=http://localhost:50051/oauth/callback -OAUTH_AUTH_URL= -OAUTH_TOKEN_URL= -OAUTH_USER_INFO_URL= +# Storage (choose one) +# STORAGE_TYPE=database +# STORAGE_TYPE=s3 +STORAGE_TYPE=database +# STORAGE_PATH=/tmp/cosmic-sync + +# S3 (if STORAGE_TYPE=s3) +AWS_REGION=us-east-2 +AWS_S3_BUCKET=cosmic-sync-files +S3_KEY_PREFIX=files/ +# For S3-compatible (e.g., MinIO) +# S3_ENDPOINT_URL=http://localhost:9000 +S3_FORCE_PATH_STYLE=true +S3_TIMEOUT_SECONDS=30 +S3_MAX_RETRIES=3 +# Optional static credentials (IAM role preferred in cloud) +# AWS_ACCESS_KEY_ID= +# AWS_SECRET_ACCESS_KEY= +# AWS_SESSION_TOKEN= # Logging LOG_LEVEL=info -LOG_TO_FILE=false +LOG_TO_FILE=true LOG_FILE=logs/cosmic-sync-server.log LOG_MAX_FILE_SIZE=10485760 LOG_MAX_BACKUPS=5 - -# Storage (S3/MinIO) -S3_BUCKET=cosmic-sync-dev -S3_REGION=us-east-1 -S3_ENDPOINT=http://127.0.0.1:9000 -S3_FORCE_PATH_STYLE=true -S3_ACCESS_KEY=minioadmin -S3_SECRET_KEY=minioadmin -S3_KEY_PREFIX=files/ -S3_TIMEOUT_SECONDS=30 -S3_MAX_RETRIES=2 +LOG_FORMAT=text # Feature flags -COSMIC_SYNC_DEV_MODE=1 -COSMIC_SYNC_TEST_MODE=0 -METRICS_ENABLED=1 -STORAGE_ENCRYPTION=1 +COSMIC_SYNC_DEV_MODE=false +COSMIC_SYNC_TEST_MODE=false +COSMIC_SYNC_DEBUG_MODE=false +ENABLE_METRICS=false +STORAGE_ENCRYPTION=true +REQUEST_VALIDATION=true -# Message Broker (RabbitMQ) +# Message broker (RabbitMQ) RABBITMQ_ENABLED=false RABBITMQ_URL=amqp://guest:guest@127.0.0.1:5672/%2f RABBITMQ_EXCHANGE=cosmic.sync -RABBITMQ_QUEUE_PREFIX=cosmic -RABBITMQ_PREFETCH=64 +RABBITMQ_QUEUE_PREFIX=cosmic.sync +RABBITMQ_PREFETCH=200 RABBITMQ_DURABLE=true - -# Redis (idempotency for consumer) -# Enable by building consumer with 
--features redis-cache -REDIS_URL=redis://127.0.0.1:6379/0 -IDEMPOTENCY_TTL_SECS=3600 - -# Watcher folder validation controls -# Set to 1 to allow numeric-only segments without checks -WATCHER_FOLDER_ALLOW_NUMERIC=0 -# Comma-separated list of allowed numeric-only segments (e.g., build numbers) -WATCHER_FOLDER_NUMERIC_SEGMENT_WHITELIST= -# Regex that allowed numeric-only segments must match (leave empty to disable) -WATCHER_FOLDER_NUMERIC_SEGMENT_REGEX= - -# Retention policy -# Logical delete TTL (seconds) before physical purge -FILE_TTL_SECS=2592000 -# Keep this many newest revisions per (file_path, group) -MAX_FILE_REVISIONS=10 - -# Server-side encoding key (hex, 32-byte for AES-256-GCM) -SERVER_ENCODE_KEY= - -# Environment marker (optional; used by Config::build in main) -ENVIRONMENT=development diff --git a/.github/workflows.deactivated/ci.yml b/.github/workflows/ci.yml similarity index 100% rename from .github/workflows.deactivated/ci.yml rename to .github/workflows/ci.yml diff --git a/.github/workflows.deactivated/deploy-production.yml b/.github/workflows/deploy-production.yml similarity index 100% rename from .github/workflows.deactivated/deploy-production.yml rename to .github/workflows/deploy-production.yml diff --git a/.github/workflows.deactivated/deploy-staging.yml b/.github/workflows/deploy-staging.yml similarity index 100% rename from .github/workflows.deactivated/deploy-staging.yml rename to .github/workflows/deploy-staging.yml diff --git a/ENVIRONMENT_SETUP.md b/ENVIRONMENT_SETUP.md index da949aa..bf19d8a 100644 --- a/ENVIRONMENT_SETUP.md +++ b/ENVIRONMENT_SETUP.md @@ -41,7 +41,7 @@ AWS Secrets Manager를 사용하여 설정을 관리합니다. ```json { "DB_HOST": "staging-db.example.com", - "S3_BUCKET": "cosmic-sync-staging-files", + "AWS_S3_BUCKET": "cosmic-sync-staging-files", "OAUTH_CLIENT_ID": "cosmic-sync-staging", "LOG_LEVEL": "info" } @@ -64,7 +64,7 @@ AWS Secrets Manager를 사용하여 설정을 관리합니다. 
```json { "DB_HOST": "prod-db.example.com", - "S3_BUCKET": "cosmic-sync-production-files", + "AWS_S3_BUCKET": "cosmic-sync-production-files", "OAUTH_CLIENT_ID": "cosmic-sync-production", "LOG_LEVEL": "warn" } @@ -91,8 +91,8 @@ Staging/Production 환경에서는 다음 IAM 권한이 필요합니다: "secretsmanager:GetSecretValue" ], "Resource": [ - "arn:aws:secretsmanager:us-east-2:*:secret:staging/so-dod/cosmic-sync/config-*", - "arn:aws:secretsmanager:us-east-2:*:secret:production/pop-os/cosmic-sync/config-*" + "arn:aws:secretsmanager:us-east-2:*:secret:staging/so-dod/cosmic-sync/config*", + "arn:aws:secretsmanager:us-east-2:*:secret:production/pop-os/cosmic-sync/config*" ] }, { @@ -152,12 +152,14 @@ ENV ENVIRONMENT=development ```dockerfile ENV ENVIRONMENT=staging ENV AWS_REGION=us-east-2 +ENV USE_AWS_SECRET_MANAGER=true ``` ### Production ```dockerfile ENV ENVIRONMENT=production ENV AWS_REGION=us-east-2 +ENV USE_AWS_SECRET_MANAGER=true ``` ## 🔍 로그 확인 diff --git a/README.md b/README.md index c411b30..3d3ef5f 100755 --- a/README.md +++ b/README.md @@ -27,32 +27,56 @@ Create a `.env` file in the project root or copy the provided `.env.sample`: cp .env.sample .env ``` -Then edit the `.env` file to configure the following settings: +Then edit the `.env` file to configure the following settings (keys unified): ``` -# Server configuration +# Environment +ENVIRONMENT=development + +# Server SERVER_HOST=0.0.0.0 -SERVER_PORT=50051 +GRPC_PORT=50051 WORKER_THREADS=4 - -# Authentication AUTH_TOKEN_EXPIRY_HOURS=24 - -# Request limits MAX_CONCURRENT_REQUESTS=100 -MAX_FILE_SIZE=52428800 # 50MB in bytes - -# Database configuration -DATABASE_URL=mysql://username:password@localhost:3306/cosmic_sync - -# Logging configuration +MAX_FILE_SIZE=52428800 +HEARTBEAT_INTERVAL_SECS=10 + +# Database (MySQL) +DB_HOST=localhost +DB_PORT=3306 +DB_NAME=cosmic_sync +DB_USER=username +DB_PASS=password +DB_POOL=5 +DATABASE_CONNECTION_TIMEOUT=30 +DATABASE_LOG_QUERIES=false + +# Storage +STORAGE_TYPE=database # or s3 +# STORAGE_PATH=/tmp/cosmic-sync + +# S3 (if STORAGE_TYPE=s3) +AWS_REGION=us-east-2 +AWS_S3_BUCKET=cosmic-sync-files +S3_KEY_PREFIX=files/ +# S3_ENDPOINT_URL=http://localhost:9000 +S3_FORCE_PATH_STYLE=true +S3_TIMEOUT_SECONDS=30 +S3_MAX_RETRIES=3 +# AWS_ACCESS_KEY_ID=... +# AWS_SECRET_ACCESS_KEY=... +# AWS_SESSION_TOKEN=... 
+ +# Logging LOG_LEVEL=info LOG_TO_FILE=true LOG_FILE=logs/cosmic-sync-server.log -LOG_MAX_FILE_SIZE=10485760 # 10MB in bytes +LOG_MAX_FILE_SIZE=10485760 LOG_MAX_BACKUPS=5 +LOG_FORMAT=text # json for production -# OAuth configuration +# OAuth OAUTH_CLIENT_ID=your_client_id OAUTH_CLIENT_SECRET=your_client_secret OAUTH_REDIRECT_URI=http://localhost:50051/oauth/callback @@ -61,21 +85,22 @@ OAUTH_TOKEN_URL=https://oauth-provider.com/token OAUTH_USER_INFO_URL=https://oauth-provider.com/userinfo # Feature flags -test_MODE=false -DEBUG_MODE=false -METRICS_ENABLED=true +COSMIC_SYNC_DEV_MODE=false +COSMIC_SYNC_TEST_MODE=false +COSMIC_SYNC_DEBUG_MODE=false +ENABLE_METRICS=false STORAGE_ENCRYPTION=true - -# Message broker (RabbitMQ) -MESSAGE_BROKER_ENABLED=false -MESSAGE_BROKER_URL=amqps://user:pass@host:5671/vhost -MESSAGE_BROKER_EXCHANGE=cosmic.sync -MESSAGE_BROKER_QUEUE_PREFIX=cosmic -MESSAGE_BROKER_PREFETCH=64 -MESSAGE_BROKER_DURABLE=true -# Consumer tuning (optional) -RETRY_TTL_MS=5000 -MAX_RETRIES=3 +REQUEST_VALIDATION=true + +# RabbitMQ +RABBITMQ_ENABLED=false +RABBITMQ_URL=amqp://guest:guest@127.0.0.1:5672/%2f +RABBITMQ_EXCHANGE=cosmic.sync +RABBITMQ_QUEUE_PREFIX=cosmic.sync +RABBITMQ_PREFETCH=200 +RABBITMQ_DURABLE=true +# RETRY_TTL_MS=5000 +# MAX_RETRIES=3 ``` ### Database Preparation @@ -109,10 +134,10 @@ To run the server with specific environment variables and debug options: ```bash # Run with development mode, debug mode, and debug logging -RUST_LOG=debug sudo -E /home/yongjinchong/.cargo/bin/cargo run +LOG_LEVEL=debug LOG_FORMAT=text sudo -E /home/yongjinchong/.cargo/bin/cargo run # Run the compiled binary directly with root privileges -RUST_LOG=debug sudo -E ./target/debug/cosmic-sync-server +LOG_LEVEL=debug LOG_FORMAT=text sudo -E ./target/debug/cosmic-sync-server ``` ## Project Structure diff --git a/S3_SETUP.md b/S3_SETUP.md index 32800aa..fa2050e 100644 --- a/S3_SETUP.md +++ b/S3_SETUP.md @@ -16,7 +16,7 @@ S3를 사용하기 위해서는 AWS 자격 증명을 설정해야 합니다. 
다 export AWS_REGION=us-east-2 export AWS_ACCESS_KEY_ID=your_access_key_id export AWS_SECRET_ACCESS_KEY=your_secret_access_key -export S3_BUCKET=cosmic-sync-files +export AWS_S3_BUCKET=cosmic-sync-files ``` ### 방법 2: AWS Credentials 파일 사용 diff --git a/aws-secret-example.json b/aws-secret-example.json index 1958d72..82e7206 100644 --- a/aws-secret-example.json +++ b/aws-secret-example.json @@ -9,14 +9,15 @@ "DATABASE_LOG_QUERIES": "false", "SERVER_HOST": "0.0.0.0", - "SERVER_PORT": "50051", + "GRPC_PORT": "50051", "WORKER_THREADS": "8", "AUTH_TOKEN_EXPIRY_HOURS": "24", "MAX_FILE_SIZE": "104857600", "MAX_CONCURRENT_REQUESTS": "1000", "STORAGE_TYPE": "s3", - "S3_BUCKET": "cosmic-sync-prod-files", + "AWS_REGION": "us-east-2", + "AWS_S3_BUCKET": "cosmic-sync-prod-files", "S3_KEY_PREFIX": "files/", "S3_TIMEOUT_SECONDS": "30", "S3_MAX_RETRIES": "3", diff --git a/aws-secret-production.json b/aws-secret-production.json index 6705471..c06c2d7 100644 --- a/aws-secret-production.json +++ b/aws-secret-production.json @@ -9,7 +9,7 @@ "DATABASE_LOG_QUERIES": "false", "SERVER_HOST": "0.0.0.0", - "SERVER_PORT": "50051", + "GRPC_PORT": "50051", "WORKER_THREADS": "16", "AUTH_TOKEN_EXPIRY_HOURS": "24", "MAX_FILE_SIZE": "52428800", @@ -17,7 +17,7 @@ "STORAGE_TYPE": "s3", "AWS_REGION": "us-east-2", - "S3_BUCKET": "cosmic-sync-production-files", + "AWS_S3_BUCKET": "cosmic-sync-production-files", "S3_KEY_PREFIX": "files/", "S3_TIMEOUT_SECONDS": "30", "S3_MAX_RETRIES": "3", diff --git a/aws-secret-staging.json b/aws-secret-staging.json index f956011..264e9f0 100644 --- a/aws-secret-staging.json +++ b/aws-secret-staging.json @@ -9,7 +9,7 @@ "DATABASE_LOG_QUERIES": "false", "SERVER_HOST": "0.0.0.0", - "SERVER_PORT": "50051", + "GRPC_PORT": "50051", "WORKER_THREADS": "8", "AUTH_TOKEN_EXPIRY_HOURS": "24", "MAX_FILE_SIZE": "52428800", @@ -17,7 +17,7 @@ "STORAGE_TYPE": "s3", "AWS_REGION": "us-east-2", - "S3_BUCKET": "cosmic-sync-staging-files", + "AWS_S3_BUCKET": "cosmic-sync-staging-files", "S3_KEY_PREFIX": "files/", "S3_TIMEOUT_SECONDS": "30", "S3_MAX_RETRIES": "3", diff --git a/config-examples.md b/config-examples.md index ca6043e..8608404 100644 --- a/config-examples.md +++ b/config-examples.md @@ -4,17 +4,17 @@ ## 환경 설정 -서버는 `ENV` 환경 변수를 통해 현재 환경을 감지합니다: +서버는 `ENVIRONMENT` 환경 변수를 통해 현재 환경을 감지합니다: -- `development` (기본값): 로컬 환경 변수 사용 -- `staging`: AWS Secrets Manager 사용 -- `production`: AWS Secrets Manager 사용 +- `development` (기본값): 로컬 `.env` 또는 환경 변수 사용 +- `staging`: AWS Secrets Manager 사용 (us-east-2) +- `production`: AWS Secrets Manager 사용 (us-east-2) ## 로컬 개발 환경 (.env 파일) ```bash # Environment Configuration -ENV=development +ENVIRONMENT=development # Database DB_HOST=localhost @@ -28,7 +28,7 @@ DATABASE_LOG_QUERIES=false # Server SERVER_HOST=0.0.0.0 -SERVER_PORT=50051 +GRPC_PORT=50051 WORKER_THREADS=4 AUTH_TOKEN_EXPIRY_HOURS=24 MAX_FILE_SIZE=52428800 @@ -36,10 +36,10 @@ MAX_CONCURRENT_REQUESTS=100 # Storage STORAGE_TYPE=database -STORAGE_PATH=/tmp/cosmic-sync +# STORAGE_PATH=/tmp/cosmic-sync # S3 Configuration (if using S3 storage) -S3_BUCKET=cosmic-sync-files +AWS_S3_BUCKET=cosmic-sync-files S3_KEY_PREFIX=files/ S3_ENDPOINT_URL=http://localhost:9000 S3_FORCE_PATH_STYLE=true @@ -64,8 +64,8 @@ REQUEST_VALIDATION=true # Container Mode (optional) COSMIC_SYNC_USE_CONTAINER=false -# Rust Logging -RUST_LOG=cosmic_sync_server=info,info +# Log format +LOG_FORMAT=text ``` ## AWS Secrets Manager 설정 (Staging/Production) @@ -74,7 +74,7 @@ RUST_LOG=cosmic_sync_server=info,info ```bash ENV=staging # or production 
-AWS_REGION=us-east-1
+AWS_REGION=us-east-2
 AWS_SECRET_NAME=cosmic-sync-server-config
 ```
 
@@ -111,7 +111,7 @@ aws secretsmanager create-secret \
   "MAX_CONCURRENT_REQUESTS": "1000",
 
   "STORAGE_TYPE": "s3",
-  "S3_BUCKET": "cosmic-sync-prod-files",
+  "AWS_S3_BUCKET": "cosmic-sync-prod-files",
   "S3_KEY_PREFIX": "files/",
   "S3_TIMEOUT_SECONDS": "30",
   "S3_MAX_RETRIES": "3",
diff --git a/src/config/constants.rs b/src/config/constants.rs
index 3f7e682..3de5a3e 100644
--- a/src/config/constants.rs
+++ b/src/config/constants.rs
@@ -1,7 +1,7 @@
 // Centralized configuration constants
 
 // Network / gRPC
-pub const DEFAULT_GRPC_HOST: &str = "[::1]";
+pub const DEFAULT_GRPC_HOST: &str = "0.0.0.0";
 pub const DEFAULT_GRPC_PORT: u16 = 50051;
 // HTTP (Actix) default port retained for compatibility with HTTP endpoints
 pub const DEFAULT_HTTP_PORT: u16 = 8080;
@@ -34,7 +34,7 @@ pub const DEFAULT_LOG_MAX_FILE_SIZE_BYTES: usize = 10 * 1024 * 1024;
 pub const DEFAULT_LOG_MAX_BACKUPS: usize = 5;
 
 // S3
-pub const DEFAULT_S3_REGION: &str = "us-east-1";
+pub const DEFAULT_S3_REGION: &str = "us-east-2";
 pub const DEFAULT_S3_BUCKET: &str = "cosmic-sync-files";
 pub const DEFAULT_S3_KEY_PREFIX: &str = "files/";
 pub const DEFAULT_S3_FORCE_PATH_STYLE: bool = false;
@@ -47,6 +47,9 @@ pub const HTTP_KEEPALIVE_SECS: u64 = 60;
 pub const HTTP2_KEEPALIVE_INTERVAL_SECS: u64 = 30;
 pub const HTTP2_KEEPALIVE_TIMEOUT_SECS: u64 = 90;
 
+// Heartbeat
+pub const DEFAULT_HEARTBEAT_INTERVAL_SECS: u64 = 10;
+
 // CORS
 pub const DEFAULT_CORS_MAX_AGE_SECS: u64 = 3600;
 
diff --git a/src/config/secrets.rs b/src/config/secrets.rs
index 4676060..6149e23 100644
--- a/src/config/secrets.rs
+++ b/src/config/secrets.rs
@@ -277,10 +277,10 @@ impl ConfigLoader {
         use super::settings::S3Config;
 
         // Use infrastructure standard environment variable names
-        let region = self.get_config_value("AWS_S3_REGION", Some("us-west-2")).await
-            .unwrap_or_else(|| "us-west-2".to_string());
-        let bucket = self.get_config_value("AWS_S3_BUCKET", Some("cosmic-sync-files")).await
-            .unwrap_or_else(|| "cosmic-sync-files".to_string());
+        let region = self.get_config_value("AWS_REGION", Some(crate::config::constants::DEFAULT_S3_REGION)).await
+            .unwrap_or_else(|| crate::config::constants::DEFAULT_S3_REGION.to_string());
+        let bucket = self.get_config_value("AWS_S3_BUCKET", Some(crate::config::constants::DEFAULT_S3_BUCKET)).await
+            .unwrap_or_else(|| crate::config::constants::DEFAULT_S3_BUCKET.to_string());
 
         let key_prefix = self.get_config_value("S3_KEY_PREFIX", Some("files/")).await
             .unwrap_or_else(|| "files/".to_string());
@@ -293,7 +293,9 @@ impl ConfigLoader {
             .map(|v| v == "1" || v.to_lowercase() == "true")
             .unwrap_or(false);
 
-        let use_secret_manager = self.environment.is_cloud();
+        let use_secret_manager = self.get_config_value("USE_AWS_SECRET_MANAGER", None).await
+            .map(|v| v == "1" || v.to_lowercase() == "true")
+            .unwrap_or(self.environment.is_cloud());
         let secret_name = self.get_secret_name();
 
         let timeout_seconds = self.get_config_value("S3_TIMEOUT_SECONDS", Some("30")).await
@@ -346,6 +348,10 @@ impl ConfigLoader {
             .and_then(|r| r.parse::<usize>().ok())
             .unwrap_or(crate::config::constants::DEFAULT_MAX_CONCURRENT_REQUESTS);
 
+        let heartbeat_interval_secs = self.get_config_value("HEARTBEAT_INTERVAL_SECS", Some(&crate::config::constants::DEFAULT_HEARTBEAT_INTERVAL_SECS.to_string())).await
+            .and_then(|s| s.parse::<u64>().ok())
+            .unwrap_or(crate::config::constants::DEFAULT_HEARTBEAT_INTERVAL_SECS);
+
         ServerConfig {
             host,
             port,
@@ -354,6 +360,7 @@ impl ConfigLoader {
             auth_token_expiry_hours,
             max_file_size,
             max_concurrent_requests,
+            heartbeat_interval_secs,
         }
     }
 
@@ -368,7 +375,7 @@ impl ConfigLoader {
 
         let region = self.get_config_value("AWS_REGION", Some(crate::config::constants::DEFAULT_S3_REGION)).await
             .unwrap_or_else(|| crate::config::constants::DEFAULT_S3_REGION.to_string());
-        let bucket = self.get_config_value("S3_BUCKET", Some(crate::config::constants::DEFAULT_S3_BUCKET)).await
+        let bucket = self.get_config_value("AWS_S3_BUCKET", Some(crate::config::constants::DEFAULT_S3_BUCKET)).await
             .unwrap_or_else(|| crate::config::constants::DEFAULT_S3_BUCKET.to_string());
         let key_prefix = self.get_config_value("S3_KEY_PREFIX", Some(crate::config::constants::DEFAULT_S3_KEY_PREFIX)).await
             .unwrap_or_else(|| crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string());
@@ -435,6 +442,8 @@ impl ConfigLoader {
         let max_backups = self.get_config_value("LOG_MAX_BACKUPS", Some("5")).await
             .and_then(|b| b.parse::<usize>().ok())
             .unwrap_or(5);
+        let format = self.get_config_value("LOG_FORMAT", Some("text")).await
+            .unwrap_or_else(|| "text".to_string());
 
         LoggingConfig {
             level,
@@ -442,6 +451,7 @@ impl ConfigLoader {
             log_file,
             max_file_size,
             max_backups,
+            format,
         }
     }
 
@@ -467,6 +477,9 @@ impl ConfigLoader {
         let transport_encrypt_metadata = self.get_config_value("COSMIC_TRANSPORT_ENCRYPT_METADATA", Some("true")).await
             .map(|v| v == "1" || v.to_lowercase() == "true")
             .unwrap_or(true);
+        let dev_mode = self.get_config_value("COSMIC_SYNC_DEV_MODE", Some("false")).await
+            .map(|v| v == "1" || v.to_lowercase() == "true")
+            .unwrap_or(false);
 
         FeatureFlags {
             test_mode,
@@ -475,6 +488,7 @@ impl ConfigLoader {
             storage_encryption,
             request_validation,
             transport_encrypt_metadata,
+            dev_mode,
         }
     }
 
diff --git a/src/config/settings.rs b/src/config/settings.rs
index ea0926a..32efa00 100755
--- a/src/config/settings.rs
+++ b/src/config/settings.rs
@@ -75,6 +75,8 @@ pub struct ServerConfig {
     pub max_file_size: usize,
     /// Maximum number of concurrent requests
     pub max_concurrent_requests: usize,
+    /// Heartbeat interval seconds for streaming keepalive
+    pub heartbeat_interval_secs: u64,
 }
 
 impl Default for ServerConfig {
@@ -87,6 +89,7 @@ impl Default for ServerConfig {
             auth_token_expiry_hours: crate::config::constants::DEFAULT_AUTH_TOKEN_EXPIRY_HOURS,
             max_file_size: crate::config::constants::DEFAULT_MAX_FILE_SIZE_BYTES,
             max_concurrent_requests: crate::config::constants::DEFAULT_MAX_CONCURRENT_REQUESTS,
+            heartbeat_interval_secs: crate::config::constants::DEFAULT_HEARTBEAT_INTERVAL_SECS,
         }
     }
 }
@@ -95,7 +98,7 @@ impl ServerConfig {
     /// Load configuration from environment variables or use defaults
     pub fn load() -> Self {
         let host = env::var("SERVER_HOST").unwrap_or_else(|_| crate::config::constants::DEFAULT_GRPC_HOST.to_string());
-        let port = env::var("SERVER_PORT")
+        let port = env::var("GRPC_PORT")
             .ok()
             .and_then(|p| p.parse::<u16>().ok())
             .unwrap_or(crate::config::constants::DEFAULT_GRPC_PORT);
@@ -116,6 +119,10 @@ impl ServerConfig {
             .ok()
             .and_then(|r| r.parse::<usize>().ok())
             .unwrap_or(crate::config::constants::DEFAULT_MAX_CONCURRENT_REQUESTS);
+        let heartbeat_interval_secs = env::var("HEARTBEAT_INTERVAL_SECS")
+            .ok()
+            .and_then(|s| s.parse::<u64>().ok())
+            .unwrap_or(crate::config::constants::DEFAULT_HEARTBEAT_INTERVAL_SECS);
 
         Self {
             host,
@@ -125,6 +132,7 @@ impl ServerConfig {
             auth_token_expiry_hours,
             max_file_size,
             max_concurrent_requests,
+            heartbeat_interval_secs,
         }
     }
 
@@ -233,6 +241,8 @@ pub struct LoggingConfig {
     pub max_file_size: usize,
     /// Maximum number of backups to keep
     pub max_backups: usize,
+    /// Log output format: text or json
+    pub format: String,
 }
 
 impl Default for LoggingConfig {
@@ -243,6 +253,7 @@ impl Default for LoggingConfig {
             log_file: crate::config::constants::DEFAULT_LOG_FILE.to_string(),
             max_file_size: crate::config::constants::DEFAULT_LOG_MAX_FILE_SIZE_BYTES,
             max_backups: crate::config::constants::DEFAULT_LOG_MAX_BACKUPS,
+            format: "text".to_string(),
         }
     }
 }
@@ -263,6 +274,7 @@ impl LoggingConfig {
             .ok()
             .and_then(|b| b.parse::<usize>().ok())
             .unwrap_or(crate::config::constants::DEFAULT_LOG_MAX_BACKUPS);
+        let format = env::var("LOG_FORMAT").unwrap_or_else(|_| "text".to_string());
 
         Self {
             level,
@@ -270,6 +282,7 @@ impl LoggingConfig {
             log_file,
             max_file_size,
             max_backups,
+            format,
         }
     }
 }
@@ -289,6 +302,8 @@ pub struct FeatureFlags {
     pub request_validation: bool,
     /// Encrypt metadata (path/name) on transport to clients
     pub transport_encrypt_metadata: bool,
+    /// Enable developer mode (unifies COSMIC_SYNC_DEV_MODE)
+    pub dev_mode: bool,
 }
 
 impl Default for FeatureFlags {
@@ -300,6 +315,7 @@ impl Default for FeatureFlags {
             storage_encryption: true,
             request_validation: true,
             transport_encrypt_metadata: true,
+            dev_mode: false,
         }
     }
 }
@@ -325,7 +341,10 @@ impl FeatureFlags {
         let transport_encrypt_metadata = env::var("COSMIC_TRANSPORT_ENCRYPT_METADATA")
             .map(|v| v == "1" || v.to_lowercase() == "true")
             .unwrap_or(true);
-        
+        let dev_mode = env::var("COSMIC_SYNC_DEV_MODE")
+            .map(|v| v == "1" || v.to_lowercase() == "true")
+            .unwrap_or(false);
+
         Self {
             test_mode,
             debug_mode,
@@ -333,6 +352,7 @@ impl FeatureFlags {
             storage_encryption,
             request_validation,
             transport_encrypt_metadata,
+            dev_mode,
         }
     }
 }
@@ -460,7 +480,7 @@ impl S3Config {
     pub fn load() -> Self {
         Self {
             region: env::var("AWS_REGION").unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_REGION.to_string()),
-            bucket: env::var("S3_BUCKET").unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_BUCKET.to_string()),
+            bucket: env::var("AWS_S3_BUCKET").unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_BUCKET.to_string()),
             key_prefix: env::var("S3_KEY_PREFIX").unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string()),
             access_key_id: env::var("AWS_ACCESS_KEY_ID").ok(),
             secret_access_key: env::var("AWS_SECRET_ACCESS_KEY").ok(),
diff --git a/src/handlers/file/exists.rs b/src/handlers/file/exists.rs
index afddc3e..e91235f 100644
--- a/src/handlers/file/exists.rs
+++ b/src/handlers/file/exists.rs
@@ -50,5 +50,9 @@ pub async fn handle_check_file_exists(handler: &FileHandler, req: CheckFileExist
+
+
+
+
diff --git a/src/main.rs b/src/main.rs
index 0b83f79..070d29b 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -4,11 +4,12 @@ use dotenv::dotenv;
 use tracing::{info, error, warn, instrument};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 use cosmic_sync_server::config::constants;
+use cosmic_sync_server::config::settings::LoggingConfig;
 
 use cosmic_sync_server::{
     server::startup::start_server,
     config::{Config, Environment, ConfigLoader},
-    config::settings::{ServerConfig, DatabaseConfig, LoggingConfig, FeatureFlags, StorageConfig},
+    config::settings::{ServerConfig, DatabaseConfig, FeatureFlags, StorageConfig},
     error::{Result, SyncError},
     storage::init_storage,
     container::ContainerBuilder,
@@ -96,13 +97,14 @@ async fn start_legacy() -> Result<()> {
 /// Initialize structured logging with performance optimizations
 #[instrument]
 fn init_tracing() -> Result<()> {
-    let log_level = env::var("RUST_LOG")
-        .unwrap_or_else(|_| "cosmic_sync_server=info,info".to_string());
+    // Use unified app logging config
+    let logging_cfg = LoggingConfig::load();
+    let log_level = logging_cfg.level;
 
     let subscriber = tracing_subscriber::registry()
         .with(
             tracing_subscriber::EnvFilter::try_from_default_env()
-                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(&log_level))
+                .unwrap_or_else(|_| tracing_subscriber::EnvFilter::new(&format!("cosmic_sync_server={},info", log_level)))
         )
         .with(
             tracing_subscriber::fmt::layer()
@@ -113,8 +115,8 @@ fn init_tracing() -> Result<()> {
             .compact()
         );
 
-    // JSON logging for production
-    if env::var("LOG_FORMAT").unwrap_or_default() == "json" {
+    // JSON logging for production (unified via LOG_FORMAT)
+    if logging_cfg.format.to_lowercase() == "json" {
         let json_layer = tracing_subscriber::fmt::layer()
             .json()
             .with_current_span(false)
diff --git a/src/server/app_state.rs b/src/server/app_state.rs
index d9b0ae7..c1fd75c 100644
--- a/src/server/app_state.rs
+++ b/src/server/app_state.rs
@@ -235,16 +235,11 @@ impl AppState {
         storage: Arc<dyn Storage>,
         config: &ServerConfig,
     ) -> Result<Self> {
-        // create simple Config object (reuse server config; others default)
-        let full_config = Config {
-            server: config.clone(),
-            database: crate::config::settings::DatabaseConfig::default(),
-            logging: crate::config::settings::LoggingConfig::default(),
-            features: crate::config::settings::FeatureFlags::default(),
-            storage: crate::config::settings::StorageConfig::default(),
-            message_broker: crate::config::settings::MessageBrokerConfig::load(),
-            server_encode_key: None,
-        };
+        // Load full config via async loader to respect Secrets Manager and unified keys
+        let mut full_config = crate::config::settings::Config::load_async()
+            .await
+            .unwrap_or_else(|_| crate::config::settings::Config::load());
+        full_config.server = config.clone();
 
         // initialize notification manager
         let notification_manager = Arc::new(NotificationManager::new_with_storage(storage.clone()));
diff --git a/src/server/service.rs b/src/server/service.rs
index 67496db..33a5983 100755
--- a/src/server/service.rs
+++ b/src/server/service.rs
@@ -708,8 +708,8 @@ impl SyncService for SyncServiceImpl {
         }
 
         // Device validation
-        let is_dev_mode = std::env::var("COSMIC_SYNC_DEV_MODE").unwrap_or_default() == "1";
-        let is_test_mode = std::env::var("COSMIC_SYNC_TEST_MODE").unwrap_or_default() == "1";
+        let is_dev_mode = self.app_state.config.features.dev_mode;
+        let is_test_mode = self.app_state.config.features.test_mode;
 
         if !is_dev_mode && !is_test_mode {
             let is_valid_device = match self.app_state.storage.validate_device(&account_hash, &device_hash).await {
@@ -782,8 +782,8 @@ impl SyncService for SyncServiceImpl {
         }
 
         // Device validation
-        let is_dev_mode = std::env::var("COSMIC_SYNC_DEV_MODE").unwrap_or_default() == "1";
-        let is_test_mode = std::env::var("COSMIC_SYNC_TEST_MODE").unwrap_or_default() == "1";
+        let is_dev_mode = self.app_state.config.features.dev_mode;
+        let is_test_mode = self.app_state.config.features.test_mode;
 
         if !is_dev_mode && !is_test_mode {
             let is_valid_device = match self.app_state.storage.validate_device(&account_hash, &device_hash).await {
@@ -822,10 +822,7 @@ impl SyncService for SyncServiceImpl {
         info!("Registered watcher group update subscriber: {}", sub_key);
 
         // Send an initial message to verify the connection (serves as a PING)
-        let heartbeat_interval = std::env::var("HEARTBEAT_INTERVAL_SECS")
-            .ok()
-            .and_then(|s| s.parse::<u64>().ok())
-            .unwrap_or(10); // default shortened to 10s (was 30s)
+        let heartbeat_interval = self.app_state.config.server.heartbeat_interval_secs;
 
         // Task that monitors client connection state
         let notification_manager_clone = self.app_state.notification_manager.clone();
diff --git a/src/server/startup.rs b/src/server/startup.rs
index 2287211..1587662 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -51,9 +51,14 @@ pub async fn start_server(config: ServerConfig) -> Result<()> {
     } else {
         tracing::info!("Effective storage_path: (memory fallback if not set)");
     }
-    tracing::info!("Features: dev_mode={}, test_mode={}", 
-        std::env::var("COSMIC_SYNC_DEV_MODE").unwrap_or_default(),
-        std::env::var("COSMIC_SYNC_TEST_MODE").unwrap_or_default());
+    tracing::info!(
+        "Features: dev_mode={}, test_mode={}, metrics={}, request_validation={}, transport_encrypt_metadata={}",
+        app_state.config.features.dev_mode,
+        app_state.config.features.test_mode,
+        app_state.config.features.metrics_enabled,
+        app_state.config.features.request_validation,
+        app_state.config.features.transport_encrypt_metadata
+    );
 
     // Run servers with graceful shutdown
     tokio::select! {
diff --git a/src/utils/auth.rs b/src/utils/auth.rs
index 56be85e..59b1ea0 100644
--- a/src/utils/auth.rs
+++ b/src/utils/auth.rs
@@ -2,7 +2,17 @@ use std::env;
 use tracing::{debug, error};
 use tonic::Status;
 
-/// Check if development or test mode is enabled
+/// Check if development or test mode is enabled (from FeatureFlags)
+pub fn is_dev_or_test_mode_from_flags(flags: &crate::config::settings::FeatureFlags) -> bool {
+    if flags.dev_mode || flags.test_mode {
+        debug!("Dev/Test mode enabled: skipping device validation");
+        true
+    } else {
+        false
+    }
+}
+
+/// Backward-compatible helper: reads from env if flags not available
 pub fn is_dev_or_test_mode() -> bool {
     let is_dev_mode = env::var("COSMIC_SYNC_DEV_MODE").unwrap_or_default() == "1";
     let is_test_mode = env::var("COSMIC_SYNC_TEST_MODE").unwrap_or_default() == "1";
diff --git a/src/utils/validator.rs b/src/utils/validator.rs
index b4c30dd..847fc7f 100644
--- a/src/utils/validator.rs
+++ b/src/utils/validator.rs
@@ -21,19 +21,23 @@ pub fn validate_watcher_folder(folder: &str) -> Result<(), String> {
     use regex::Regex;
     use std::collections::HashSet;
 
-    let allow_numeric = std::env::var("WATCHER_FOLDER_ALLOW_NUMERIC").unwrap_or_else(|_| "0".to_string()) == "1";
+    const ENV_ALLOW_NUMERIC: &str = "WATCHER_FOLDER_ALLOW_NUMERIC";
+    const ENV_WHITELIST: &str = "WATCHER_FOLDER_NUMERIC_SEGMENT_WHITELIST";
+    const ENV_REGEX: &str = "WATCHER_FOLDER_NUMERIC_SEGMENT_REGEX";
+
+    let allow_numeric = std::env::var(ENV_ALLOW_NUMERIC).unwrap_or_else(|_| "0".to_string()) == "1";
     if allow_numeric {
         return Ok(());
     }
 
-    let whitelist_env = std::env::var("WATCHER_FOLDER_NUMERIC_SEGMENT_WHITELIST").unwrap_or_default();
+    let whitelist_env = std::env::var(ENV_WHITELIST).unwrap_or_default();
     let whitelist: HashSet<String> = whitelist_env
         .split(',')
         .map(|s| s.trim().to_string())
         .filter(|s| !s.is_empty())
         .collect();
 
-    let regex_opt = match std::env::var("WATCHER_FOLDER_NUMERIC_SEGMENT_REGEX") {
+    let regex_opt = match std::env::var(ENV_REGEX) {
         Ok(pat) if !pat.trim().is_empty() => Regex::new(pat.trim()).ok(),
         _ => None,
     };
@@ -44,10 +48,50 @@ pub fn validate_watcher_folder(folder: &str) -> Result<(), String> {
         if is_numeric_only {
             let allowed = whitelist.contains(seg) || regex_opt.as_ref().map_or(false, |re| re.is_match(seg));
             if !allowed {
-                return Err(format!("Watcher folder contains numeric-only segment '{}' which is not allowed (set WATCHER_FOLDER_ALLOW_NUMERIC=1 or whitelist/regex)", seg));
+                return Err(format!("Watcher folder contains numeric-only segment '{}' which is not allowed (set {}=1 or whitelist/regex)", seg, ENV_ALLOW_NUMERIC));
             }
         }
     }
 
     Ok(())
+}
+
+/// Validate watcher folder path using options provided by caller (from Secrets/Config)
+pub fn validate_watcher_folder_with_options(
+    folder: &str,
+    allow_numeric: bool,
+    whitelist_csv: &str,
+    regex_pattern: Option<&str>,
+) -> Result<(), String> {
+    use regex::Regex;
+    use std::collections::HashSet;
+
+    if allow_numeric { return Ok(()); }
+
+    let whitelist: HashSet<String> = whitelist_csv
+        .split(',')
+        .map(|s| s.trim().to_string())
+        .filter(|s| !s.is_empty())
+        .collect();
+
+    let regex_opt = match regex_pattern {
+        Some(pat) if !pat.trim().is_empty() => Regex::new(pat.trim()).ok(),
+        _ => None,
+    };
+
+    for seg in folder.split('/') {
+        if seg.is_empty() || seg == "~" || seg == "." || seg == ".." { continue; }
+        let is_numeric_only = seg.chars().all(|c| c.is_ascii_digit());
+        if is_numeric_only {
+            let allowed = whitelist.contains(seg) || regex_opt.as_ref().map_or(false, |re| re.is_match(seg));
+            if !allowed {
+                return Err(format!(
+                    "Watcher folder contains numeric-only segment '{}' which is not allowed (enable allow_numeric or whitelist/regex)",
+                    seg
+                ));
+            }
+        }
+    }
+
+    Ok(())
+}
\ No newline at end of file

From 539e1a908602de9fe9a117500032f00dcc800e15 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Mon, 8 Sep 2025 13:56:57 -0600
Subject: [PATCH 02/71] Remove conflict markers / merge import block

---
 src/main.rs | 13 -------------
 1 file changed, 13 deletions(-)

diff --git a/src/main.rs b/src/main.rs
index 608a229..a2899b8 100644
--- a/src/main.rs
+++ b/src/main.rs
@@ -2,25 +2,12 @@ use cosmic_sync_server::config::constants;
 use cosmic_sync_server::config::settings::LoggingConfig;
 use dotenv::dotenv;
 use std::env;
-use std::sync::Arc;
 use tracing::{error, info, instrument, warn};
 use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt};
 
-<<<<<<< HEAD
-use cosmic_sync_server::config::constants;
-use cosmic_sync_server::config::settings::LoggingConfig;
-
-use cosmic_sync_server::{
-    server::startup::start_server,
-    config::{Config, Environment, ConfigLoader},
-    config::settings::{ServerConfig, DatabaseConfig, FeatureFlags, StorageConfig},
-    error::{Result, SyncError},
-    storage::init_storage,
-=======
 use cosmic_sync_server::{
     config::settings::{DatabaseConfig, FeatureFlags, ServerConfig, StorageConfig},
     config::{Config, ConfigLoader, Environment},
->>>>>>> 19a199c13fd9f5851074270388fa72e2254c92e9
     container::ContainerBuilder,
     error::{Result, SyncError},
     server::startup::{start_server, start_server_with_storage},

From f63361a64ab5c3ca3201802b8472033f7c8aa132 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Mon, 8 Sep 2025 15:09:01 -0600
Subject: [PATCH 03/71] Bypass rabbit_consumer code

---
 Dockerfile                 |  2 +-
 src/bin/rabbit_consumer.rs | 28 +++++++++++++++++-----------
 2 files changed, 18 insertions(+), 12 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index a097f55..2b3b573 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -37,7 +37,7 @@ COPY src ./src
 
 # Build the application
 RUN cargo build --release
-
+# RUN cargo build --release --bin cosmic-sync-server
 
 # Runtime stage
 FROM debian:bookworm-slim
diff --git a/src/bin/rabbit_consumer.rs b/src/bin/rabbit_consumer.rs
index c3dfe2e..a4ad7a8 100644
--- a/src/bin/rabbit_consumer.rs
+++ b/src/bin/rabbit_consumer.rs
@@ -2,7 +2,8 @@ use tokio::signal;
 use tokio_stream::StreamExt;
 use tracing::{debug, error, info, warn};
 
-use cosmic_sync_server::{config::settings::MessageBrokerConfig, RabbitMqEventBus};
+use cosmic_sync_server::config::settings::MessageBrokerConfig;
+use cosmic_sync_server::server::event_bus::RabbitMqEventBus;
 
 use lapin::{
     options::*,
@@ -305,16 +306,21 @@ async fn main() -> anyhow::Result<()> {
 
                     // Simple handler: try parse JSON, if parse fails, route to retry/dlq with attempts
                     let mut attempts = 0u32;
-                    if let Some(headers) = delivery.properties.headers().as_ref() {
-                        if let Some(AMQPValue::LongInt(n)) =
-                            headers.inner().get("x-retry-count")
-                        {
-                            attempts = (*n).max(0) as u32;
-                        }
-                        if let Some(AMQPValue::LongUInt(n)) =
-                            headers.inner().get("x-retry-count")
-                        {
-                            attempts = *n as u32;
+                    if let Some(v) = delivery
+                        .properties
+                        .headers()
+                        .as_ref()
+                        .and_then(|h| h.inner().get("x-retry-count"))
+                        .cloned()
+                    {
+                        match v {
+                            AMQPValue::LongInt(n) => {
+                                attempts = n.max(0) as u32;
+                            }
+                            AMQPValue::LongUInt(n) => {
+                                attempts = n as u32;
+                            }
+                            _ => {}
                         }
                     }
 

From a53a0609a116eccc733c64fc0c7e48ee38445bba Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Mon, 8 Sep 2025 15:44:17 -0600
Subject: [PATCH 04/71] Bypass rabbit_consumer code

---
 Dockerfile | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 2b3b573..8102c77 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -36,8 +36,8 @@ COPY src ./src
 #COPY crates ./crates
 
 # Build the application
-RUN cargo build --release
-# RUN cargo build --release --bin cosmic-sync-server
+# RUN cargo build --release
+RUN cargo build --release --bin cosmic-sync-server
 
 # Runtime stage
 FROM debian:bookworm-slim

From 12362cf3b9e15bf5be76b83f58e04100a87c7c1b Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Tue, 9 Sep 2025 13:46:24 -0600
Subject: [PATCH 05/71] Update deploy process

---
 .github/workflows/deploy-staging.yml | 50 ++++++++--------------------
 1 file changed, 14 insertions(+), 36 deletions(-)

diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml
index 9ecaee7..585723d 100644
--- a/.github/workflows/deploy-staging.yml
+++ b/.github/workflows/deploy-staging.yml
@@ -66,46 +66,24 @@ jobs:
           container-name: app
           image: ${{ steps.build-image.outputs.image }}
 
-      - name: Deploy Amazon ECS task definition
+      - name: Deploy Amazon ECS task definition (no wait)
        uses: aws-actions/amazon-ecs-deploy-task-definition@v2
         with:
           task-definition: ${{ steps.task-def.outputs.task-definition }}
           service: staging-genesis76-cosmic-sync
           cluster: genesis76-us-east-2
-          wait-for-service-stability: true
+          wait-for-service-stability: false
 
-      - name: Check ECS Service Status
+      - name: App health check (no ECS read permissions required)
         run: |
-          echo "Checking ECS service status..."
-          aws ecs describe-services \
-            --cluster genesis76-us-east-2 \
-            --services staging-genesis76-cosmic-sync \
-            --query 'services[0].{Status:status,RunningCount:runningCount,PendingCount:pendingCount,DesiredCount:desiredCount}'
-
-          echo "Getting recent ECS events..."
-          aws ecs describe-services \
-            --cluster genesis76-us-east-2 \
-            --services staging-genesis76-cosmic-sync \
-            --query 'services[0].events[:10]'
-
-          echo "Getting task details..."
-          TASK_ARN=$(aws ecs list-tasks \
-            --cluster genesis76-us-east-2 \
-            --service-name staging-genesis76-cosmic-sync \
-            --query 'taskArns[0]' --output text)
-
-          if [ "$TASK_ARN" != "None" ]; then
-            echo "Task ARN: $TASK_ARN"
-            aws ecs describe-tasks \
-              --cluster genesis76-us-east-2 \
-              --tasks $TASK_ARN \
-              --query 'tasks[0].{LastStatus:lastStatus,HealthStatus:healthStatus,CreatedAt:createdAt,StoppedReason:stoppedReason}'
-
-            echo "Getting container details..."
- aws ecs describe-tasks \ - --cluster genesis76-us-east-2 \ - --tasks $TASK_ARN \ - --query 'tasks[0].containers[?name==`app`].{Name:name,LastStatus:lastStatus,ExitCode:exitCode,Reason:reason}' - else - echo "No tasks found" - fi \ No newline at end of file + echo "Waiting for app health endpoint..." + set +e + for i in $(seq 1 30); do + if curl -fsS https://sync.genesis76.com/health; then + echo "App is healthy" + exit 0 + fi + sleep 5 + done + echo "App health check failed" + exit 1 \ No newline at end of file From 5fdd3995a58200e003509a80c5317177c6120026 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 9 Sep 2025 17:11:11 -0600 Subject: [PATCH 06/71] Update cosmic-sync-server health check --- .github/workflows/deploy-staging.yml | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 585723d..a88211f 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -74,16 +74,21 @@ jobs: cluster: genesis76-us-east-2 wait-for-service-stability: false + - name: App health check (no ECS read permissions required) + env: + HEALTHCHECK_URL: https://sync.genesis76.com/health run: | echo "Waiting for app health endpoint..." set +e + sleep 30 + URL="${HEALTHCHECK_URL:-https://sync.genesis76.com/health}" for i in $(seq 1 30); do - if curl -fsS https://sync.genesis76.com/health; then + if curl -fsS --http2 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' "$URL"; then echo "App is healthy" exit 0 fi sleep 5 done echo "App health check failed" - exit 1 \ No newline at end of file + exit 1 From 1982e986a8859dfe7fb02bfa382223ce0b95a371 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 9 Sep 2025 17:21:07 -0600 Subject: [PATCH 07/71] Update cosmic-sync-server health check --- .github/workflows/deploy-staging.yml | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 0300dfb..507129a 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -87,6 +87,11 @@ jobs: echo "App is healthy" exit 0 fi + # Fallback to HTTP/1.1 in case ALB enforces HTTP/1.1 only + if curl -fsS --http1.1 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' "$URL"; then + echo "App is healthy (HTTP/1.1)" + exit 0 + fi sleep 5 done echo "App health check failed" From 53745b675e6097b72da069ec2ca3be4275fce193 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 10 Sep 2025 11:57:11 -0600 Subject: [PATCH 08/71] Update service loading delay/ Update related service env --- .github/workflows/ci.yml | 6 +- .github/workflows/deploy-staging.yml | 10 +- Dockerfile | 8 +- README.md | 11 +- src/bin/rabbit_consumer.rs | 11 +- src/config/secrets.rs | 46 +++++++ src/config/settings.rs | 180 ++++++++++++--------------- 7 files changed, 151 insertions(+), 121 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 64f7aa4..09c4e22 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -71,15 +71,15 @@ jobs: sudo apt-get install -y protobuf-compiler - name: Build - run: cargo build --verbose + run: cargo build --verbose --features redis-cache - name: Test run: cargo test --verbose env: DATABASE_URL: mysql://root:cosmic_sync@localhost:3306/cosmic_sync_test - REDIS_URL: redis://localhost:6379 - DB_HOST: localhost REDIS_HOST: localhost + REDIS_PORT: 6379 + DB_HOST: localhost Format: runs-on: ubuntu-latest diff --git 
a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 507129a..87c0b66 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -80,19 +80,19 @@ jobs: run: | echo "Waiting for app health endpoint..." set +e - sleep 30 + sleep 60 URL="${HEALTHCHECK_URL:-https://sync.genesis76.com/health}" - for i in $(seq 1 30); do - if curl -fsS --http2 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' "$URL"; then + for i in $(seq 1 60); do + if curl -fsS --http2 --connect-timeout 5 --max-time 8 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' -H 'Cache-Control: no-cache, no-store' -H 'Pragma: no-cache' "$URL"; then echo "App is healthy" exit 0 fi # Fallback to HTTP/1.1 in case ALB enforces HTTP/1.1 only - if curl -fsS --http1.1 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' "$URL"; then + if curl -fsS --http1.1 --connect-timeout 5 --max-time 8 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' -H 'Cache-Control: no-cache, no-store' -H 'Pragma: no-cache' "$URL"; then echo "App is healthy (HTTP/1.1)" exit 0 fi - sleep 5 + sleep 10 done echo "App health check failed" exit 1 diff --git a/Dockerfile b/Dockerfile index 316c917..157c717 100644 --- a/Dockerfile +++ b/Dockerfile @@ -23,13 +23,13 @@ COPY Cargo.toml Cargo.lock build.rs ./ COPY proto ./proto RUN mkdir -p src && echo "fn main() {}" > src/main.rs && echo "pub fn dummy() {}" > src/lib.rs -RUN cargo build --release +RUN cargo build --release --features redis-cache RUN rm -f src/main.rs src/lib.rs COPY src ./src RUN cargo clean -RUN cargo build --release --bin cosmic-sync-server +RUN cargo build --release --bin cosmic-sync-server --features redis-cache # Runtime stage FROM debian:bookworm-slim @@ -61,8 +61,8 @@ USER appuser EXPOSE 50051 8080 # Health check using HTTP endpoint -HEALTHCHECK --interval=30s --timeout=5s --start-period=60s --retries=3 \ - CMD curl -f http://localhost:8080/health || exit 1 +HEALTHCHECK --interval=30s --timeout=5s --start-period=90s --retries=3 \ + CMD curl -fsS --connect-timeout 2 --max-time 5 http://localhost:8080/health || exit 1 # Run the server CMD ["./cosmic-sync-server"] \ No newline at end of file diff --git a/README.md b/README.md index 21313d3..81ac098 100755 --- a/README.md +++ b/README.md @@ -264,4 +264,13 @@ If file synchronization isn't working: ## License -This project is licensed under the terms of the GNU General Public License v3.0. \ No newline at end of file +This project is licensed under the terms of the GNU General Public License v3.0. 
+
+## Cloud DB/Cache configuration
+
+- MySQL (required)
+  - `DB_HOST`, `DB_PORT`, `DB_USER`, `DB_PASS`, `DB_NAME`
+  - Optional TLS: `DB_SSL_MODE` (DISABLED|PREFERRED|REQUIRED|VERIFY_CA|VERIFY_IDENTITY), `DB_SSL_CA` (path)
+- Redis (optional; enable with build feature `redis-cache` and/or env)
+  - `REDIS_ENABLED=true`, `REDIS_HOST=`, `REDIS_PORT=6379`
+  - Optional: `REDIS_KEY_PREFIX` (default: `cosmic.sync`)
\ No newline at end of file
diff --git a/src/bin/rabbit_consumer.rs b/src/bin/rabbit_consumer.rs
index a4ad7a8..f655a1d 100644
--- a/src/bin/rabbit_consumer.rs
+++ b/src/bin/rabbit_consumer.rs
@@ -54,10 +54,11 @@ static REDIS_MANAGER: OnceCell<Option<ConnectionManager>> = OnceCell::const_new();
 
 #[cfg(feature = "redis-cache")]
 async fn get_redis_manager() -> Option<ConnectionManager> {
-    let url = match std::env::var("REDIS_URL") {
-        Ok(u) => u,
-        Err(_) => return None,
-    };
+    // Prefer explicit env, fallback to config loader
+    // Build from REDIS_HOST/REDIS_PORT only (do not use REDIS_URL)
+    let host = match std::env::var("REDIS_HOST") { Ok(h) if !h.is_empty() => h, _ => return None };
+    let port = std::env::var("REDIS_PORT").ok().and_then(|p| p.parse::<u16>().ok()).unwrap_or(6379);
+    let url = format!("redis://{}:{}/0", host, port);
     let mgr_ref = REDIS_MANAGER
         .get_or_init(|| async move {
             match RedisClient::open(url.clone()) {
@@ -102,7 +103,7 @@ async fn mark_seen_id_async(id: &str) -> bool {
             }
         }
     } else {
-        // No REDIS_URL set → fallback to in-memory tracker
+        // No REDIS_HOST/REDIS_PORT set → fallback to in-memory tracker
         #[cfg(not(feature = "redis-cache"))]
         {
             let mut seen = SEEN_IDS.lock().unwrap();
diff --git a/src/config/secrets.rs b/src/config/secrets.rs
index 0f1a7a8..6a0c442 100644
--- a/src/config/secrets.rs
+++ b/src/config/secrets.rs
@@ -297,6 +297,9 @@ impl ConfigLoader {
             .map(|v| v == "1" || v.to_lowercase() == "true")
             .unwrap_or(false);
 
+        let ssl_mode = self.get_config_value("DB_SSL_MODE", None).await;
+        let ssl_ca_path = self.get_config_value("DB_SSL_CA", None).await;
+
         DatabaseConfig {
             user,
             password,
@@ -306,6 +309,8 @@ impl ConfigLoader {
             max_connections,
             connection_timeout,
             log_queries,
+            ssl_mode,
+            ssl_ca_path,
         }
     }
 
@@ -665,6 +670,45 @@ impl ConfigLoader {
         }
     }
 
+    /// Get Redis configuration from secrets or environment
+    pub async fn get_redis_config(&self) -> super::settings::RedisConfig {
+        use super::settings::RedisConfig;
+
+        let default_enabled = if cfg!(feature = "redis-cache") { "true" } else { "false" };
+        let enabled = self
+            .get_config_value("REDIS_ENABLED", Some(default_enabled))
+            .await
+            .map(|v| v == "1" || v.to_lowercase() == "true")
+            .unwrap_or(cfg!(feature = "redis-cache"));
+
+        // Prefer explicit URL; otherwise compose from host/port
+        let url = {
+            let host_opt = self.get_config_value("REDIS_HOST", None).await;
+            match host_opt {
+                Some(host) if !host.is_empty() => {
+                    let port = self
+                        .get_config_value("REDIS_PORT", Some("6379"))
+                        .await
+                        .and_then(|p| p.parse::<u16>().ok())
+                        .unwrap_or(6379);
+                    Some(format!("redis://{}:{}/0", host, port))
+                }
+                _ => None,
+            }
+        };
+
+        let key_prefix = self
+            .get_config_value("REDIS_KEY_PREFIX", Some("cosmic.sync"))
+            .await
+            .unwrap_or_else(|| "cosmic.sync".to_string());
+
+        RedisConfig {
+            enabled,
+            url,
+            key_prefix,
+        }
+    }
+
     /// Load complete configuration
     pub async fn load_config(&self) -> super::settings::Config {
         info!(
@@ -678,6 +722,7 @@ impl ConfigLoader {
         let logging = self.get_logging_config().await;
         let features = self.get_feature_flags().await;
         let message_broker = super::settings::MessageBrokerConfig::load();
+        let redis = self.get_redis_config().await;
         let server_encode_key = self.get_server_encode_key().await;
 
         info!("Configuration loaded successfully");
@@ -689,6 +734,7 @@ impl ConfigLoader {
             logging,
             features,
             message_broker,
+            redis,
             server_encode_key,
         }
     }
diff --git a/src/config/settings.rs b/src/config/settings.rs
index 85ee8da..0053e39 100755
--- a/src/config/settings.rs
+++ b/src/config/settings.rs
@@ -18,6 +18,8 @@ pub struct Config {
     pub features: FeatureFlags,
     /// Message broker (RabbitMQ) configuration
     pub message_broker: MessageBrokerConfig,
+    /// Redis configuration (optional; primarily used for idempotency and caching)
+    pub redis: RedisConfig,
     /// Server-side encoding key (hex) for path/filename encryption (optional)
     #[serde(skip)]
     pub server_encode_key: Option<Vec<u8>>,
@@ -32,6 +34,7 @@ impl Default for Config {
             logging: LoggingConfig::default(),
             features: FeatureFlags::default(),
             message_broker: MessageBrokerConfig::default(),
+            redis: RedisConfig::default(),
             server_encode_key: None,
         }
     }
@@ -47,6 +50,7 @@ impl Config {
             logging: LoggingConfig::load(),
             features: FeatureFlags::load(),
             message_broker: MessageBrokerConfig::load(),
+            redis: RedisConfig::load(),
             server_encode_key: None,
         }
     }
@@ -170,6 +174,10 @@ pub struct DatabaseConfig {
     pub connection_timeout: u64,
     /// Enable database query logging
     pub log_queries: bool,
+    /// Optional MySQL SSL/TLS mode (e.g., DISABLED, PREFERRED, REQUIRED, VERIFY_CA, VERIFY_IDENTITY)
+    pub ssl_mode: Option<String>,
+    /// Optional path to CA certificate for MySQL TLS verification (used with VERIFY_CA/VERIFY_IDENTITY)
+    pub ssl_ca_path: Option<String>,
 }
 
 impl Default for DatabaseConfig {
@@ -183,6 +191,8 @@ impl Default for DatabaseConfig {
             max_connections: crate::config::constants::DEFAULT_DB_POOL,
             connection_timeout: crate::config::constants::DEFAULT_DB_CONN_TIMEOUT_SECS,
             log_queries: crate::config::constants::DEFAULT_DB_LOG_QUERIES,
+            ssl_mode: None,
+            ssl_ca_path: None,
         }
     }
 }
@@ -213,6 +223,8 @@ impl DatabaseConfig {
         let log_queries = env::var("DATABASE_LOG_QUERIES")
             .map(|v| v == "1" || v.to_lowercase() == "true")
             .unwrap_or(crate::config::constants::DEFAULT_DB_LOG_QUERIES);
+        let ssl_mode = env::var("DB_SSL_MODE").ok();
+        let ssl_ca_path = env::var("DB_SSL_CA").ok();
 
         Self {
             user,
@@ -223,15 +235,36 @@ impl DatabaseConfig {
             max_connections,
             connection_timeout,
             log_queries,
+            ssl_mode,
+            ssl_ca_path,
         }
     }
 
     /// Generate database URL from individual components
     pub fn url(&self) -> String {
-        format!(
+        let mut url = format!(
             "mysql://{}:{}@{}:{}/{}",
             self.user, self.password, self.host, self.port, self.name
-        )
+        );
+
+        // Append SSL/TLS options if provided
+        let mut params: Vec<String> = Vec::new();
+        if let Some(mode) = &self.ssl_mode {
+            if !mode.is_empty() {
+                params.push(format!("ssl-mode={}", mode));
+            }
+        }
+        if let Some(ca) = &self.ssl_ca_path {
+            if !ca.is_empty() {
+                params.push(format!("ssl-ca={}", ca));
+            }
+        }
+        if !params.is_empty() {
+            url.push('?');
+            url.push_str(&params.join("&"));
+        }
+
+        url
     }
 }
 
@@ -399,127 +432,29 @@ pub struct StorageConfig {
     pub storage_type: StorageType,
     /// S3 configuration (when storage_type is S3)
     pub s3: S3Config,
-    /// Retention TTL in seconds for deleted data (logical -> physical purge)
+    /// Default TTL for files (seconds)
     pub file_ttl_secs: i64,
-    /// Maximum number of revisions to keep per file path
+    /// Maximum number of file revisions to keep
     pub max_file_revisions: i32,
 }
 
-impl Default for StorageConfig {
-    fn default() -> Self {
-        Self {
-            storage_type: StorageType::Database,
-            s3: S3Config::default(),
-            file_ttl_secs:
crate::config::constants::DEFAULT_FILE_TTL_SECS, - max_file_revisions: crate::config::constants::DEFAULT_MAX_FILE_REVISIONS, - } - } -} - -impl StorageConfig { - /// Load storage configuration from environment variables or use defaults - pub fn load() -> Self { - let storage_type = env::var("STORAGE_TYPE") - .unwrap_or_else(|_| "database".to_string()) - .parse() - .unwrap_or(StorageType::Database); - - Self { - storage_type, - s3: S3Config::load(), - file_ttl_secs: env::var("FILE_TTL_SECS") - .ok() - .and_then(|v| v.parse().ok()) - .unwrap_or(crate::config::constants::DEFAULT_FILE_TTL_SECS), - max_file_revisions: env::var("MAX_FILE_REVISIONS") - .ok() - .and_then(|v| v.parse().ok()) - .unwrap_or(crate::config::constants::DEFAULT_MAX_FILE_REVISIONS), - } - } -} - -/// S3 configuration settings +/// S3 configuration #[derive(Debug, Clone, Serialize, Deserialize)] pub struct S3Config { - /// AWS region pub region: String, - /// S3 bucket name pub bucket: String, - /// S3 object key prefix pub key_prefix: String, - /// AWS access key ID (optional - can use IAM role) pub access_key_id: Option, - /// AWS secret access key (optional - can use IAM role) pub secret_access_key: Option, - /// AWS session token (optional - for temporary credentials) pub session_token: Option, - /// S3 endpoint URL (for S3-compatible services) pub endpoint_url: Option, - /// Force path style addressing pub force_path_style: bool, - /// Use AWS Secret Manager for credentials pub use_secret_manager: bool, - /// Secret Manager secret name (when use_secret_manager is true) pub secret_name: Option, - /// Connection timeout in seconds pub timeout_seconds: u64, - /// Maximum number of retries pub max_retries: u32, } -impl Default for S3Config { - fn default() -> Self { - Self { - region: crate::config::constants::DEFAULT_S3_REGION.to_string(), - bucket: crate::config::constants::DEFAULT_S3_BUCKET.to_string(), - key_prefix: crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string(), - access_key_id: None, - secret_access_key: None, - session_token: None, - endpoint_url: None, - force_path_style: crate::config::constants::DEFAULT_S3_FORCE_PATH_STYLE, - use_secret_manager: crate::config::constants::DEFAULT_S3_USE_SECRET_MANAGER, - secret_name: None, - timeout_seconds: crate::config::constants::DEFAULT_S3_TIMEOUT_SECONDS, - max_retries: crate::config::constants::DEFAULT_S3_MAX_RETRIES, - } - } -} - -impl S3Config { - /// Load S3 configuration from environment variables or use defaults - pub fn load() -> Self { - Self { - region: env::var("AWS_REGION") - .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_REGION.to_string()), - bucket: env::var("AWS_S3_BUCKET") - .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_BUCKET.to_string()), - key_prefix: env::var("S3_KEY_PREFIX") - .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string()), - access_key_id: env::var("AWS_ACCESS_KEY_ID").ok(), - secret_access_key: env::var("AWS_SECRET_ACCESS_KEY").ok(), - session_token: env::var("AWS_SESSION_TOKEN").ok(), - endpoint_url: env::var("S3_ENDPOINT_URL").ok(), - force_path_style: env::var("S3_FORCE_PATH_STYLE") - .map(|v| v == "1" || v.to_lowercase() == "true") - .unwrap_or(crate::config::constants::DEFAULT_S3_FORCE_PATH_STYLE), - use_secret_manager: env::var("USE_AWS_SECRET_MANAGER") - .map(|v| v == "1" || v.to_lowercase() == "true") - .unwrap_or(crate::config::constants::DEFAULT_S3_USE_SECRET_MANAGER), - secret_name: env::var("AWS_SECRET_NAME").ok(), - timeout_seconds: env::var("S3_TIMEOUT_SECONDS") - .ok() - 
.and_then(|v| v.parse().ok()) - .unwrap_or(crate::config::constants::DEFAULT_S3_TIMEOUT_SECONDS), - max_retries: env::var("S3_MAX_RETRIES") - .ok() - .and_then(|v| v.parse().ok()) - .unwrap_or(crate::config::constants::DEFAULT_S3_MAX_RETRIES), - } - } -} - /// Message broker configuration settings (RabbitMQ) #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MessageBrokerConfig { @@ -568,3 +503,42 @@ impl MessageBrokerConfig { } } } + +/// Redis configuration settings (optional) +#[derive(Debug, Clone, Serialize, Deserialize)] +pub struct RedisConfig { + /// Enable Redis-backed features (idempotency, caching). Default aligns with build feature. + pub enabled: bool, + /// Connection URL, e.g. redis://user:pass@host:6379/0 + pub url: Option, + /// Optional key prefix for namespacing + pub key_prefix: String, +} + +impl Default for RedisConfig { + fn default() -> Self { + // Default enable aligns with compile-time feature if present + let default_enabled = cfg!(feature = "redis-cache"); + Self { + enabled: default_enabled, + url: None, + key_prefix: env::var("REDIS_KEY_PREFIX").unwrap_or_else(|_| "cosmic.sync".to_string()), + } + } +} + +impl RedisConfig { + pub fn load() -> Self { + let default_enabled = cfg!(feature = "redis-cache"); + let enabled = env::var("REDIS_ENABLED") + .map(|v| v == "1" || v.to_lowercase() == "true") + .unwrap_or(default_enabled); + let url = { + let host = env::var("REDIS_HOST").ok(); + let port = env::var("REDIS_PORT").ok().and_then(|p| p.parse::().ok()).unwrap_or(6379); + host.map(|h| format!("redis://{}:{}/0", h, port)) + }; + let key_prefix = env::var("REDIS_KEY_PREFIX").unwrap_or_else(|_| "cosmic.sync".to_string()); + Self { enabled, url, key_prefix } + } +} From c55d84a9d859ba3c64c6a349a14f3b5041dcc4d5 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 10 Sep 2025 14:55:33 -0600 Subject: [PATCH 09/71] Fix build error --- src/config/settings.rs | 107 ++++++++++++++++++++++++++++++++++++++++ src/server/app_state.rs | 1 + src/server/startup.rs | 2 + 3 files changed, 110 insertions(+) diff --git a/src/config/settings.rs b/src/config/settings.rs index 0053e39..4e0e0a8 100755 --- a/src/config/settings.rs +++ b/src/config/settings.rs @@ -455,6 +455,113 @@ pub struct S3Config { pub max_retries: u32, } +impl Default for S3Config { + fn default() -> Self { + Self { + region: crate::config::constants::DEFAULT_S3_REGION.to_string(), + bucket: crate::config::constants::DEFAULT_S3_BUCKET.to_string(), + key_prefix: crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string(), + access_key_id: None, + secret_access_key: None, + session_token: None, + endpoint_url: None, + force_path_style: crate::config::constants::DEFAULT_S3_FORCE_PATH_STYLE, + use_secret_manager: crate::config::constants::DEFAULT_S3_USE_SECRET_MANAGER, + secret_name: None, + timeout_seconds: crate::config::constants::DEFAULT_S3_TIMEOUT_SECONDS, + max_retries: crate::config::constants::DEFAULT_S3_MAX_RETRIES, + } + } +} + +impl S3Config { + pub fn load() -> Self { + let region = env::var("AWS_REGION") + .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_REGION.to_string()); + let bucket = env::var("AWS_S3_BUCKET") + .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_BUCKET.to_string()); + let key_prefix = env::var("S3_KEY_PREFIX") + .unwrap_or_else(|_| crate::config::constants::DEFAULT_S3_KEY_PREFIX.to_string()); + + let access_key_id = env::var("AWS_ACCESS_KEY_ID").ok(); + let secret_access_key = env::var("AWS_SECRET_ACCESS_KEY").ok(); + let session_token = 
env::var("AWS_SESSION_TOKEN").ok(); + let endpoint_url = env::var("S3_ENDPOINT_URL").ok(); + + let force_path_style = env::var("S3_FORCE_PATH_STYLE") + .map(|v| v == "1" || v.to_lowercase() == "true") + .unwrap_or(crate::config::constants::DEFAULT_S3_FORCE_PATH_STYLE); + + let use_secret_manager = env::var("USE_AWS_SECRET_MANAGER") + .map(|v| v == "1" || v.to_lowercase() == "true") + .unwrap_or(crate::config::constants::DEFAULT_S3_USE_SECRET_MANAGER); + + let secret_name = env::var("AWS_SECRET_NAME").ok(); + + let timeout_seconds = env::var("S3_TIMEOUT_SECONDS") + .ok() + .and_then(|v| v.parse::().ok()) + .unwrap_or(crate::config::constants::DEFAULT_S3_TIMEOUT_SECONDS); + + let max_retries = env::var("S3_MAX_RETRIES") + .ok() + .and_then(|v| v.parse::().ok()) + .unwrap_or(crate::config::constants::DEFAULT_S3_MAX_RETRIES); + + Self { + region, + bucket, + key_prefix, + access_key_id, + secret_access_key, + session_token, + endpoint_url, + force_path_style, + use_secret_manager, + secret_name, + timeout_seconds, + max_retries, + } + } +} + +impl Default for StorageConfig { + fn default() -> Self { + Self { + storage_type: StorageType::default(), + s3: S3Config::default(), + file_ttl_secs: crate::config::constants::DEFAULT_FILE_TTL_SECS, + max_file_revisions: crate::config::constants::DEFAULT_MAX_FILE_REVISIONS, + } + } +} + +impl StorageConfig { + pub fn load() -> Self { + let storage_type = env::var("STORAGE_TYPE") + .unwrap_or_else(|_| "database".to_string()) + .parse() + .unwrap_or(StorageType::Database); + + let file_ttl_secs = env::var("FILE_TTL_SECS") + .ok() + .and_then(|v| v.parse::().ok()) + .unwrap_or(crate::config::constants::DEFAULT_FILE_TTL_SECS); + + let max_file_revisions = env::var("MAX_FILE_REVISIONS") + .ok() + .and_then(|v| v.parse::().ok()) + .unwrap_or(crate::config::constants::DEFAULT_MAX_FILE_REVISIONS); + + Self { + storage_type, + s3: S3Config::load(), + file_ttl_secs, + max_file_revisions, + } + } +} + /// Message broker configuration settings (RabbitMQ) #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MessageBrokerConfig { diff --git a/src/server/app_state.rs b/src/server/app_state.rs index 7545834..d39bb5f 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -456,6 +456,7 @@ impl AppState { features: crate::config::settings::FeatureFlags::default(), storage: crate::config::settings::StorageConfig::default(), message_broker: crate::config::settings::MessageBrokerConfig::load(), + redis: crate::config::settings::RedisConfig::load(), server_encode_key: None, }; diff --git a/src/server/startup.rs b/src/server/startup.rs index 4a98185..56021c8 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -320,6 +320,8 @@ fn parse_mysql_url(url: &str) -> Result max_connections: 50, connection_timeout: 30, log_queries: false, + ssl_mode: None, + ssl_ca_path: None, }) } From a43b5ac792cf80a33d2dd15f0b132c3890364b5d Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 10 Sep 2025 14:56:38 -0600 Subject: [PATCH 10/71] Fix build error --- src/bin/rabbit_consumer.rs | 10 ++++++++-- src/config/secrets.rs | 6 +++++- src/config/settings.rs | 11 +++++++++-- 3 files changed, 22 insertions(+), 5 deletions(-) diff --git a/src/bin/rabbit_consumer.rs b/src/bin/rabbit_consumer.rs index f655a1d..e069ff7 100644 --- a/src/bin/rabbit_consumer.rs +++ b/src/bin/rabbit_consumer.rs @@ -56,8 +56,14 @@ static REDIS_MANAGER: OnceCell = OnceCell::const_new(); async fn get_redis_manager() -> Option { // Prefer explicit env, fallback to config loader // Build from 
From a43b5ac792cf80a33d2dd15f0b132c3890364b5d Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Wed, 10 Sep 2025 14:56:38 -0600
Subject: [PATCH 10/71] Fix build error

---
 src/bin/rabbit_consumer.rs | 10 ++++++++--
 src/config/secrets.rs      |  6 +++++-
 src/config/settings.rs     | 11 +++++++++--
 3 files changed, 22 insertions(+), 5 deletions(-)

diff --git a/src/bin/rabbit_consumer.rs b/src/bin/rabbit_consumer.rs
index f655a1d..e069ff7 100644
--- a/src/bin/rabbit_consumer.rs
+++ b/src/bin/rabbit_consumer.rs
@@ -56,8 +56,14 @@ static REDIS_MANAGER: OnceCell<ConnectionManager> = OnceCell::const_new();
 async fn get_redis_manager() -> Option<ConnectionManager> {
     // Prefer explicit env, fallback to config loader
     // Build from REDIS_HOST/REDIS_PORT only (do not use REDIS_URL)
-    let host = match std::env::var("REDIS_HOST") { Ok(h) if !h.is_empty() => h, _ => return None };
-    let port = std::env::var("REDIS_PORT").ok().and_then(|p| p.parse::<u16>().ok()).unwrap_or(6379);
+    let host = match std::env::var("REDIS_HOST") {
+        Ok(h) if !h.is_empty() => h,
+        _ => return None,
+    };
+    let port = std::env::var("REDIS_PORT")
+        .ok()
+        .and_then(|p| p.parse::<u16>().ok())
+        .unwrap_or(6379);
     let url = format!("redis://{}:{}/0", host, port);
     let mgr_ref = REDIS_MANAGER
         .get_or_init(|| async move {
diff --git a/src/config/secrets.rs b/src/config/secrets.rs
index 6a0c442..c6c483d 100644
--- a/src/config/secrets.rs
+++ b/src/config/secrets.rs
@@ -674,7 +674,11 @@ impl ConfigLoader {
     pub async fn get_redis_config(&self) -> super::settings::RedisConfig {
         use super::settings::RedisConfig;
 
-        let default_enabled = if cfg!(feature = "redis-cache") { "true" } else { "false" };
+        let default_enabled = if cfg!(feature = "redis-cache") {
+            "true"
+        } else {
+            "false"
+        };
         let enabled = self
             .get_config_value("REDIS_ENABLED", Some(default_enabled))
             .await
diff --git a/src/config/settings.rs b/src/config/settings.rs
index 4e0e0a8..a9d3d29 100755
--- a/src/config/settings.rs
+++ b/src/config/settings.rs
@@ -642,10 +642,17 @@ impl RedisConfig {
             .unwrap_or(default_enabled);
         let url = {
             let host = env::var("REDIS_HOST").ok();
-            let port = env::var("REDIS_PORT").ok().and_then(|p| p.parse::<u16>().ok()).unwrap_or(6379);
+            let port = env::var("REDIS_PORT")
+                .ok()
+                .and_then(|p| p.parse::<u16>().ok())
+                .unwrap_or(6379);
             host.map(|h| format!("redis://{}:{}/0", h, port))
         };
         let key_prefix = env::var("REDIS_KEY_PREFIX").unwrap_or_else(|_| "cosmic.sync".to_string());
-        Self { enabled, url, key_prefix }
+        Self {
+            enabled,
+            url,
+            key_prefix,
+        }
     }
 }

From 15555aa84636438f3b096046ba88fbf188f3d3d6 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Wed, 10 Sep 2025 15:16:10 -0600
Subject: [PATCH 11/71] Update builder rs

---
 src/container/builder.rs | 1 +
 1 file changed, 1 insertion(+)

diff --git a/src/container/builder.rs b/src/container/builder.rs
index ee271a7..1d3873b 100644
--- a/src/container/builder.rs
+++ b/src/container/builder.rs
@@ -157,6 +157,7 @@ impl ContainerBuilder {
                 ..Default::default()
             },
             message_broker: crate::config::settings::MessageBrokerConfig::default(),
+            redis: crate::config::settings::RedisConfig::default(),
             server_encode_key: None,
         };

From 37bc15cebe1d0c8bcbdc771ec213a88c37a96393 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Wed, 10 Sep 2025 16:17:58 -0600
Subject: [PATCH 12/71] Fix static library issue

---
 Cargo.lock             | 355 ++++++++++++++++++++++++++++++++---------
 Cargo.toml             |   2 +-
 Dockerfile             |  46 ++----
 src/handlers/health.rs |  68 +++++++-
 src/server/startup.rs  |   4 +
 5 files changed, 366 insertions(+), 109 deletions(-)

diff --git a/Cargo.lock b/Cargo.lock
index 4f2e2d6..0802d60 100755
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -389,6 +389,45 @@ version = "1.7.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457"
 
+[[package]]
+name = "asn1-rs"
+version = "0.7.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "56624a96882bb8c26d61312ae18cb45868e5a9992ea73c58e45c3101e56a1e60"
+dependencies = [
+ "asn1-rs-derive",
+ "asn1-rs-impl",
+ "displaydoc",
+ "nom",
+ "num-traits",
+ "rusticata-macros",
+ "thiserror 2.0.12",
+ "time",
+]
+
+[[package]]
+name = "asn1-rs-derive"
+version = "0.6.0"
+source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", + "synstructure", +] + +[[package]] +name = "asn1-rs-impl" +version = "0.2.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "async-channel" version = "2.5.0" @@ -1168,6 +1207,15 @@ dependencies = [ "generic-array", ] +[[package]] +name = "block-padding" +version = "0.3.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "a8894febbff9f758034a5b8e12d87918f56dfc64a8e1fe757d65e29041538d93" +dependencies = [ + "generic-array", +] + [[package]] name = "blocking" version = "1.6.2" @@ -1239,6 +1287,15 @@ dependencies = [ "bytes", ] +[[package]] +name = "cbc" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "26b52a9543ae338f279b96b0b9fed9c8093744685043739079ce85cd58f289a6" +dependencies = [ + "cipher", +] + [[package]] name = "cc" version = "1.2.19" @@ -1310,6 +1367,18 @@ dependencies = [ "cc", ] +[[package]] +name = "cms" +version = "0.2.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7b77c319abfd5219629c45c34c89ba945ed3c5e49fcde9d16b6c3885f118a730" +dependencies = [ + "const-oid", + "der 0.7.10", + "spki 0.7.3", + "x509-cert", +] + [[package]] name = "combine" version = "4.6.7" @@ -1562,6 +1631,12 @@ dependencies = [ "parking_lot_core", ] +[[package]] +name = "data-encoding" +version = "2.9.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" + [[package]] name = "der" version = "0.6.1" @@ -1579,10 +1654,37 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e7c1832837b905bbfb5101e07cc24c8deddf52f93225eee6ead5f4d63d53ddcb" dependencies = [ "const-oid", + "der_derive", + "flagset", "pem-rfc7468", "zeroize", ] +[[package]] +name = "der-parser" +version = "10.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "07da5016415d5a3c4dd39b11ed26f915f52fc4e0dc197d87908bc916e51bc1a6" +dependencies = [ + "asn1-rs", + "displaydoc", + "nom", + "num-bigint", + "num-traits", + "rusticata-macros", +] + +[[package]] +name = "der_derive" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" +dependencies = [ + "proc-macro2", + "quote", + "syn 2.0.100", +] + [[package]] name = "deranged" version = "0.4.0" @@ -1626,6 +1728,15 @@ dependencies = [ "unicode-xid", ] +[[package]] +name = "des" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "ffdd80ce8ce993de27e9f063a444a4d53ce8e8db4c1f00cc03af5ad5a9867a1e" +dependencies = [ + "cipher", +] + [[package]] name = "digest" version = "0.10.7" @@ -1817,6 +1928,12 @@ version = "0.4.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80" +[[package]] +name = "flagset" +version = "0.4.7" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" + [[package]] name = "flate2" version = "1.1.1" @@ -1850,21 +1967,6 @@ 
version = "0.1.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" -[[package]] -name = "foreign-types" -version = "0.3.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f6f339eb8adc052cd2ca78910fda869aefa38d22d5cb648e6485e4d3fc06f3b1" -dependencies = [ - "foreign-types-shared", -] - -[[package]] -name = "foreign-types-shared" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "00b0228411908ca8685dba7fc2cdd70ec9990a6e753e89b6ac91a84c40fbaf4b" - [[package]] name = "form_urlencoded" version = "1.2.1" @@ -2594,6 +2696,7 @@ version = "0.1.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "879f10e63c20629ecabbb64a8010319738c66a5cd0c29b02d63d272b03751d01" dependencies = [ + "block-padding", "generic-array", ] @@ -2912,23 +3015,6 @@ dependencies = [ "rand 0.8.5", ] -[[package]] -name = "native-tls" -version = "0.2.14" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87de3442987e9dbec73158d5c715e7ad9072fda936bb03d19d7fa10e00520f0e" -dependencies = [ - "libc", - "log", - "openssl", - "openssl-probe", - "openssl-sys", - "schannel", - "security-framework 2.11.1", - "security-framework-sys", - "tempfile", -] - [[package]] name = "nom" version = "7.1.3" @@ -3051,6 +3137,15 @@ dependencies = [ "memchr", ] +[[package]] +name = "oid-registry" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "12f40cff3dde1b6087cc5d5f5d4d65712f34016a03ed60e9c08dcc392736b5b7" +dependencies = [ + "asn1-rs", +] + [[package]] name = "once_cell" version = "1.21.3" @@ -3063,50 +3158,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c08d65885ee38876c4f86fa503fb49d7b507c2b62552df7c70b2fce627e06381" -[[package]] -name = "openssl" -version = "0.10.73" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8505734d46c8ab1e19a1dce3aef597ad87dcb4c37e7188231769bd6bd51cebf8" -dependencies = [ - "bitflags 2.9.0", - "cfg-if", - "foreign-types", - "libc", - "once_cell", - "openssl-macros", - "openssl-sys", -] - -[[package]] -name = "openssl-macros" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a948666b637a0f465e8564c73e89d4dde00d72d4d473cc972f390fc3dcee7d9c" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "openssl-probe" version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e" -[[package]] -name = "openssl-sys" -version = "0.9.109" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" -dependencies = [ - "cc", - "libc", - "pkg-config", - "vcpkg", -] - [[package]] name = "outref" version = "0.5.2" @@ -3119,6 +3176,28 @@ version = "0.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" +[[package]] +name = "p12-keystore" +version = "0.1.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3cae83056e7cb770211494a0ecf66d9fa7eba7d00977e5bb91f0e925b40b937f" +dependencies = [ + "cbc", + "cms", + "der 0.7.10", + "des", + "hex", + "hmac", + "pkcs12", + "pkcs5", + "rand 0.9.1", + "rc2", + "sha1", + 
"sha2", + "thiserror 2.0.12", + "x509-parser", +] + [[package]] name = "p256" version = "0.11.1" @@ -3165,6 +3244,16 @@ version = "1.0.15" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "57c0d7b74b563b49d38dae00a0c37d4d6de9b432382b2892f0574ddcae73fd0a" +[[package]] +name = "pbkdf2" +version = "0.12.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f8ed6a7761f76e3b9f92dfb0a60a6a6477c61024b775147ff0973a02653abaf2" +dependencies = [ + "digest", + "hmac", +] + [[package]] name = "pem" version = "3.0.5" @@ -3266,6 +3355,36 @@ dependencies = [ "spki 0.7.3", ] +[[package]] +name = "pkcs12" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "695b3df3d3cc1015f12d70235e35b6b79befc5fa7a9b95b951eab1dd07c9efc2" +dependencies = [ + "cms", + "const-oid", + "der 0.7.10", + "digest", + "spki 0.7.3", + "x509-cert", + "zeroize", +] + +[[package]] +name = "pkcs5" +version = "0.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e847e2c91a18bfa887dd028ec33f2fe6f25db77db3619024764914affe8b69a6" +dependencies = [ + "aes", + "cbc", + "der 0.7.10", + "pbkdf2", + "scrypt", + "sha2", + "spki 0.7.3", +] + [[package]] name = "pkcs8" version = "0.9.0" @@ -3495,6 +3614,15 @@ dependencies = [ "getrandom 0.3.2", ] +[[package]] +name = "rc2" +version = "0.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "62c64daa8e9438b84aaae55010a93f396f8e60e3911590fcba770d04643fc1dd" +dependencies = [ + "cipher", +] + [[package]] name = "reactor-trait" version = "1.1.0" @@ -3713,6 +3841,15 @@ dependencies = [ "semver", ] +[[package]] +name = "rusticata-macros" +version = "4.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "faf0c4a6ece9950b9abdb62b1cfcf2a68b3b67a10ba445b3bb85be2a293d0632" +dependencies = [ + "nom", +] + [[package]] name = "rustix" version = "0.37.28" @@ -3785,12 +3922,26 @@ checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" dependencies = [ "aws-lc-rs", "once_cell", + "ring 0.17.14", "rustls-pki-types", "rustls-webpki 0.103.3", "subtle", "zeroize", ] +[[package]] +name = "rustls-connector" +version = "0.20.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "70cc376c6ba1823ae229bacf8ad93c136d93524eab0e4e5e0e4f96b9c4e5b212" +dependencies = [ + "log", + "rustls 0.23.28", + "rustls-native-certs 0.7.3", + "rustls-pki-types", + "rustls-webpki 0.103.3", +] + [[package]] name = "rustls-native-certs" version = "0.6.3" @@ -3803,6 +3954,19 @@ dependencies = [ "security-framework 2.11.1", ] +[[package]] +name = "rustls-native-certs" +version = "0.7.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5bfb394eeed242e909609f56089eecfe5fda225042e8b171791b9c95f5931e5" +dependencies = [ + "openssl-probe", + "rustls-pemfile 2.2.0", + "rustls-pki-types", + "schannel", + "security-framework 2.11.1", +] + [[package]] name = "rustls-native-certs" version = "0.8.1" @@ -3876,6 +4040,15 @@ version = "1.0.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f" +[[package]] +name = "salsa20" +version = "0.10.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "97a22f5af31f73a954c10289c93e8a50cc23d971e80ee446f1f6f7137a088213" +dependencies = [ + "cipher", +] + [[package]] name = "schannel" version = "0.1.27" @@ -3891,6 +4064,17 @@ version = 
"1.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "94143f37725109f92c262ed2cf5e59bce7498c01bcc1502d7b9afe439a4e9f49" +[[package]] +name = "scrypt" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0516a385866c09368f0b5bcd1caff3366aace790fcd46e2bb032697bb172fd1f" +dependencies = [ + "pbkdf2", + "salsa20", + "sha2", +] + [[package]] name = "sct" version = "0.7.1" @@ -4470,7 +4654,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "495b0abdce3dc1f8fd27240651c9e68890c14e9d9c61527b1ce44d8a5a7bd3d5" dependencies = [ "cfg-if", - "native-tls", + "p12-keystore", + "rustls-connector", "rustls-pemfile 2.2.0", ] @@ -5550,6 +5735,34 @@ version = "0.5.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +[[package]] +name = "x509-cert" +version = "0.2.5" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1301e935010a701ae5f8655edc0ad17c44bad3ac5ce8c39185f75453b720ae94" +dependencies = [ + "const-oid", + "der 0.7.10", + "spki 0.7.3", +] + +[[package]] +name = "x509-parser" +version = "0.17.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "4569f339c0c402346d4a75a9e39cf8dad310e287eef1ff56d4c68e5067f53460" +dependencies = [ + "asn1-rs", + "data-encoding", + "der-parser", + "lazy_static", + "nom", + "oid-registry", + "rusticata-macros", + "thiserror 2.0.12", + "time", +] + [[package]] name = "xmlparser" version = "0.13.6" diff --git a/Cargo.toml b/Cargo.toml index 5dd8f75..c4914cb 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -117,7 +117,7 @@ aws-types = { version = "1.3" } dashmap = "5.5" once_cell = "1.19" -lapin = { version = "2.3", default-features = false, features = ["native-tls"] } +lapin = { version = "2.5", default-features = false, features = ["rustls"] } nanoid = "0.4" # Logging compatibility diff --git a/Dockerfile b/Dockerfile index 157c717..28f742c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,14 +5,18 @@ FROM rust:slim AS builder ARG VCS_REF ARG BUILD_DATE ARG VERSION +ARG RUST_TARGET=x86_64-unknown-linux-musl -# Install build dependencies including protobuf compiler +# Install build dependencies including protobuf compiler and musl toolchain RUN apt-get update && apt-get install -y \ pkg-config \ - libssl-dev \ protobuf-compiler \ + musl-tools \ && rm -rf /var/lib/apt/lists/* +# Enable target +RUN rustup target add ${RUST_TARGET} + # Create app directory WORKDIR /app @@ -23,46 +27,26 @@ COPY Cargo.toml Cargo.lock build.rs ./ COPY proto ./proto RUN mkdir -p src && echo "fn main() {}" > src/main.rs && echo "pub fn dummy() {}" > src/lib.rs -RUN cargo build --release --features redis-cache +RUN cargo build --release --features redis-cache --target ${RUST_TARGET} RUN rm -f src/main.rs src/lib.rs COPY src ./src RUN cargo clean -RUN cargo build --release --bin cosmic-sync-server --features redis-cache +RUN cargo build --release --bin cosmic-sync-server --features redis-cache --target ${RUST_TARGET} # Runtime stage -FROM debian:bookworm-slim - -# Install runtime dependencies -RUN apt-get update && apt-get install -y \ - ca-certificates \ - libssl3 \ - curl \ - && rm -rf /var/lib/apt/lists/* - -# Create app user -RUN groupadd -r appuser && useradd -r -g appuser appuser +FROM gcr.io/distroless/static:nonroot WORKDIR /app -# Copy the binary from builder stage -COPY --from=builder /app/target/release/cosmic-sync-server /app/cosmic-sync-server - 
-# Copy configuration files if needed
+# Copy the binary from builder stage (musl static)
+ARG RUST_TARGET=x86_64-unknown-linux-musl
+COPY --from=builder /app/target/${RUST_TARGET}/release/cosmic-sync-server /app/cosmic-sync-server
 COPY config ./config
 
-# Create data directory
-RUN mkdir -p /app/data && chown -R appuser:appuser /app
+USER nonroot:nonroot
 
-# Switch to non-root user
-USER appuser
-
-# Expose ports
 EXPOSE 50051 8080
 
-# Health check using HTTP endpoint
-HEALTHCHECK --interval=30s --timeout=5s --start-period=90s --retries=3 \
-    CMD curl -fsS --connect-timeout 2 --max-time 5 http://localhost:8080/health || exit 1
-
-# Run the server
-CMD ["./cosmic-sync-server"]
\ No newline at end of file
+# Distroless lacks curl; rely on container orchestrator health checks
+ENTRYPOINT ["/app/cosmic-sync-server"]
\ No newline at end of file
diff --git a/src/handlers/health.rs b/src/handlers/health.rs
index 8acf2ba..1ca1f14 100644
--- a/src/handlers/health.rs
+++ b/src/handlers/health.rs
@@ -35,13 +35,32 @@ pub async fn health_check() -> ActixResult<HttpResponse> {
 }
 
 /// HTTP readiness check endpoint
-pub async fn readiness_check() -> ActixResult<HttpResponse> {
-    // TODO: Add actual readiness checks (database, storage, etc.)
-    Ok(HttpResponse::Ok().json(json!({
-        "status": "ready",
+pub async fn readiness_check(app_state: web::Data<crate::server::app_state::AppState>) -> ActixResult<HttpResponse> {
+    // Perform basic dependency checks
+    let storage_ok = app_state
+        .storage
+        .health_check()
+        .await
+        .unwrap_or(false);
+
+    let message_broker_enabled = crate::server::app_state::AppState::get_config().message_broker.enabled;
+
+    let status = if storage_ok { "ready" } else { "degraded" };
+    let body = json!({
+        "status": status,
         "version": env!("CARGO_PKG_VERSION"),
-        "timestamp": chrono::Utc::now().to_rfc3339()
-    })))
+        "timestamp": chrono::Utc::now().to_rfc3339(),
+        "dependencies": {
+            "database": storage_ok,
+            "message_broker_enabled": message_broker_enabled
+        }
+    });
+
+    if storage_ok {
+        Ok(HttpResponse::Ok().json(body))
+    } else {
+        Ok(HttpResponse::ServiceUnavailable().json(body))
+    }
 }
 
 /// HTTP liveness check endpoint
@@ -52,3 +71,40 @@ pub async fn liveness_check() -> ActixResult<HttpResponse> {
         "timestamp": chrono::Utc::now().to_rfc3339()
     })))
 }
+
+/// Detailed health for external debugging
+pub async fn health_details(app_state: web::Data<crate::server::app_state::AppState>) -> ActixResult<HttpResponse> {
+    let cfg = crate::server::app_state::AppState::get_config();
+
+    // Check storage availability
+    let storage_ok = app_state
+        .storage
+        .health_check()
+        .await
+        .unwrap_or(false);
+
+    // Summarize configuration flags (safe subset)
+    let details = json!({
+        "app": {
+            "version": env!("CARGO_PKG_VERSION"),
+            "time": chrono::Utc::now().to_rfc3339(),
+        },
+        "config": {
+            "host": cfg.server.host,
+            "grpc_port": cfg.server.port,
+            "http_port": crate::config::constants::DEFAULT_HTTP_PORT,
+            "storage_type": format!("{:?}", cfg.storage.storage_type),
+            "message_broker_enabled": cfg.message_broker.enabled,
+        },
+        "dependencies": {
+            "database": storage_ok,
+            "redis_enabled": cfg.redis.enabled,
+        }
+    });
+
+    if storage_ok {
+        Ok(HttpResponse::Ok().json(details))
+    } else {
+        Ok(HttpResponse::ServiceUnavailable().json(details))
+    }
+}
diff --git a/src/server/startup.rs b/src/server/startup.rs
index 56021c8..c15955a 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -224,6 +224,10 @@ async fn start_http_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
             "/health/live",
             web::get().to(handlers::health::liveness_check),
         )
+        .route(
+            "/health/details",
+            web::get().to(handlers::health::health_details),
+        )
         // Metrics endpoints
         .route(
             "/metrics",
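Since the distroless image drops curl, the new endpoints are easiest to smoke-test from outside the container. A std-only sketch, assuming the HTTP listener is reachable on 127.0.0.1:8080 (the port the Dockerfile exposes); `/health/details` should answer 200 when storage is reachable and 503 otherwise:

```rust
use std::io::{Read, Write};
use std::net::TcpStream;

fn main() -> std::io::Result<()> {
    let mut stream = TcpStream::connect("127.0.0.1:8080")?;
    // Minimal HTTP/1.1 request; Connection: close lets read_to_string terminate.
    stream.write_all(b"GET /health/details HTTP/1.1\r\nHost: localhost\r\nConnection: close\r\n\r\n")?;

    let mut response = String::new();
    stream.read_to_string(&mut response)?;
    // Print just the status line, e.g. "HTTP/1.1 200 OK" or "HTTP/1.1 503 Service Unavailable".
    println!("{}", response.lines().next().unwrap_or_default());
    Ok(())
}
```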
From 12aa27f9f8a23f3c379fd2b3b2b11ae20dee4c60 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Wed, 10 Sep 2025 16:23:14 -0600
Subject: [PATCH 13/71] Fix static library issue

---
 Dockerfile | 17 -----------------
 1 file changed, 17 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 19ea031..6131157 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -27,21 +27,13 @@ COPY Cargo.toml Cargo.lock build.rs ./
 COPY proto ./proto
 
 RUN mkdir -p src && echo "fn main() {}" > src/main.rs && echo "pub fn dummy() {}" > src/lib.rs
-<<<<<<< HEAD
 RUN cargo build --release --features redis-cache --target ${RUST_TARGET}
-=======
-RUN cargo build --release --features redis-cache
->>>>>>> staging
 RUN rm -f src/main.rs src/lib.rs
 
 COPY src ./src
 
 RUN cargo clean
-<<<<<<< HEAD
 RUN cargo build --release --bin cosmic-sync-server --features redis-cache --target ${RUST_TARGET}
-=======
-RUN cargo build --release --bin cosmic-sync-server --features redis-cache
->>>>>>> staging
 
 # Runtime stage
 FROM gcr.io/distroless/static:nonroot
@@ -56,14 +48,5 @@ USER nonroot:nonroot
 
 EXPOSE 50051 8080
 
-<<<<<<< HEAD
 # Distroless lacks curl; rely on container orchestrator health checks
 ENTRYPOINT ["/app/cosmic-sync-server"]
-=======
-# Health check using HTTP endpoint
-HEALTHCHECK --interval=30s --timeout=5s --start-period=90s --retries=3 \
-    CMD curl -fsS --connect-timeout 2 --max-time 5 http://localhost:8080/health || exit 1
-
-# Run the server
-CMD ["./cosmic-sync-server"]
->>>>>>> staging

From d015f085ddac72dbeeaf4e4b4b23a674bc12d328 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Wed, 10 Sep 2025 16:44:29 -0600
Subject: [PATCH 14/71] Fix static library issue

---
 src/handlers/health.rs | 24 +++++++++++-------------
 1 file changed, 11 insertions(+), 13 deletions(-)

diff --git a/src/handlers/health.rs b/src/handlers/health.rs
index 1ca1f14..c52599e 100644
--- a/src/handlers/health.rs
+++ b/src/handlers/health.rs
@@ -35,15 +35,15 @@ pub async fn health_check() -> ActixResult<HttpResponse> {
 }
 
 /// HTTP readiness check endpoint
-pub async fn readiness_check(app_state: web::Data<crate::server::app_state::AppState>) -> ActixResult<HttpResponse> {
+pub async fn readiness_check(
+    app_state: web::Data<crate::server::app_state::AppState>,
+) -> ActixResult<HttpResponse> {
     // Perform basic dependency checks
-    let storage_ok = app_state
-        .storage
-        .health_check()
-        .await
-        .unwrap_or(false);
+    let storage_ok = app_state.storage.health_check().await.unwrap_or(false);
 
-    let message_broker_enabled = crate::server::app_state::AppState::get_config().message_broker.enabled;
+    let message_broker_enabled = crate::server::app_state::AppState::get_config()
+        .message_broker
+        .enabled;
 
     let status = if storage_ok { "ready" } else { "degraded" };
     let body = json!({
@@ -73,15 +73,13 @@ pub async fn liveness_check() -> ActixResult<HttpResponse> {
 }
 
 /// Detailed health for external debugging
-pub async fn health_details(app_state: web::Data<crate::server::app_state::AppState>) -> ActixResult<HttpResponse> {
+pub async fn health_details(
+    app_state: web::Data<crate::server::app_state::AppState>,
+) -> ActixResult<HttpResponse> {
     let cfg = crate::server::app_state::AppState::get_config();
 
     // Check storage availability
-    let storage_ok = app_state
-        .storage
-        .health_check()
-        .await
-        .unwrap_or(false);
+    let storage_ok = app_state.storage.health_check().await.unwrap_or(false);
 
     // Summarize configuration flags (safe subset)
     let details = json!({

From 53c48090c3546d657662be16eded28ff4df2acb6 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Thu, 11 Sep 2025 14:13:17 -0600
Subject: [PATCH 15/71] Update secret location

---
 ENVIRONMENT_SETUP.md  | 6 +++---
 src/config/secrets.rs | 2 +-
 src/main.rs           | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)
diff --git a/ENVIRONMENT_SETUP.md b/ENVIRONMENT_SETUP.md index bf19d8a..b82f8bc 100644 --- a/ENVIRONMENT_SETUP.md +++ b/ENVIRONMENT_SETUP.md @@ -34,7 +34,7 @@ ENVIRONMENT=development cargo run AWS Secrets Manager를 사용하여 설정을 관리합니다. **필요한 AWS Secret:** -- Secret Name: `staging/so-dod/cosmic-sync/config` +- Secret Name: `staging/genesis76/cosmic-sync/config` - Secret JSON: `aws-secret-staging.json` 참조 **설정 예시:** @@ -91,7 +91,7 @@ Staging/Production 환경에서는 다음 IAM 권한이 필요합니다: "secretsmanager:GetSecretValue" ], "Resource": [ - "arn:aws:secretsmanager:us-east-2:*:secret:staging/so-dod/cosmic-sync/config*", + "arn:aws:secretsmanager:us-east-2:*:secret:staging/genesis76/cosmic-sync/config*", "arn:aws:secretsmanager:us-east-2:*:secret:production/pop-os/cosmic-sync/config*" ] }, @@ -119,7 +119,7 @@ Staging/Production 환경에서는 다음 IAM 권한이 필요합니다: **Staging 환경:** ```bash aws secretsmanager create-secret \ - --name "staging/so-dod/cosmic-sync/config" \ + --name "staging/genesis76/cosmic-sync/config" \ --description "Cosmic Sync Server staging configuration" \ --secret-string file://aws-secret-staging.json \ --region us-east-2 diff --git a/src/config/secrets.rs b/src/config/secrets.rs index c6c483d..a529216 100644 --- a/src/config/secrets.rs +++ b/src/config/secrets.rs @@ -86,7 +86,7 @@ impl ConfigLoader { // Generate secret name based on environment using infrastructure patterns let secret_name = match self.environment { - Environment::Staging => "staging/so-dod/cosmic-sync/config", + Environment::Staging => "staging/genesis76/cosmic-sync/config", Environment::Production => "production/pop-os/cosmic-sync/config", Environment::Development => { warn!("Development environment should not use AWS Secrets Manager"); diff --git a/src/main.rs b/src/main.rs index a2899b8..6780ef7 100644 --- a/src/main.rs +++ b/src/main.rs @@ -188,7 +188,7 @@ async fn build_config() -> Result { if environment.is_cloud() { let secret_path = if environment == Environment::Staging { - "staging/so-dod/cosmic-sync/config" + "staging/genesis76/cosmic-sync/config" } else { "production/pop-os/cosmic-sync/config" }; From 3c1633f5bc4143e17f03c3c35ea27a4bff55210f Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Thu, 11 Sep 2025 16:55:30 -0600 Subject: [PATCH 16/71] Check system76/cosmic-sync-server repo --- .github/workflows/deploy-staging.yml | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 87c0b66..b3e2e4e 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -14,13 +14,16 @@ env: jobs: Publish: runs-on: ubuntu-latest + permissions: + id-token: write + contents: read steps: - - name: Configure AWS Credentials + - name: Configure AWS Credentials (OIDC) uses: aws-actions/configure-aws-credentials@v4 with: - aws-access-key-id: ${{ secrets.STAGING_AWS_ACCESS_KEY_ID }} - aws-secret-access-key: ${{ secrets.STAGING_AWS_SECRET_ACCESS_KEY }} + role-to-assume: ${{ secrets.STAGING_AWS_OIDC_ROLE_ARN }} + role-session-name: cosmic-sync-staging aws-region: ${{ env.AWS_REGION }} - name: Login to Amazon ECR @@ -37,6 +40,7 @@ jobs: env: ECR_REPOSITORY: ${{ secrets.STAGING_AWS_ECR_REPOSITORY }} IMAGE_TAG: ${{ github.sha }} + if: ${{ env.AWS_REGION != '' }} run: | docker build \ --build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ From 51c9b41b81fb16bf1a0310cb511d8563aa1b0f9b Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Thu, 11 Sep 2025 17:44:48 -0600 Subject: [PATCH 17/71] Check 
system76/cosmic-sync-server repo --- .github/workflows/deploy-staging.yml | 10 +++------- 1 file changed, 3 insertions(+), 7 deletions(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index b3e2e4e..87c0b66 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -14,16 +14,13 @@ env: jobs: Publish: runs-on: ubuntu-latest - permissions: - id-token: write - contents: read steps: - - name: Configure AWS Credentials (OIDC) + - name: Configure AWS Credentials uses: aws-actions/configure-aws-credentials@v4 with: - role-to-assume: ${{ secrets.STAGING_AWS_OIDC_ROLE_ARN }} - role-session-name: cosmic-sync-staging + aws-access-key-id: ${{ secrets.STAGING_AWS_ACCESS_KEY_ID }} + aws-secret-access-key: ${{ secrets.STAGING_AWS_SECRET_ACCESS_KEY }} aws-region: ${{ env.AWS_REGION }} - name: Login to Amazon ECR @@ -40,7 +37,6 @@ jobs: env: ECR_REPOSITORY: ${{ secrets.STAGING_AWS_ECR_REPOSITORY }} IMAGE_TAG: ${{ github.sha }} - if: ${{ env.AWS_REGION != '' }} run: | docker build \ --build-arg BUILD_DATE=$(date -u +"%Y-%m-%dT%H:%M:%SZ") \ From f66baea4fc8e24cf595983b2b8887624c0b8cd9f Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 12 Sep 2025 15:36:52 -0600 Subject: [PATCH 18/71] Fix Migration issue --- src/storage/mod.rs | 2 +- src/storage/mysql.rs | 18 ++++++++++++++++-- 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/src/storage/mod.rs b/src/storage/mod.rs index ae47e1f..c292df0 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -550,7 +550,7 @@ impl StorageFactory { } // 트랜잭션 자동 커밋 설정 확인(sqlx) - match sqlx::query_scalar::<_, String>("SELECT @@autocommit") + match sqlx::query_scalar::<_, i64>("SELECT @@autocommit") .fetch_optional(storage.get_sqlx_pool()) .await { diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 56c337d..1b651eb 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -642,8 +642,7 @@ impl MySqlStorage { INDEX (account_hash), INDEX (watcher_id), INDEX (account_hash, watcher_id), - FOREIGN KEY (account_hash) REFERENCES accounts(account_hash) ON DELETE CASCADE, - FOREIGN KEY (watcher_id) REFERENCES watchers(id) ON DELETE CASCADE + FOREIGN KEY (account_hash) REFERENCES accounts(account_hash) ON DELETE CASCADE )"; sqlx::query(create_watcher_conditions_table) @@ -801,6 +800,21 @@ impl MySqlStorage { StorageError::Database(format!("watchers 테이블 생성 실패: {}", e)) })?; info!("watchers 테이블 생성 완료"); + + // Ensure FK from watcher_conditions(watcher_id) to watchers(id) after watchers table exists + if let Err(e) = sqlx::query( + r#"ALTER TABLE watcher_conditions + ADD CONSTRAINT fk_watcher_conditions_watcher + FOREIGN KEY (watcher_id) REFERENCES watchers(id) ON DELETE CASCADE"#, + ) + .execute(self.get_sqlx_pool()) + .await + { + warn!( + "watcher_conditions FK(watcher_id) 추가 실패(이미 존재 가능): {}", + e + ); + } } // watchers 복합 유니크 인덱스 보장 (account_hash, local_group_id, watcher_id) From 7a0ba3aad88270fdbd9df802378b6bada7cae2fd Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 12 Sep 2025 17:15:48 -0600 Subject: [PATCH 19/71] Fix Migration issue --- src/storage/mod.rs | 7 ++----- src/storage/mysql.rs | 3 ++- 2 files changed, 4 insertions(+), 6 deletions(-) diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c292df0..9e92303 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -522,11 +522,8 @@ impl StorageFactory { let password = config.password.clone(); let database = config.name.clone(); - // 연결 URL 생성 (secure_auth=false로 SSL 비활성화) - let connection_url 
= format!(
-            "mysql://{}:{}@{}:{}/{}?ssl-mode=DISABLED",
-            user, password, host, port, database
-        );
+        // 연결 URL 생성 (DatabaseConfig::url 사용)
+        let connection_url = config.url();
diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs
index 1b651eb..055eca3 100644
--- a/src/storage/mysql.rs
+++ b/src/storage/mysql.rs
@@ -36,7 +36,8 @@ impl MySqlStorage {
     /// Create new storage from URL (builds both mysql_async and sqlx pools)
     pub async fn new_with_url(url: &str) -> Result<Self> {
         let sqlx_pool = SqlxMySqlPoolOptions::new()
-            .max_connections(10)
+            .max_connections(5)
+            .acquire_timeout(std::time::Duration::from_secs(15))
             .connect(url)
             .await
             .map_err(|e| StorageError::Connection(format!("Failed to connect via sqlx: {}", e)))?;

From 7b8f4ea05fe0c10b6a0b17d4a0f10f52f6ad6f39 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Fri, 12 Sep 2025 18:01:27 -0600
Subject: [PATCH 20/71] Fix Migration issue

---
 src/storage/file_storage.rs | 5 ++++-
 src/storage/mysql.rs        | 2 +-
 2 files changed, 5 insertions(+), 2 deletions(-)

diff --git a/src/storage/file_storage.rs b/src/storage/file_storage.rs
index 00019a8..626eef4 100644
--- a/src/storage/file_storage.rs
+++ b/src/storage/file_storage.rs
@@ -30,7 +30,10 @@ pub struct DatabaseFileStorage {
 impl DatabaseFileStorage {
     pub async fn new() -> Result<Self> {
         // Create MySQL storage from configuration
-        let config = crate::config::settings::Config::load();
+        let config = match crate::config::settings::Config::load_async().await {
+            Ok(cfg) => cfg,
+            Err(_) => crate::config::settings::Config::load(),
+        };
 
         match Self::create_mysql_storage_from_config(&config).await {
             Ok(mysql_storage) => {
diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs
index 055eca3..c2cd6b8 100644
--- a/src/storage/mysql.rs
+++ b/src/storage/mysql.rs
@@ -37,7 +37,7 @@ pub async fn new_with_url(url: &str) -> Result<Self> {
     let sqlx_pool = SqlxMySqlPoolOptions::new()
         .max_connections(5)
-        .acquire_timeout(std::time::Duration::from_secs(15))
+        .acquire_timeout(std::time::Duration::from_secs(30))
         .connect(url)
         .await
         .map_err(|e| StorageError::Connection(format!("Failed to connect via sqlx: {}", e)))?;

From 77a767cab5dabbc441722edbdd53e4f27d6be5ce Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 15:22:45 -0600
Subject: [PATCH 21/71] Update schema

---
 src/storage/mysql.rs | 19 +++++++++++++++++++
 1 file changed, 19 insertions(+)

diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs
index c2cd6b8..85741cd 100644
--- a/src/storage/mysql.rs
+++ b/src/storage/mysql.rs
@@ -354,6 +354,25 @@ impl MySqlStorage {
 
         info!("✅ files 테이블 생성 확인");
 
+        // Create file_data table for BLOB storage
+        let create_file_data_table = r"
+        CREATE TABLE IF NOT EXISTS file_data (
+            file_id BIGINT UNSIGNED NOT NULL PRIMARY KEY,
+            data LONGBLOB NOT NULL,
+            created_at BIGINT NOT NULL,
+            updated_at BIGINT NOT NULL,
+            INDEX (file_id)
+        )";
+
+        sqlx::query(create_file_data_table)
+            .execute(self.get_sqlx_pool())
+            .await
+            .map_err(|e| {
+                StorageError::Database(format!("Failed to create file_data table: {}", e))
+            })?;
+
+        info!("✅ file_data 테이블 생성 확인");
+
         // Create encryption_keys table
         let create_encryption_keys_table = r"
         CREATE TABLE IF NOT EXISTS encryption_keys (

From 26bd72b97a0864a76bc26721aa664fa74463c20c Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 19:01:48 -0600
Subject: [PATCH 22/71] Fix grpc port connection

---
 Cargo.lock | 64 ++++++++++++++++++++++++++++++++++++++-----
Cargo.toml | 7 +++-- build.rs | 6 ++++ src/server/startup.rs | 9 +++++- 4 files changed, 75 insertions(+), 11 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0802d60..c1cd42e 100755 --- a/Cargo.lock +++ b/Cargo.lock @@ -1507,6 +1507,7 @@ dependencies = [ "tokio-util", "tonic", "tonic-build", + "tonic-health", "tonic-reflection", "tracing", "tracing-actix-web", @@ -3914,6 +3915,20 @@ dependencies = [ "sct", ] +[[package]] +name = "rustls" +version = "0.22.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" +dependencies = [ + "log", + "ring 0.17.14", + "rustls-pki-types", + "rustls-webpki 0.102.8", + "subtle", + "zeroize", +] + [[package]] name = "rustls" version = "0.23.28" @@ -4016,6 +4031,17 @@ dependencies = [ "untrusted 0.9.0", ] +[[package]] +name = "rustls-webpki" +version = "0.102.8" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" +dependencies = [ + "ring 0.17.14", + "rustls-pki-types", + "untrusted 0.9.0", +] + [[package]] name = "rustls-webpki" version = "0.103.3" @@ -4848,6 +4874,17 @@ dependencies = [ "tokio", ] +[[package]] +name = "tokio-rustls" +version = "0.25.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" +dependencies = [ + "rustls 0.22.4", + "rustls-pki-types", + "tokio", +] + [[package]] name = "tokio-rustls" version = "0.26.2" @@ -4897,9 +4934,9 @@ dependencies = [ [[package]] name = "tonic" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d560933a0de61cf715926b9cac824d4c883c2c43142f787595e48280c40a1d0e" +checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" dependencies = [ "async-stream", "async-trait", @@ -4915,10 +4952,10 @@ dependencies = [ "percent-encoding", "pin-project", "prost", - "rustls 0.21.12", - "rustls-pemfile 1.0.4", + "rustls-pemfile 2.2.0", + "rustls-pki-types", "tokio", - "tokio-rustls 0.24.1", + "tokio-rustls 0.25.0", "tokio-stream", "tower 0.4.13", "tower-layer", @@ -4939,11 +4976,24 @@ dependencies = [ "syn 2.0.100", ] +[[package]] +name = "tonic-health" +version = "0.11.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "2cef6e24bc96871001a7e48e820ab240b3de2201e59b517cf52835df2f1d2350" +dependencies = [ + "async-stream", + "prost", + "tokio", + "tokio-stream", + "tonic", +] + [[package]] name = "tonic-reflection" -version = "0.10.2" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3fa37c513df1339d197f4ba21d28c918b9ef1ac1768265f11ecb6b7f1cba1b76" +checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" dependencies = [ "prost", "prost-types", diff --git a/Cargo.toml b/Cargo.toml index c4914cb..a9920fe 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -36,8 +36,9 @@ futures = "0.3.29" futures-util = "0.3.29" # gRPC and Protocol Buffers with performance features -tonic = { version = "0.10", features = ["tls", "gzip"] } -tonic-reflection = { version = "0.10", optional = true } +tonic = { version = "0.11", features = ["tls", "gzip"] } +tonic-reflection = { version = "0.11", optional = true } +tonic-health = "0.11" prost = "0.12" prost-types = "0.12" bytes = "1.5" @@ -134,7 +135,7 @@ tonic-build = { version = "0.10", features = ["prost"] } # Feature flags for conditional 
compilation
 [features]
-default = ["metrics", "compression"]
+default = ["metrics", "compression", "reflection"]
 
 # Storage backends
 s3-storage = []
diff --git a/build.rs b/build.rs
index a9efa86..2853e15 100755
--- a/build.rs
+++ b/build.rs
@@ -1,7 +1,13 @@
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     println!("cargo:rerun-if-changed=proto/sync.proto");
+
+    let out_dir = std::env::var("OUT_DIR")?;
+    let descriptor_path = std::path::PathBuf::from(&out_dir).join("sync_descriptor.bin");
+
     tonic_build::configure()
+        .file_descriptor_set_path(&descriptor_path)
         .extern_path(".google.protobuf.Timestamp", "::prost_types::Timestamp")
         .compile(&["proto/sync.proto"], &["proto"])?;
+
     Ok(())
 }
diff --git a/src/server/startup.rs b/src/server/startup.rs
index c15955a..4dad9d6 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -22,6 +22,7 @@ use crate::{
 
 use actix_cors::Cors;
 use actix_web::{middleware, web, App, HttpServer};
+use tonic_health::{server::health_reporter, ServingStatus};
 
 /// Optimized server startup with performance monitoring
 #[instrument(skip(config))]
@@ -134,6 +135,11 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
     let sync_service = SyncServiceImpl::new(app_state.clone());
     let sync_client_service = SyncClientServiceImpl::new(app_state.clone());
 
+    let (mut health_reporter, health_service) = health_reporter();
+    health_reporter
+        .set_service_status("", ServingStatus::Serving)
+        .await;
+
     // Build optimized gRPC server
     let server = Server::builder()
         // Timeout configurations
@@ -151,7 +157,8 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
         // .accept_compressed(tonic::codec::CompressionEncoding::Gzip)
         // Add services
         .add_service(SyncServiceServer::new(sync_service))
-        .add_service(SyncClientServiceServer::new(sync_client_service));
+        .add_service(SyncClientServiceServer::new(sync_client_service))
+        .add_service(health_service);
 
     // Add reflection service in development
     #[cfg(feature = "reflection")]

From d03e4b8b8b5c43a2eba339db37e0dc50365828b6 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 19:33:46 -0600
Subject: [PATCH 23/71] Fix grpc port connection

---
 src/server/startup.rs | 2 ++
 1 file changed, 2 insertions(+)

diff --git a/src/server/startup.rs b/src/server/startup.rs
index 4dad9d6..64fa35e 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -159,6 +159,7 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
         .add_service(SyncServiceServer::new(sync_service))
         .add_service(SyncClientServiceServer::new(sync_client_service))
         .add_service(health_service);
+    info!("gRPC health service registered");
 
     // Add reflection service in development
     #[cfg(feature = "reflection")]
@@ -172,6 +173,7 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
                 SyncError::Internal(format!("Failed to create reflection service: {}", e))
             })?;
 
+        info!("gRPC reflection service registered");
         server.add_service(reflection_service)
     };
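With the standard health service registered, any gRPC client can verify liveness without the reflection feature. A minimal client-side sketch, assuming a scratch crate with `tonic-health = "0.11"` and `tokio` (with the `macros`/`rt-multi-thread` features); the empty service name matches the `set_service_status("", ...)` call above:

```rust
use tonic_health::pb::health_client::HealthClient;
use tonic_health::pb::HealthCheckRequest;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let mut client = HealthClient::connect("http://127.0.0.1:50051").await?;
    let response = client
        .check(HealthCheckRequest {
            service: String::new(), // "" queries overall server health
        })
        .await?;
    println!("serving status: {:?}", response.into_inner().status());
    Ok(())
}
```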
From fb16db2cb9ccf970fe5f5196772445726e710e39 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 19:59:40 -0600
Subject: [PATCH 24/71] Fix grpc port connection

---
 src/server/startup.rs | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/server/startup.rs b/src/server/startup.rs
index 70be6e7..f7435b0 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -157,7 +157,8 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
         // .accept_compressed(tonic::codec::CompressionEncoding::Gzip)
         // Add services
         .add_service(SyncServiceServer::new(sync_service))
-        .add_service(SyncClientServiceServer::new(sync_client_service));
+        .add_service(SyncClientServiceServer::new(sync_client_service))
+        .add_service(health_service);

From 70f649c3a19dc448431e6218587f5243d737a03f Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 20:23:28 -0600
Subject: [PATCH 25/71] Fix grpc port connection

---
 build.rs           |  3 ++-
 proto/health.proto | 22 ++++++++++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)
 create mode 100644 proto/health.proto

diff --git a/build.rs b/build.rs
index 2853e15..9956723 100755
--- a/build.rs
+++ b/build.rs
@@ -1,5 +1,6 @@
 fn main() -> Result<(), Box<dyn std::error::Error>> {
     println!("cargo:rerun-if-changed=proto/sync.proto");
+    println!("cargo:rerun-if-changed=proto/health.proto");
 
     let out_dir = std::env::var("OUT_DIR")?;
     let descriptor_path = std::path::PathBuf::from(&out_dir).join("sync_descriptor.bin");
@@ -7,7 +8,7 @@ fn main() -> Result<(), Box<dyn std::error::Error>> {
     tonic_build::configure()
         .file_descriptor_set_path(&descriptor_path)
         .extern_path(".google.protobuf.Timestamp", "::prost_types::Timestamp")
-        .compile(&["proto/sync.proto"], &["proto"])?;
+        .compile(&["proto/sync.proto", "proto/health.proto"], &["proto"])?;
 
     Ok(())
 }
diff --git a/proto/health.proto b/proto/health.proto
new file mode 100644
index 0000000..2c640a9
--- /dev/null
+++ b/proto/health.proto
@@ -0,0 +1,22 @@
+syntax = "proto3";
+
+package grpc.health.v1;
+
+message HealthCheckRequest {
+  string service = 1;
+}
+
+message HealthCheckResponse {
+  enum ServingStatus {
+    UNKNOWN = 0;
+    SERVING = 1;
+    NOT_SERVING = 2;
+    SERVICE_UNKNOWN = 3;  // Used only by the Watch method.
+  }
+  ServingStatus status = 1;
+}
+
+service Health {
+  rpc Check(HealthCheckRequest) returns (HealthCheckResponse);
+  rpc Watch(HealthCheckRequest) returns (stream HealthCheckResponse);
+}
\ No newline at end of file

From beafa6158a7d56829a9234bd003427b3a016ac6a Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 21:11:08 -0600
Subject: [PATCH 26/71] Fix grpc port connection

---
 src/server/startup.rs | 1 -
 1 file changed, 1 deletion(-)

diff --git a/src/server/startup.rs b/src/server/startup.rs
index f7435b0..448107d 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -152,7 +152,6 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
         .initial_connection_window_size(Some(4 * 1024 * 1024)) // 4MB
         .initial_stream_window_size(Some(2 * 1024 * 1024)) // 2MB
         .tcp_nodelay(true)
-        .accept_http1(true)
         // Compression (methods not available in current tonic version)
         // .accept_compressed(tonic::codec::CompressionEncoding::Gzip)

From e7c62fd33a84fa54e905f1ab28bd9b22e155229b Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Sun, 14 Sep 2025 21:17:40 -0600
Subject: [PATCH 27/71] Fix grpc port connection

---
 src/server/startup.rs | 2 --
 1 file changed, 2 deletions(-)

diff --git a/src/server/startup.rs b/src/server/startup.rs
index 448107d..354470a 100644
--- a/src/server/startup.rs
+++ b/src/server/startup.rs
@@ -149,8 +149,6 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R
         .tcp_keepalive(Some(Duration::from_secs(60)))
         // Performance optimizations
         .max_concurrent_streams(Some(config.max_concurrent_requests as u32))
-        .initial_connection_window_size(Some(4 * 1024 * 1024)) // 4MB
-        .initial_stream_window_size(Some(2 * 1024 * 1024)) // 2MB
         .tcp_nodelay(true)
         // Compression (methods not available in current tonic version)
         // .accept_compressed(tonic::codec::CompressionEncoding::Gzip)
From c1d2bc1c59bd7a0632b269a4e5e9e33bc5db3a34 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Fri, 19 Sep 2025 16:11:58 -0600
Subject: [PATCH 28/71] Update login process - fix OAUTH infos

---
 src/server/app_state.rs | 27 +++++++++++++++++++++++++++
 1 file changed, 27 insertions(+)

diff --git a/src/server/app_state.rs b/src/server/app_state.rs
index d39bb5f..d8de196 100644
--- a/src/server/app_state.rs
+++ b/src/server/app_state.rs
@@ -116,6 +116,27 @@ impl AppState {
             Arc::new(NoopEventBus::default())
         }
     }
+    /// Parse OAuth-related keys from AWS Secrets into env (if not present)
+    async fn inject_oauth_env_from_secrets() {
+        if let Ok(loader) = crate::config::secrets::ConfigLoader::new().await {
+            for k in [
+                "OAUTH_CLIENT_ID",
+                "OAUTH_CLIENT_SECRET",
+                "OAUTH_REDIRECT_URI",
+                "OAUTH_AUTH_URL",
+                "OAUTH_TOKEN_URL",
+                "OAUTH_USER_INFO_URL",
+                "AUTH_SERVER_URL",
+            ] {
+                if std::env::var(k).is_err() {
+                    if let Some(v) = loader.get_config_value(k, None).await {
+                        std::env::set_var(k, &v);
+                    }
+                }
+            }
+        }
+    }
+
     /// parse MySQL URL and initialize storage
     async fn initialize_storage(url: &str) -> Result<Arc<dyn Storage>, AppError> {
         if url.starts_with("mysql://") {
@@ -191,6 +212,9 @@ impl AppState {
         // initialize notification manager
         let notification_manager = Arc::new(NotificationManager::new_with_storage(storage.clone()));
 
+        // inject OAuth configs from AWS Secrets to env if present (ENVIRONMENT must be set)
+        Self::inject_oauth_env_from_secrets().await;
+
         // initialize OAuth service
         let oauth = OAuthService::new(storage.clone());
 
@@ -371,6 +395,9 @@ impl AppState {
         // initialize notification manager
         let notification_manager = Arc::new(NotificationManager::new_with_storage(storage.clone()));
 
+        // inject OAuth configs from AWS Secrets to env if present (ENVIRONMENT must be set)
+        Self::inject_oauth_env_from_secrets().await;
+
         // initialize OAuth service
        let oauth = OAuthService::new(storage.clone());
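The helper added in this patch follows a simple precedence rule: a variable already present in the environment always wins over the Secrets Manager value. A self-contained sketch of just that rule (the `set_if_absent` name is illustrative, not from the source):

```rust
fn set_if_absent(key: &str, value_from_secrets: Option<&str>) {
    // Mirrors inject_oauth_env_from_secrets(): only fill in missing keys.
    if std::env::var(key).is_err() {
        if let Some(v) = value_from_secrets {
            std::env::set_var(key, v);
        }
    }
}

fn main() {
    std::env::set_var("OAUTH_CLIENT_ID", "from-env");
    set_if_absent("OAUTH_CLIENT_ID", Some("from-secrets"));
    set_if_absent("OAUTH_AUTH_URL", Some("https://example.test/oauth/authorize"));

    // The pre-existing env value is preserved; the missing key is filled in.
    assert_eq!(std::env::var("OAUTH_CLIENT_ID").unwrap(), "from-env");
    assert_eq!(
        std::env::var("OAUTH_AUTH_URL").unwrap(),
        "https://example.test/oauth/authorize"
    );
}
```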
From c879fa1936b69541ce227db7c5aa4e794caf5814 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Mon, 22 Sep 2025 10:17:51 -0600
Subject: [PATCH 29/71] Update login process - fix OAUTH infos

---
 src/storage/mysql_auth.rs | 42 ++++++++++++---------------------------
 1 file changed, 13 insertions(+), 29 deletions(-)

diff --git a/src/storage/mysql_auth.rs b/src/storage/mysql_auth.rs
index 1397e48..fad1408 100644
--- a/src/storage/mysql_auth.rs
+++ b/src/storage/mysql_auth.rs
@@ -27,24 +27,17 @@ pub trait MySqlAuthExt {
 impl MySqlAuthExt for MySqlStorage {
     /// 인증 토큰 생성
     async fn create_auth_token(&self, auth_token: &AuthToken) -> Result<()> {
-        // refresh_token이 옵션 타입이므로 적절하게 처리
-        let refresh_token = auth_token.refresh_token.as_deref().unwrap_or("");
-        let _scope = auth_token.scope.as_deref().unwrap_or("");
-
-        // 인증 토큰 정보 삽입 (sqlx)
+        // 스키마에 맞게 최소 필드 저장 (token 컬럼 사용)
         sqlx::query(
             r#"INSERT INTO auth_tokens (
-                id, account_hash, access_token, refresh_token,
-                token_type, expires_at, created_at
-            ) VALUES (?, ?, ?, ?, ?, ?, ?)"#,
+                id, account_hash, token, created_at, expires_at
+            ) VALUES (?, ?, ?, ?, ?)"#,
         )
         .bind(&auth_token.token_id)
         .bind(&auth_token.account_hash)
         .bind(&auth_token.access_token)
-        .bind(refresh_token)
-        .bind(&auth_token.token_type)
-        .bind(auth_token.expires_at.timestamp())
         .bind(auth_token.created_at.timestamp())
+        .bind(auth_token.expires_at.timestamp())
         .execute(self.get_sqlx_pool())
         .await
         .map_err(|e| StorageError::Database(format!("Failed to insert auth token: {}", e)))?;
@@ -55,13 +48,12 @@ impl MySqlAuthExt for MySqlStorage {
     /// 인증 토큰 조회
     async fn get_auth_token(&self, token: &str) -> Result<Option<AuthToken>> {
         debug!("데이터베이스에서 인증 토큰 조회: {}", token);
-        let token_data: Option<(String, String, String, String, Option<String>, i64, i64)> =
+        let token_data: Option<(String, String, String, i64, i64)> =
             sqlx::query_as(
                 r#"SELECT
-                id, account_hash, access_token, token_type,
-                refresh_token, expires_at, created_at
+                id, account_hash, token, expires_at, created_at
                 FROM auth_tokens
-                WHERE access_token = ?"#,
+                WHERE token = ?"#,
             )
             .bind(token)
             .fetch_optional(self.get_sqlx_pool())
             .await
             .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?;
 
         match token_data {
-            Some((
-                token_id,
-                account_hash,
-                access_token,
-                token_type,
-                refresh_token,
-                expires_at,
-                created_at,
-            )) => {
+            Some((token_id, account_hash, access_token, expires_at, created_at)) => {
                 // 타임스탬프를 DateTime으로 변환
                 let expires_at = match Utc.timestamp_opt(expires_at, 0) {
                     chrono::LocalResult::Single(dt) => dt,
@@ -101,13 +85,13 @@ impl MySqlAuthExt for MySqlStorage {
                     }
                 };
 
-                // AuthToken 객체 생성
+                // AuthToken 객체 생성 (스키마에 없는 필드는 기본값 적용)
                 let auth_token = AuthToken {
                     token_id,
                     account_hash,
                     access_token,
-                    token_type,
-                    refresh_token,
+                    token_type: "Bearer".to_string(),
+                    refresh_token: None,
                     scope: None,
                     expires_at,
                     created_at,
@@ -131,7 +115,7 @@ impl MySqlAuthExt for MySqlStorage {
         let result: Option<String> = sqlx::query_scalar(
             r#"SELECT account_hash FROM auth_tokens
                WHERE token = ?
                AND account_hash = ?
                AND expires_at > ?"#,
         )
@@ -154,7 +138,7 @@ impl MySqlAuthExt for MySqlStorage {
     /// 인증 토큰 삭제
     async fn delete_auth_token(&self, token: &str) -> Result<()> {
-        sqlx::query(r#"DELETE FROM auth_tokens WHERE access_token = ?"#)
+        sqlx::query(r#"DELETE FROM auth_tokens WHERE token = ?"#)
             .bind(token)
             .execute(self.get_sqlx_pool())
             .await

From e14710e5a75dc599693675c056c69f0b9d3fd987 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Mon, 22 Sep 2025 10:19:43 -0600
Subject: [PATCH 30/71] Update login process - fix saving auth token

---
 src/storage/mysql_auth.rs | 15 +++++++--------
 1 file changed, 7 insertions(+), 8 deletions(-)

diff --git a/src/storage/mysql_auth.rs b/src/storage/mysql_auth.rs
index fad1408..623b59e 100644
--- a/src/storage/mysql_auth.rs
+++ b/src/storage/mysql_auth.rs
@@ -48,17 +48,16 @@ impl MySqlAuthExt for MySqlStorage {
     /// 인증 토큰 조회
     async fn get_auth_token(&self, token: &str) -> Result<Option<AuthToken>> {
         debug!("데이터베이스에서 인증 토큰 조회: {}", token);
-        let token_data: Option<(String, String, String, i64, i64)> =
-            sqlx::query_as(
-                r#"SELECT
+        let token_data: Option<(String, String, String, i64, i64)> = sqlx::query_as(
+            r#"SELECT
                 id, account_hash, token, expires_at, created_at
                 FROM auth_tokens
                 WHERE token = ?"#,
-            )
-            .bind(token)
-            .fetch_optional(self.get_sqlx_pool())
-            .await
-            .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?;
+        )
+        .bind(token)
+        .fetch_optional(self.get_sqlx_pool())
+        .await
+        .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?;
 
         match token_data {
             Some((token_id, account_hash, access_token, expires_at, created_at)) => {

From 22fe3c8c1aff0e411ca7ac4de63abbc7966418af Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Tue, 23 Sep 2025 11:06:56 -0600
Subject: [PATCH 31/71] Merge 443/50051

---
 .cargo/config.toml              |   7 +
 docs/GRPC_CONFIGURATION.md      | 179 +++++++++
 migrations/add_usage_tables.sql | 166 +++++++++
 migrations/default_setup.sql    | 232 ++++++++++++
 scripts/migrate_usage_tables.sh | 169 +++++++++
 src/auth/oauth.rs               |  39 ++++-
src/handlers/api.rs | 4 +- src/handlers/auth_handler.rs | 3 + src/handlers/file/delete.rs | 44 ++- src/handlers/file/download.rs | 83 ++++- src/handlers/file/upload.rs | 71 +++- src/handlers/mod.rs | 3 + src/handlers/usage_handler.rs | 406 ++++++++++++++++++++ src/server/app_state.rs | 58 +++ src/server/startup.rs | 17 +- src/services/mod.rs | 2 + src/services/usage_service.rs | 642 ++++++++++++++++++++++++++++++++ src/storage/mod.rs | 1 + src/storage/mysql_usage.rs | 550 +++++++++++++++++++++++++++ 19 files changed, 2645 insertions(+), 31 deletions(-) create mode 100644 .cargo/config.toml create mode 100644 docs/GRPC_CONFIGURATION.md create mode 100644 migrations/add_usage_tables.sql create mode 100644 migrations/default_setup.sql create mode 100755 scripts/migrate_usage_tables.sh create mode 100644 src/handlers/usage_handler.rs create mode 100644 src/services/usage_service.rs create mode 100644 src/storage/mysql_usage.rs diff --git a/.cargo/config.toml b/.cargo/config.toml new file mode 100644 index 0000000..7333a6e --- /dev/null +++ b/.cargo/config.toml @@ -0,0 +1,7 @@ +[build] +# Disable check-cfg for compatibility +rustflags = [] + +[target.'cfg(all())'] +rustflags = [] + diff --git a/docs/GRPC_CONFIGURATION.md b/docs/GRPC_CONFIGURATION.md new file mode 100644 index 0000000..a0bb270 --- /dev/null +++ b/docs/GRPC_CONFIGURATION.md @@ -0,0 +1,179 @@ +# gRPC Server Configuration Documentation + +## Server Binding and Protocol + +### Current Configuration +- **Protocol**: HTTP/2 only (h2c - cleartext HTTP/2) +- **Port**: 50051 (default) +- **HTTP/1 Fallback**: **DISABLED** (http2_only = true) +- **TLS**: Not configured by default (use h2c) + +### Key Settings in `src/server/startup.rs` +```rust +Server::builder() + .http2_only(true) // Critical: Disable HTTP/1 fallback + .http2_keepalive_interval(Some(Duration::from_secs(30))) + .http2_keepalive_timeout(Some(Duration::from_secs(90))) + .tcp_keepalive(Some(Duration::from_secs(60))) + .max_concurrent_streams(Some(config.max_concurrent_requests as u32)) + .tcp_nodelay(true) +``` + +### Health Check Verification +```bash +# Check HTTP/2 frame errors in logs +journalctl -u cosmic-sync -n 100 | grep -i "frame\|http2\|protocol" + +# Test gRPC health endpoint +grpcurl -plaintext localhost:50051 grpc.health.v1.Health/Check +``` + +## OAuth Provider Configuration + +### Environment Variables +```bash +# OAuth Client Configuration (Public Client) +OAUTH_CLIENT_ID=cosmic_sync_client +OAUTH_CLIENT_SECRET="" # Empty for public clients + +# Redirect URI (must exactly match registered value) +OAUTH_REDIRECT_URI=https://sync.genesis76.com/oauth/callback + +# OAuth Provider Endpoints (production) +OAUTH_AUTH_URL=https://account.genesis76.com/oauth/authorize +OAUTH_TOKEN_URL=https://account.genesis76.com/oauth/token +OAUTH_USER_INFO_URL=https://account.genesis76.com/userinfo + +# Scope +OAUTH_SCOPE=profile:read +``` + +### Client Registration Requirements +1. **Client ID**: `cosmic_sync_client` +2. **Redirect URI**: Must exactly match `https://sync.genesis76.com/oauth/callback` +3. **Grant Type**: Authorization Code +4. **Client Type**: Public (no client_secret required) +5. 
**PKCE Support**: Optional (server should allow non-PKCE for now) + +### CheckAuthStatus Response +When authentication is complete, returns: +```json +{ + "is_complete": true, + "success": true, + "auth_token": "", + "account_hash": "", + "encryption_key": "", + "expires_in": 3600, + "session_id": "" +} +``` + +## Device Registration RPC + +### Available Methods +- `RegisterDevice`: Register new device or update existing +- `UpdateDeviceInfo`: Update device information +- `ListDevices`: List all devices for account +- `DeleteDevice`: Deactivate device + +### Database Schema +```sql +CREATE TABLE devices ( + id VARCHAR(36) NOT NULL, + account_hash VARCHAR(255) NOT NULL, + device_hash VARCHAR(255) NOT NULL PRIMARY KEY, + device_name VARCHAR(255) NOT NULL, + device_type VARCHAR(50), + os_type VARCHAR(50), + os_version VARCHAR(50), + app_version VARCHAR(50), + last_sync BIGINT, + created_at BIGINT NOT NULL, + updated_at BIGINT NOT NULL, + is_active BOOLEAN NOT NULL DEFAULT TRUE, + INDEX (account_hash), + FOREIGN KEY (account_hash) REFERENCES accounts(account_hash) ON DELETE CASCADE +) +``` + +### Verification Commands +```bash +# Check device registration in DB +mysql -h 127.0.0.1 -P 3306 -u root -precognizer --ssl-mode=DISABLED cosmic_sync \ + -e "SELECT * FROM devices WHERE account_hash='' ORDER BY updated_at DESC LIMIT 5;" + +# Test RegisterDevice RPC +grpcurl -plaintext -d '{ + "auth_token": "", + "account_hash": "", + "device_hash": "", + "os_version": "Ubuntu 22.04", + "app_version": "1.0.0" +}' localhost:50051 sync.SyncService/RegisterDevice +``` + +## Load Balancer/Proxy Requirements + +### Critical Settings +1. **HTTP/2 End-to-End**: Maintain HTTP/2 from client to server +2. **No PROXY Protocol**: Disable send-proxy/proxy_protocol +3. **No HTTP/1 Downgrade**: Never convert h2→h1 +4. **ALPN**: If using TLS, ensure "h2" in ALPN negotiation + +### HAProxy Example (TCP Passthrough) +```haproxy +frontend fe_grpc + bind :50051 + mode tcp + default_backend be_grpc + +backend be_grpc + mode tcp + server app1 10.0.0.10:50051 check +``` + +### Nginx Example (gRPC Proxy) +```nginx +upstream grpc_backend { + server localhost:50051; +} + +server { + listen 443 ssl http2; + + location / { + grpc_pass grpc://grpc_backend; + grpc_set_header TE trailers; + } +} +``` + +### AWS ALB Configuration +- Target Group Protocol: HTTP/2 (gRPC) +- Health Check: gRPC +- No PROXY protocol headers + +## Troubleshooting + +### Common Issues +1. **"Unsupported protocol" errors**: Server is correctly rejecting HTTP/1 clients +2. **Frame errors**: Check for PROXY protocol headers being sent +3. **Auth not completing**: Verify OAuth provider URLs and client registration +4. **Device not registering**: Check auth token validity and account_hash + +### Debug Commands +```bash +# Check server logs +journalctl -u cosmic-sync -f + +# Test HTTP/2 connection +curl -v --http2-prior-knowledge http://localhost:50051/ + +# List gRPC services +grpcurl -plaintext localhost:50051 list + +# Check OAuth configuration +curl https://account.genesis76.com/.well-known/openid-configuration +``` + diff --git a/migrations/add_usage_tables.sql b/migrations/add_usage_tables.sql new file mode 100644 index 0000000..1ccb61c --- /dev/null +++ b/migrations/add_usage_tables.sql @@ -0,0 +1,166 @@ +-- Usage tracking tables for storage and bandwidth management +-- Author: Cosmic Sync Server +-- Date: 2024 + +-- Check if accounts table exists before creating foreign key constraints +SET FOREIGN_KEY_CHECKS = 0; + +-- 1. 
Account storage usage tracking (real-time) +CREATE TABLE IF NOT EXISTS usage_storage ( + account_hash VARCHAR(255) PRIMARY KEY, + bytes_used BIGINT UNSIGNED NOT NULL DEFAULT 0, + bytes_limit BIGINT UNSIGNED NOT NULL DEFAULT 10737418240, -- 10GB default + bytes_soft_limit BIGINT UNSIGNED NOT NULL DEFAULT 8589934592, -- 8GB (80%) + files_count INT UNSIGNED NOT NULL DEFAULT 0, + last_warning_at DATETIME NULL, + hard_blocked BOOLEAN NOT NULL DEFAULT FALSE, + grace_period_until DATETIME NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX idx_updated (updated_at), + INDEX idx_blocked (hard_blocked), + INDEX idx_account_hash (account_hash) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- 2. Daily bandwidth usage tracking +CREATE TABLE IF NOT EXISTS usage_bandwidth_daily ( + account_hash VARCHAR(255) NOT NULL, + usage_date DATE NOT NULL, + upload_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + download_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + upload_count INT UNSIGNED NOT NULL DEFAULT 0, + download_count INT UNSIGNED NOT NULL DEFAULT 0, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (account_hash, usage_date), + INDEX idx_date (usage_date), + INDEX idx_account_date (account_hash, usage_date DESC) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- 3. Monthly bandwidth aggregation +CREATE TABLE IF NOT EXISTS usage_bandwidth_monthly ( + account_hash VARCHAR(255) NOT NULL, + usage_month VARCHAR(7) NOT NULL, -- 'YYYY-MM' format + upload_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + download_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + upload_count INT UNSIGNED NOT NULL DEFAULT 0, + download_count INT UNSIGNED NOT NULL DEFAULT 0, + bandwidth_limit BIGINT UNSIGNED NOT NULL DEFAULT 107374182400, -- 100GB/month default + bandwidth_soft_limit BIGINT UNSIGNED NOT NULL DEFAULT 85899345920, -- 80GB (80%) + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (account_hash, usage_month), + INDEX idx_month (usage_month), + INDEX idx_account_month (account_hash, usage_month DESC) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- 4. Transfer event tracking for idempotency and accurate accounting +CREATE TABLE IF NOT EXISTS transfer_events ( + event_id VARCHAR(64) PRIMARY KEY, + account_hash VARCHAR(255) NOT NULL, + file_id BIGINT UNSIGNED NOT NULL, + revision BIGINT NOT NULL, + transfer_type ENUM('upload', 'download') NOT NULL, + device_hash VARCHAR(255) NOT NULL, + bytes_transferred BIGINT UNSIGNED NOT NULL, + status ENUM('pending', 'success', 'failed') NOT NULL DEFAULT 'pending', + initiated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + completed_at DATETIME NULL, + failure_reason VARCHAR(500) NULL, + INDEX idx_account_date (account_hash, initiated_at DESC), + INDEX idx_file_revision (file_id, revision), + INDEX idx_status (status, initiated_at), + INDEX idx_device (device_hash, initiated_at DESC) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- 5. 
Account plan overrides (optional, for custom limits) +CREATE TABLE IF NOT EXISTS account_plan_overrides ( + account_hash VARCHAR(255) PRIMARY KEY, + storage_bytes_limit BIGINT UNSIGNED NULL, + storage_bytes_soft_limit BIGINT UNSIGNED NULL, + bandwidth_monthly_limit BIGINT UNSIGNED NULL, + bandwidth_monthly_soft_limit BIGINT UNSIGNED NULL, + custom_notes TEXT NULL, + effective_from DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + effective_until DATETIME NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX idx_effective (effective_from, effective_until) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- Initial data population from existing files +-- This should be run after tables are created +INSERT IGNORE INTO usage_storage (account_hash, bytes_used, files_count) +SELECT + account_hash, + COALESCE(SUM(size), 0) as bytes_used, + COUNT(*) as files_count +FROM files +WHERE is_deleted = FALSE +GROUP BY account_hash; + +-- Create stored procedure for atomic storage update +-- Drop if exists and recreate +DROP PROCEDURE IF EXISTS update_storage_usage; + +DELIMITER $$ + +CREATE PROCEDURE update_storage_usage( + IN p_account_hash VARCHAR(255), + IN p_bytes_delta BIGINT, + IN p_files_delta INT, + OUT p_success BOOLEAN, + OUT p_current_usage BIGINT, + OUT p_limit BIGINT +) +BEGIN + DECLARE v_current_usage BIGINT; + DECLARE v_limit BIGINT; + DECLARE v_new_usage BIGINT; + + -- Get current usage and limit + SELECT bytes_used, bytes_limit + INTO v_current_usage, v_limit + FROM usage_storage + WHERE account_hash = p_account_hash + FOR UPDATE; + + -- Calculate new usage + SET v_new_usage = v_current_usage + p_bytes_delta; + + -- Check if operation is allowed + IF p_bytes_delta < 0 OR v_new_usage <= v_limit THEN + -- Update usage + UPDATE usage_storage + SET bytes_used = GREATEST(0, v_new_usage), + files_count = GREATEST(0, files_count + p_files_delta), + updated_at = NOW() + WHERE account_hash = p_account_hash; + + SET p_success = TRUE; + SET p_current_usage = GREATEST(0, v_new_usage); + ELSE + SET p_success = FALSE; + SET p_current_usage = v_current_usage; + END IF; + + SET p_limit = v_limit; +END$$ + +DELIMITER ; + +-- Re-enable foreign key checks +SET FOREIGN_KEY_CHECKS = 1; + +-- Create view for current month bandwidth +CREATE OR REPLACE VIEW v_current_month_bandwidth AS +SELECT + account_hash, + DATE_FORMAT(CURDATE(), '%Y-%m') as usage_month, + SUM(upload_bytes) as upload_bytes, + SUM(download_bytes) as download_bytes, + SUM(upload_count) as upload_count, + SUM(download_count) as download_count +FROM usage_bandwidth_daily +WHERE usage_date >= DATE_FORMAT(CURDATE(), '%Y-%m-01') +GROUP BY account_hash; diff --git a/migrations/default_setup.sql b/migrations/default_setup.sql new file mode 100644 index 0000000..288709f --- /dev/null +++ b/migrations/default_setup.sql @@ -0,0 +1,232 @@ +-- =========================================== +-- Per-account default limits: +-- Storage: 5MB (soft 4MB) +-- Bandwidth(month): 10MB (soft 8MB) +-- Includes: +-- - Safe CREATE TABLE IF NOT EXISTS +-- - Accounts table column add (only if missing) +-- - AFTER INSERT trigger for new accounts +-- - Backfill for existing accounts +-- Compatible with MySQL 5.7/8.0 +-- =========================================== +SET NAMES utf8mb4 COLLATE utf8mb4_unicode_ci; +SET collation_connection = utf8mb4_unicode_ci; + +-- Drop trigger if exists (for idempotency) +DROP TRIGGER IF EXISTS 
trg_accounts_after_insert_usage_defaults; + +-- ---------- 1) Runtime tables (idempotent) ---------- +CREATE TABLE IF NOT EXISTS usage_storage ( + account_hash VARCHAR(255) PRIMARY KEY, + bytes_used BIGINT UNSIGNED NOT NULL DEFAULT 0, + bytes_limit BIGINT UNSIGNED NOT NULL DEFAULT 0, + bytes_soft_limit BIGINT UNSIGNED NOT NULL DEFAULT 0, + files_count INT UNSIGNED NOT NULL DEFAULT 0, + last_warning_at DATETIME NULL, + hard_blocked BOOLEAN NOT NULL DEFAULT FALSE, + grace_period_until DATETIME NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX idx_updated (updated_at), + INDEX idx_blocked (hard_blocked), + INDEX idx_account_hash (account_hash) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +CREATE TABLE IF NOT EXISTS usage_bandwidth_monthly ( + account_hash VARCHAR(255) NOT NULL, + usage_month VARCHAR(7) NOT NULL, -- 'YYYY-MM' + upload_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + download_bytes BIGINT UNSIGNED NOT NULL DEFAULT 0, + upload_count INT UNSIGNED NOT NULL DEFAULT 0, + download_count INT UNSIGNED NOT NULL DEFAULT 0, + bandwidth_limit BIGINT UNSIGNED NOT NULL DEFAULT 0, + bandwidth_soft_limit BIGINT UNSIGNED NOT NULL DEFAULT 0, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + PRIMARY KEY (account_hash, usage_month), + INDEX idx_month (usage_month), + INDEX idx_account_month (account_hash, usage_month DESC) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +CREATE TABLE IF NOT EXISTS account_plan_overrides ( + account_hash VARCHAR(255) PRIMARY KEY, + storage_bytes_limit BIGINT UNSIGNED NULL, + storage_bytes_soft_limit BIGINT UNSIGNED NULL, + bandwidth_monthly_limit BIGINT UNSIGNED NULL, + bandwidth_monthly_soft_limit BIGINT UNSIGNED NULL, + custom_notes TEXT NULL, + effective_from DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + effective_until DATETIME NULL, + created_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP, + updated_at DATETIME NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP, + INDEX idx_effective (effective_from, effective_until) +) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci; + +-- ---------- 2) Add columns to accounts (only if missing) ---------- +-- plan_tier +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'plan_tier' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN plan_tier VARCHAR(32) NULL AFTER name', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- subscription_status +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'subscription_status' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN subscription_status ENUM(''inactive'',''active'',''past_due'',''cancelled'') NOT NULL DEFAULT ''inactive'' AFTER plan_tier', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- storage_bytes_limit +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'storage_bytes_limit' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN storage_bytes_limit BIGINT UNSIGNED NULL AFTER 
subscription_status', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- storage_bytes_soft_limit +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'storage_bytes_soft_limit' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN storage_bytes_soft_limit BIGINT UNSIGNED NULL AFTER storage_bytes_limit', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- bandwidth_monthly_limit +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'bandwidth_monthly_limit' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN bandwidth_monthly_limit BIGINT UNSIGNED NULL AFTER storage_bytes_soft_limit', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- bandwidth_monthly_soft_limit +SET @col_exists := ( + SELECT COUNT(*) FROM information_schema.columns + WHERE table_schema = DATABASE() AND table_name = 'accounts' AND column_name = 'bandwidth_monthly_soft_limit' +); +SET @sql := IF(@col_exists = 0, + 'ALTER TABLE accounts ADD COLUMN bandwidth_monthly_soft_limit BIGINT UNSIGNED NULL AFTER bandwidth_monthly_limit', + 'SELECT 1'); +PREPARE stmt FROM @sql; EXECUTE stmt; DEALLOCATE PREPARE stmt; + +-- ---------- 3) Trigger for new accounts (default limits) ---------- +DELIMITER $$ + +CREATE TRIGGER trg_accounts_after_insert_usage_defaults +AFTER INSERT ON accounts +FOR EACH ROW +BEGIN + DECLARE v_def_storage_hard BIGINT UNSIGNED DEFAULT 5242880; -- 5 MB + DECLARE v_def_storage_soft BIGINT UNSIGNED DEFAULT 4194304; -- 4 MB (80%) + DECLARE v_def_bw_hard BIGINT UNSIGNED DEFAULT 10485760; -- 10 MB + DECLARE v_def_bw_soft BIGINT UNSIGNED DEFAULT 8388608; -- 8 MB (80%) + DECLARE v_cur_month VARCHAR(7) DEFAULT (SELECT DATE_FORMAT(CURDATE(), '%Y-%m') COLLATE utf8mb4_unicode_ci); + + -- usage_storage + INSERT INTO usage_storage (account_hash, bytes_used, files_count, bytes_limit, bytes_soft_limit, hard_blocked) + VALUES (NEW.account_hash, 0, 0, v_def_storage_hard, v_def_storage_soft, 0) + ON DUPLICATE KEY UPDATE + bytes_limit = VALUES(bytes_limit), + bytes_soft_limit = VALUES(bytes_soft_limit); + + -- account_plan_overrides + INSERT INTO account_plan_overrides( + account_hash, + storage_bytes_limit, storage_bytes_soft_limit, + bandwidth_monthly_limit, bandwidth_monthly_soft_limit, + effective_from, effective_until + ) VALUES ( + NEW.account_hash, + v_def_storage_hard, v_def_storage_soft, + v_def_bw_hard, v_def_bw_soft, + NOW(), NULL + ) + ON DUPLICATE KEY UPDATE + storage_bytes_limit = VALUES(storage_bytes_limit), + storage_bytes_soft_limit = VALUES(storage_bytes_soft_limit), + bandwidth_monthly_limit = VALUES(bandwidth_monthly_limit), + bandwidth_monthly_soft_limit = VALUES(bandwidth_monthly_soft_limit), + effective_from = VALUES(effective_from), + effective_until = VALUES(effective_until); + + -- usage_bandwidth_monthly (current month) + INSERT INTO usage_bandwidth_monthly (account_hash, usage_month, bandwidth_limit, bandwidth_soft_limit) + VALUES (NEW.account_hash, v_cur_month, v_def_bw_hard, v_def_bw_soft) + ON DUPLICATE KEY UPDATE + bandwidth_limit = VALUES(bandwidth_limit), + bandwidth_soft_limit = VALUES(bandwidth_soft_limit); +END$$ + +DELIMITER ; + +-- ---------- 4) Backfill for existing accounts ---------- +SET @DEF_STORAGE_HARD := 5242880; -- 5 MB +SET @DEF_STORAGE_SOFT := 4194304; 
-- 4 MB +SET @DEF_BW_HARD := 10485760; -- 10 MB +SET @DEF_BW_SOFT := 8388608; -- 8 MB +SET @CUR_MONTH := (SELECT DATE_FORMAT(CURDATE(), '%Y-%m') COLLATE utf8mb4_unicode_ci); + +-- usage_storage: create missing rows with defaults +INSERT INTO usage_storage (account_hash, bytes_used, files_count, bytes_limit, bytes_soft_limit, hard_blocked) +SELECT a.account_hash, 0, 0, @DEF_STORAGE_HARD, @DEF_STORAGE_SOFT, 0 +FROM accounts a +LEFT JOIN usage_storage s ON s.account_hash COLLATE utf8mb4_unicode_ci = a.account_hash COLLATE utf8mb4_unicode_ci +WHERE s.account_hash IS NULL; + +-- usage_storage: fill NULL/0 limits with defaults +UPDATE usage_storage s +SET + s.bytes_limit = CASE WHEN (s.bytes_limit IS NULL OR s.bytes_limit = 0) THEN @DEF_STORAGE_HARD ELSE s.bytes_limit END, + s.bytes_soft_limit = CASE WHEN (s.bytes_soft_limit IS NULL OR s.bytes_soft_limit = 0) THEN @DEF_STORAGE_SOFT ELSE s.bytes_soft_limit END; + +-- account_plan_overrides: create missing rows with defaults +INSERT INTO account_plan_overrides ( + account_hash, + storage_bytes_limit, storage_bytes_soft_limit, + bandwidth_monthly_limit, bandwidth_monthly_soft_limit, + effective_from, effective_until +) +SELECT + a.account_hash, + @DEF_STORAGE_HARD, @DEF_STORAGE_SOFT, + @DEF_BW_HARD, @DEF_BW_SOFT, + NOW(), NULL +FROM accounts a +LEFT JOIN account_plan_overrides o ON o.account_hash COLLATE utf8mb4_unicode_ci = a.account_hash COLLATE utf8mb4_unicode_ci +WHERE o.account_hash IS NULL; + +-- usage_bandwidth_monthly (current month): create missing rows with defaults +INSERT INTO usage_bandwidth_monthly (account_hash, usage_month, bandwidth_limit, bandwidth_soft_limit) +SELECT a.account_hash, @CUR_MONTH, @DEF_BW_HARD, @DEF_BW_SOFT +FROM accounts a +LEFT JOIN usage_bandwidth_monthly m + ON m.account_hash COLLATE utf8mb4_unicode_ci = a.account_hash COLLATE utf8mb4_unicode_ci AND m.usage_month COLLATE utf8mb4_unicode_ci = @CUR_MONTH +WHERE m.account_hash IS NULL; + +-- usage_bandwidth_monthly: fill NULL/0 limits with defaults +UPDATE usage_bandwidth_monthly m +SET + m.bandwidth_limit = CASE WHEN (m.bandwidth_limit IS NULL OR m.bandwidth_limit = 0) THEN @DEF_BW_HARD ELSE m.bandwidth_limit END, + m.bandwidth_soft_limit = CASE WHEN (m.bandwidth_soft_limit IS NULL OR m.bandwidth_soft_limit = 0) THEN @DEF_BW_SOFT ELSE m.bandwidth_soft_limit END +WHERE m.usage_month COLLATE utf8mb4_unicode_ci = @CUR_MONTH; + +-- ---------- 5) (Optional) Verify ---------- +-- SELECT a.account_hash, s.bytes_soft_limit, s.bytes_limit +-- FROM accounts a JOIN usage_storage s USING(account_hash) LIMIT 5; +-- SELECT account_hash, usage_month, bandwidth_soft_limit, bandwidth_limit +-- FROM usage_bandwidth_monthly WHERE usage_month=@CUR_MONTH LIMIT 5; +-- SELECT * FROM account_plan_overrides LIMIT 5; \ No newline at end of file diff --git a/scripts/migrate_usage_tables.sh b/scripts/migrate_usage_tables.sh new file mode 100755 index 0000000..7858b13 --- /dev/null +++ b/scripts/migrate_usage_tables.sh @@ -0,0 +1,169 @@ +#!/bin/bash +# Usage tracking tables migration script +# Supports both local and server environments + +set -e + +# Color codes for output +RED='\033[0;31m' +GREEN='\033[0;32m' +YELLOW='\033[1;33m' +NC='\033[0m' # No Color + +echo -e "${GREEN}========================================${NC}" +echo -e "${GREEN} Usage Tracking Tables Migration${NC}" +echo -e "${GREEN}========================================${NC}" + +# Function to get DB credentials from environment or AWS Secrets Manager +get_db_config() { + # Check if we're in AWS environment + if [ -n 
"$ENVIRONMENT" ]; then + echo -e "${YELLOW}Detected AWS environment: $ENVIRONMENT${NC}" + + # Try to get config from AWS Secrets Manager + if command -v aws &> /dev/null; then + SECRET_NAME="${AWS_SECRET_NAME:-$ENVIRONMENT/cosmic-sync/config}" + echo -e "${YELLOW}Fetching DB config from AWS Secrets Manager: $SECRET_NAME${NC}" + + SECRET=$(aws secretsmanager get-secret-value --secret-id "$SECRET_NAME" --query SecretString --output text 2>/dev/null || echo "{}") + + if [ "$SECRET" != "{}" ]; then + DB_HOST=$(echo "$SECRET" | jq -r '.DB_HOST // empty') + DB_PORT=$(echo "$SECRET" | jq -r '.DB_PORT // empty') + DB_USER=$(echo "$SECRET" | jq -r '.DB_USER // empty') + DB_PASSWORD=$(echo "$SECRET" | jq -r '.DB_PASSWORD // empty') + DB_NAME=$(echo "$SECRET" | jq -r '.DB_NAME // empty') + fi + fi + fi + + # Use environment variables or defaults + DB_HOST="${DB_HOST:-${DATABASE_HOST:-127.0.0.1}}" + DB_PORT="${DB_PORT:-${DATABASE_PORT:-3306}}" + DB_USER="${DB_USER:-${DATABASE_USER:-root}}" + DB_PASSWORD="${DB_PASSWORD:-${DATABASE_PASSWORD:-recognizer}}" + DB_NAME="${DB_NAME:-${DATABASE_NAME:-cosmic_sync}}" + + # For local development, also check DATABASE_URL + if [ -z "$DB_PASSWORD" ] && [ -n "$DATABASE_URL" ]; then + # Parse DATABASE_URL (mysql://user:pass@host:port/dbname) + if [[ "$DATABASE_URL" =~ mysql://([^:]+):([^@]+)@([^:]+):([0-9]+)/(.+) ]]; then + DB_USER="${BASH_REMATCH[1]}" + DB_PASSWORD="${BASH_REMATCH[2]}" + DB_HOST="${BASH_REMATCH[3]}" + DB_PORT="${BASH_REMATCH[4]}" + DB_NAME="${BASH_REMATCH[5]}" + fi + fi +} + +# Get configuration +get_db_config + +# Display configuration (hide password) +echo -e "${YELLOW}Database Configuration:${NC}" +echo " Host: $DB_HOST" +echo " Port: $DB_PORT" +echo " User: $DB_USER" +echo " Database: $DB_NAME" +echo " Password: ***" + +# Find migration file +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +MIGRATION_FILE="$SCRIPT_DIR/../migrations/add_usage_tables.sql" + +if [ ! -f "$MIGRATION_FILE" ]; then + echo -e "${RED}Error: Migration file not found at $MIGRATION_FILE${NC}" + exit 1 +fi + +echo -e "${YELLOW}Using migration file: $MIGRATION_FILE${NC}" + +# Function to execute SQL +execute_sql() { + local sql_file=$1 + + if [ -n "$DB_PASSWORD" ]; then + mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" --ssl-mode=DISABLED "$DB_NAME" < "$sql_file" 2>&1 + else + mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" --ssl-mode=DISABLED "$DB_NAME" < "$sql_file" 2>&1 + fi +} + +# Check if tables already exist +check_tables_exist() { + local query="SELECT COUNT(*) FROM information_schema.tables WHERE table_schema = '$DB_NAME' AND table_name IN ('usage_storage', 'usage_bandwidth_daily', 'usage_bandwidth_monthly', 'transfer_events', 'account_plan_overrides');" + + if [ -n "$DB_PASSWORD" ]; then + count=$(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" --ssl-mode=DISABLED "$DB_NAME" -sN -e "$query" 2>/dev/null || echo "0") + else + count=$(mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" --ssl-mode=DISABLED "$DB_NAME" -sN -e "$query" 2>/dev/null || echo "0") + fi + + echo "$count" +} + +# Check existing tables +existing_count=$(check_tables_exist) +if [ "$existing_count" -gt 0 ]; then + echo -e "${YELLOW}Found $existing_count existing usage tables${NC}" + read -p "Do you want to continue with migration? This will not drop existing tables. (y/n): " -n 1 -r + echo + if [[ ! 
$REPLY =~ ^[Yy]$ ]]; then + echo -e "${YELLOW}Migration cancelled${NC}" + exit 0 + fi +fi + +# Run migration +echo -e "${YELLOW}Running migration...${NC}" + +if execute_sql "$MIGRATION_FILE"; then + echo -e "${GREEN}✅ Migration completed successfully!${NC}" + + # Verify tables were created + new_count=$(check_tables_exist) + echo -e "${GREEN}Created/verified $new_count usage tracking tables${NC}" + + # Show table summary + echo -e "${YELLOW}Table Summary:${NC}" + if [ -n "$DB_PASSWORD" ]; then + mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" -p"$DB_PASSWORD" --ssl-mode=DISABLED "$DB_NAME" -e " + SELECT + table_name AS 'Table', + ROUND(((data_length + index_length) / 1024 / 1024), 2) AS 'Size (MB)', + table_rows AS 'Rows' + FROM information_schema.tables + WHERE table_schema = '$DB_NAME' + AND table_name IN ('usage_storage', 'usage_bandwidth_daily', 'usage_bandwidth_monthly', 'transfer_events', 'account_plan_overrides') + ORDER BY table_name;" 2>/dev/null || true + else + mysql -h "$DB_HOST" -P "$DB_PORT" -u "$DB_USER" --ssl-mode=DISABLED "$DB_NAME" -e " + SELECT + table_name AS 'Table', + ROUND(((data_length + index_length) / 1024 / 1024), 2) AS 'Size (MB)', + table_rows AS 'Rows' + FROM information_schema.tables + WHERE table_schema = '$DB_NAME' + AND table_name IN ('usage_storage', 'usage_bandwidth_daily', 'usage_bandwidth_monthly', 'transfer_events', 'account_plan_overrides') + ORDER BY table_name;" 2>/dev/null || true + fi + + echo -e "${GREEN}========================================${NC}" + echo -e "${GREEN} Migration Complete!${NC}" + echo -e "${GREEN}========================================${NC}" + echo + echo "Next steps:" + echo "1. Set environment variables:" + echo " export USAGE_TRACKING_ENABLED=true" + echo " export STORAGE_LIMIT_BYTES=10737418240 # 10GB" + echo " export BANDWIDTH_LIMIT_MONTHLY_BYTES=107374182400 # 100GB" + echo + echo "2. 
Restart the server to apply changes" + echo +else + echo -e "${RED}❌ Migration failed!${NC}" + echo -e "${RED}Please check the error messages above${NC}" + exit 1 +fi + diff --git a/src/auth/oauth.rs b/src/auth/oauth.rs index 645041b..149f005 100644 --- a/src/auth/oauth.rs +++ b/src/auth/oauth.rs @@ -103,23 +103,28 @@ pub struct OAuthService { impl OAuthService { /// create new OAuth service pub fn new(storage: Arc) -> Self { - let client_id = - std::env::var("OAUTH_CLIENT_ID").unwrap_or_else(|_| "cosmic-sync".to_string()); + // OAuth client configuration + // client_id should match the registered client in OAuth provider + let client_id = std::env::var("OAUTH_CLIENT_ID") + .unwrap_or_else(|_| "cosmic_sync_client".to_string()); + // For public clients (no client_secret required for PKCE) let client_secret = std::env::var("OAUTH_CLIENT_SECRET") - .unwrap_or_else(|_| "cosmicsecretsocmicsecret".to_string()); + .unwrap_or_else(|_| "".to_string()); + // Redirect URI must exactly match the registered value let redirect_uri = std::env::var("OAUTH_REDIRECT_URI") - .unwrap_or_else(|_| "http://localhost:8080/oauth/callback".to_string()); + .unwrap_or_else(|_| "https://sync.genesis76.com/oauth/callback".to_string()); + // OAuth provider endpoints (use actual provider URLs, not localhost) let auth_url = std::env::var("OAUTH_AUTH_URL") - .unwrap_or_else(|_| "https://localhost:4000/oauth/authorize".to_string()); + .unwrap_or_else(|_| "https://account.genesis76.com/oauth/authorize".to_string()); let token_url = std::env::var("OAUTH_TOKEN_URL") - .unwrap_or_else(|_| "https://localhost:4000/oauth/token".to_string()); + .unwrap_or_else(|_| "https://account.genesis76.com/oauth/token".to_string()); let user_info_url = std::env::var("OAUTH_USER_INFO_URL") - .unwrap_or_else(|_| "https://localhost:4000/userinfo".to_string()); + .unwrap_or_else(|_| "https://account.genesis76.com/userinfo".to_string()); let scope = std::env::var("OAUTH_SCOPE").unwrap_or_else(|_| "profile:read".to_string()); @@ -442,14 +447,20 @@ impl OAuthService { // create http client let client = Client::new(); - // token request parameters - let params = [ - ("code", code), - ("client_id", &self.client_id), - ("client_secret", &self.client_secret), - ("redirect_uri", &self.redirect_uri), - ("grant_type", "authorization_code"), + // Token request parameters + // For public clients, client_secret may be empty + // Consider adding PKCE support if required by provider + let mut params = vec![ + ("code", code.to_string()), + ("client_id", self.client_id.clone()), + ("redirect_uri", self.redirect_uri.clone()), + ("grant_type", "authorization_code".to_string()), ]; + + // Only include client_secret if it's not empty (for public clients) + if !self.client_secret.is_empty() { + params.push(("client_secret", self.client_secret.clone())); + } #[derive(Deserialize)] struct TokenResponse { diff --git a/src/handlers/api.rs b/src/handlers/api.rs index e5fca1a..ef01cf5 100644 --- a/src/handlers/api.rs +++ b/src/handlers/api.rs @@ -5,10 +5,12 @@ use serde_json::json; /// Get API information pub async fn api_info() -> Result { + let auth_http_status_url = std::env::var("AUTH_HTTP_STATUS_URL").unwrap_or_else(|_| "".to_string()); Ok(HttpResponse::Ok().json(json!({ "name": "Cosmic Sync Server", "version": env!("CARGO_PKG_VERSION"), - "description": "High-performance synchronization server for COSMIC Desktop Environment" + "description": "High-performance synchronization server for COSMIC Desktop Environment", + "auth_http_status_url": auth_http_status_url }))) } 
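The token-exchange change above stops hard-coding a client_secret and only sends one when configured, which matches the public-client registration described in docs/GRPC_CONFIGURATION.md. The hunk's comment leaves PKCE as a follow-up; if the provider later enforces it, the authorize request needs a code_challenge and the token request a matching code_verifier. A minimal sketch of generating that pair per RFC 7636, assuming the rand, sha2, and base64 crates (the helper name pkce_pair is illustrative and not part of this patch):

```rust
use base64::engine::general_purpose::URL_SAFE_NO_PAD;
use base64::Engine as _;
use rand::RngCore;
use sha2::{Digest, Sha256};

// Illustrative PKCE helper (not in the patch): returns (code_verifier, code_challenge).
fn pkce_pair() -> (String, String) {
    // 32 random bytes -> 43-character base64url verifier
    let mut bytes = [0u8; 32];
    rand::thread_rng().fill_bytes(&mut bytes);
    let verifier = URL_SAFE_NO_PAD.encode(bytes);

    // challenge = BASE64URL(SHA256(verifier)), i.e. the S256 method
    let challenge = URL_SAFE_NO_PAD.encode(Sha256::digest(verifier.as_bytes()));
    (verifier, challenge)
}
```

The challenge would go on the authorize URL as code_challenge with code_challenge_method=S256, and ("code_verifier", verifier) would be pushed into params alongside the existing fields during the exchange.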
diff --git a/src/handlers/auth_handler.rs b/src/handlers/auth_handler.rs index 0c8ccf0..e3855d9 100644 --- a/src/handlers/auth_handler.rs +++ b/src/handlers/auth_handler.rs @@ -802,6 +802,9 @@ pub async fn handle_check_auth_status( "Authentication complete for device_hash: {}, returning full auth data", device_hash ); + if let Ok(url) = std::env::var("AUTH_HTTP_STATUS_URL") { + info!("AUTH_HTTP_STATUS_URL hint: {}", url); + } crate::handlers::oauth::AuthStatusResponse { authenticated: true, token: Some(resp.auth_token.clone()), diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 9e76186..712e9d6 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -1,9 +1,10 @@ use tonic::{Response, Status}; -use tracing::{debug, error, info}; +use tracing::{debug, error, info, warn}; use super::super::file_handler::FileHandler; use crate::sync::{DeleteFileRequest, DeleteFileResponse}; use crate::utils::response; +use crate::services::usage_service::{UsageOperation, OperationResult}; pub async fn handle_delete_file( handler: &FileHandler, @@ -16,14 +17,15 @@ pub async fn handle_delete_file( info!(" filename: {}", req.filename); info!(" revision: {}", req.revision); - match handler.app_state.oauth.verify_token(&req.auth_token).await { - Ok(v) if v.valid => {} + let verified = match handler.app_state.oauth.verify_token(&req.auth_token).await { + Ok(v) if v.valid => v, _ => { return Ok(Response::new(response::file_delete_error( "Authentication failed", ))) } - } + }; + let server_account_hash = verified.account_hash; let file_id = if req.file_id > 0 { match handler.validate_file_for_deletion(req.file_id).await { @@ -37,8 +39,38 @@ pub async fn handle_delete_file( } }; - debug!("Executing file deletion: file_id={}", file_id); - match handler.app_state.file.delete_file(file_id).await { + // Get file size before deletion for usage tracking + let file_size = match handler.app_state.file.get_file_info(file_id).await { + Ok(Some(info)) => info.size as u64, + _ => { + warn!("Could not get file size for deletion tracking, using 0"); + 0 + } + }; + + debug!("Executing file deletion: file_id={}, size={}", file_id, file_size); + let delete_result = handler.app_state.file.delete_file(file_id).await; + + // Record storage decrease after deletion + if delete_result.is_ok() && file_size > 0 { + if let Err(e) = handler + .app_state + .usage_checker + .record_after_operation( + &server_account_hash, + UsageOperation::Delete { + bytes: file_size, + file_id, + }, + OperationResult::Success, + ) + .await + { + error!("Failed to record storage decrease after deletion: {}", e); + } + } + + match delete_result { Ok(_) => { info!( "File deleted successfully: filename={}, file_id={}", diff --git a/src/handlers/file/download.rs b/src/handlers/file/download.rs index 400cbb7..5245e4b 100644 --- a/src/handlers/file/download.rs +++ b/src/handlers/file/download.rs @@ -1,9 +1,10 @@ use tonic::{Response, Status}; -use tracing::{error, info}; +use tracing::{debug, error, info, warn}; use super::super::file_handler::FileHandler; use crate::sync::{DownloadFileRequest, DownloadFileResponse}; use crate::utils::response; +use crate::services::usage_service::{UsageOperation, OperationResult}; use base64::Engine as _; fn parse_account_key(s: &str) -> Option<[u8; 32]> { @@ -32,14 +33,15 @@ pub async fn handle_download_file( let file_id = req.file_id; // Verify authentication - match handler.app_state.oauth.verify_token(&req.auth_token).await { - Ok(v) if v.valid => {} + let verified = match 
handler.app_state.oauth.verify_token(&req.auth_token).await { + Ok(v) if v.valid => v, _ => { return Ok(Response::new(response::file_download_error( "Authentication failed", ))) } - } + }; + let server_account_hash = verified.account_hash; // Get file info let file_info = match handler.app_state.file.get_file_info(file_id).await { @@ -75,8 +77,79 @@ pub async fn handle_download_file( }; let aad = format!("{}:{}", file_info.account_hash, req.device_hash); + // Check bandwidth quota before download + let event_id = nanoid::nanoid!(16); + let file_size = file_info.size as u64; + + let usage_check = handler + .app_state + .usage_checker + .check_before_operation( + &server_account_hash, + UsageOperation::Download { + bytes: file_size, + file_id, + revision: file_info.revision, + device_hash: req.device_hash.clone(), + event_id: event_id.clone(), + }, + ) + .await; + + match usage_check { + Ok(check_result) => { + if !check_result.allowed { + error!( + "Download blocked due to bandwidth quota: {}", + check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + ); + return Ok(Response::new(response::file_download_error( + &format!("Bandwidth quota exceeded: {}", + check_result.reason.unwrap_or_else(|| "Monthly bandwidth limit reached".to_string())) + ))); + } + + // Log warnings if any + for warning in &check_result.warnings { + warn!("Bandwidth warning for {}: {}", server_account_hash, warning); + } + } + Err(e) => { + // If usage check fails, log but allow download (fail-open) + error!("Usage check failed, allowing download: {}", e); + } + } + // Get file data - match handler.app_state.file.get_file_data(file_id).await { + let download_result = handler.app_state.file.get_file_data(file_id).await; + + // Record download usage after attempt + let operation_result = match &download_result { + Ok(Some(_)) => OperationResult::Success, + _ => OperationResult::Failed, + }; + + if let Err(e) = handler + .app_state + .usage_checker + .record_after_operation( + &server_account_hash, + UsageOperation::Download { + bytes: file_size, + file_id, + revision: file_info.revision, + device_hash: req.device_hash.clone(), + event_id, + }, + operation_result, + ) + .await + { + // Log error but don't fail the download + error!("Failed to record download usage: {}", e); + } + + match download_result { Ok(Some(data)) => { let (enc_path, enc_name) = if let Some(key) = account_key.as_ref() { let ct_path = crate::utils::crypto::aead_encrypt( diff --git a/src/handlers/file/upload.rs b/src/handlers/file/upload.rs index dbce087..02e3580 100644 --- a/src/handlers/file/upload.rs +++ b/src/handlers/file/upload.rs @@ -1,8 +1,9 @@ use tonic::{Response, Status}; -use tracing::{debug, error}; +use tracing::{debug, error, warn}; use crate::sync::{UploadFileRequest, UploadFileResponse}; use crate::utils::response; +use crate::services::usage_service::{UsageOperation, OperationResult}; // use crate::services::file_service::FileService; // not used directly use super::super::file_handler::FileHandler; @@ -61,6 +62,46 @@ pub async fn handle_upload_file( Err(msg) => return Ok(Response::new(response::file_upload_error(msg))), }; + // 5.1. 
Check usage quota before upload + let event_id = nanoid::nanoid!(16); + let usage_check = handler + .app_state + .usage_checker + .check_before_operation( + &server_account_hash, + UsageOperation::Upload { + bytes: req.file_size as u64, + file_id, + revision: req.revision + 1, + event_id: event_id.clone(), + }, + ) + .await; + + match usage_check { + Ok(check_result) => { + if !check_result.allowed { + error!( + "Upload blocked due to quota: {}", + check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + ); + return Ok(Response::new(response::file_upload_error( + &format!("Storage quota exceeded: {}", + check_result.reason.unwrap_or_else(|| "Storage limit reached".to_string())) + ))); + } + + // Log warnings if any + for warning in &check_result.warnings { + warn!("Usage warning for {}: {}", server_account_hash, warning); + } + } + Err(e) => { + // If usage check fails, log but allow upload (fail-open) + error!("Usage check failed, allowing upload: {}", e); + } + } + // Warn if storage backend is memory (diagnostic) if handler .app_state @@ -107,12 +148,38 @@ pub async fn handle_upload_file( ); // 8. Store file via FileService - match handler + let store_result = handler .app_state .file .store_file(&file_info, &req.file_data) + .await; + + // 8.1. Record usage after operation + let operation_result = match &store_result { + Ok(_) => OperationResult::Success, + Err(_) => OperationResult::Failed, + }; + + if let Err(e) = handler + .app_state + .usage_checker + .record_after_operation( + &server_account_hash, + UsageOperation::Upload { + bytes: req.file_size as u64, + file_id, + revision: req.revision + 1, + event_id, + }, + operation_result, + ) .await { + // Log error but don't fail the upload + error!("Failed to record usage after upload: {}", e); + } + + match store_result { Ok(_) => { // Publish cross-instance file upload event (masking path and name) let routing_key = format!( diff --git a/src/handlers/mod.rs b/src/handlers/mod.rs index c023c79..fbd1b0d 100755 --- a/src/handlers/mod.rs +++ b/src/handlers/mod.rs @@ -26,6 +26,9 @@ pub mod api; // Metrics handlers pub mod metrics; +// Usage tracking handlers +pub mod usage_handler; + use crate::sync::HealthCheckRequest; use crate::sync::HealthCheckResponse; use tonic::{Request, Response, Status}; diff --git a/src/handlers/usage_handler.rs b/src/handlers/usage_handler.rs new file mode 100644 index 0000000..9646697 --- /dev/null +++ b/src/handlers/usage_handler.rs @@ -0,0 +1,406 @@ +use actix_web::{web, HttpRequest, HttpResponse, Responder}; +use chrono::{Datelike, NaiveDate, Utc}; +use serde::{Deserialize, Serialize}; +use std::sync::Arc; +use tracing::{debug, error}; + +use crate::server::app_state::AppState; +use crate::services::usage_service::UsageStats; + +#[derive(Debug, Serialize)] +pub struct UsageStatsResponse { + pub account_hash: String, + pub storage: StorageUsage, + pub bandwidth: BandwidthUsage, + pub quotas: QuotaInfo, + pub warnings: Vec, +} + +#[derive(Debug, Serialize)] +pub struct StorageUsage { + pub bytes_used: u64, + pub bytes_limit: u64, + pub bytes_soft_limit: u64, + pub files_count: u32, + pub percentage_used: f64, + pub human_readable: HumanReadableSize, +} + +#[derive(Debug, Serialize)] +pub struct BandwidthUsage { + pub daily: BandwidthPeriod, + pub monthly: BandwidthPeriod, +} + +#[derive(Debug, Serialize)] +pub struct BandwidthPeriod { + pub upload_bytes: u64, + pub download_bytes: u64, + pub total_bytes: u64, + pub limit: u64, + pub soft_limit: u64, + pub percentage_used: f64, + pub 
human_readable: HumanReadableBandwidth, +} + +#[derive(Debug, Serialize)] +pub struct QuotaInfo { + pub storage_exceeded: bool, + pub bandwidth_exceeded: bool, + pub hard_blocked: bool, + pub soft_limit_exceeded: bool, + pub last_warning_at: Option, + pub grace_period_until: Option, +} + +#[derive(Debug, Serialize)] +pub struct HumanReadableSize { + pub used: String, + pub limit: String, + pub soft_limit: String, +} + +#[derive(Debug, Serialize)] +pub struct HumanReadableBandwidth { + pub upload: String, + pub download: String, + pub total: String, + pub limit: String, +} + +#[derive(Debug, Deserialize)] +pub struct BandwidthHistoryQuery { + pub start_date: Option, // YYYY-MM-DD + pub end_date: Option, // YYYY-MM-DD + pub period: Option, // "daily" or "monthly" +} + +#[derive(Debug, Serialize)] +pub struct BandwidthHistoryResponse { + pub account_hash: String, + pub period: String, + pub start_date: String, + pub end_date: String, + pub data: Vec, + pub totals: BandwidthTotals, +} + +#[derive(Debug, Serialize)] +pub struct BandwidthHistoryEntry { + pub date: String, + pub upload_bytes: u64, + pub download_bytes: u64, + pub total_bytes: u64, + pub upload_count: u32, + pub download_count: u32, +} + +#[derive(Debug, Serialize)] +pub struct BandwidthTotals { + pub upload_bytes: u64, + pub download_bytes: u64, + pub total_bytes: u64, + pub upload_count: u32, + pub download_count: u32, +} + +/// Format bytes to human readable string +fn format_bytes(bytes: u64) -> String { + const UNITS: &[&str] = &["B", "KB", "MB", "GB", "TB"]; + if bytes == 0 { + return "0 B".to_string(); + } + + let mut size = bytes as f64; + let mut unit_idx = 0; + + while size >= 1024.0 && unit_idx < UNITS.len() - 1 { + size /= 1024.0; + unit_idx += 1; + } + + if unit_idx == 0 { + format!("{} {}", bytes, UNITS[unit_idx]) + } else { + format!("{:.2} {}", size, UNITS[unit_idx]) + } +} + +/// Extract token from request +fn extract_token(req: &HttpRequest) -> Option { + req.headers() + .get("authorization") + .and_then(|v| v.to_str().ok()) + .and_then(|v| { + if v.starts_with("Bearer ") { + Some(v[7..].to_string()) + } else { + None + } + }) +} + +/// Get current usage statistics +pub async fn get_usage_stats( + req: HttpRequest, + app_state: web::Data>, +) -> impl Responder { + // Extract and verify token + let token = match extract_token(&req) { + Some(t) => t, + None => { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "Missing or invalid authorization header" + })); + } + }; + + let verified = match app_state.oauth.verify_token(&token).await { + Ok(v) if v.valid => v, + _ => { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "Invalid token" + })); + } + }; + + let account_hash = verified.account_hash; + + // Get usage stats + let stats = match app_state.usage_checker.get_usage_stats(&account_hash).await { + Ok(s) => s, + Err(e) => { + error!("Failed to get usage stats: {}", e); + return HttpResponse::InternalServerError().json(serde_json::json!({ + "error": "Failed to retrieve usage statistics" + })); + } + }; + + // Convert to response format + let response = UsageStatsResponse { + account_hash: account_hash.clone(), + storage: StorageUsage { + bytes_used: stats.storage.bytes_used, + bytes_limit: stats.storage.bytes_limit, + bytes_soft_limit: stats.storage.bytes_soft_limit, + files_count: stats.storage.files_count, + percentage_used: stats.storage.percentage_used, + human_readable: HumanReadableSize { + used: format_bytes(stats.storage.bytes_used), + limit: 
format_bytes(stats.storage.bytes_limit), + soft_limit: format_bytes(stats.storage.bytes_soft_limit), + }, + }, + bandwidth: BandwidthUsage { + daily: BandwidthPeriod { + upload_bytes: stats.bandwidth.daily.upload_bytes, + download_bytes: stats.bandwidth.daily.download_bytes, + total_bytes: stats.bandwidth.daily.total_bytes, + limit: stats.bandwidth.daily.limit, + soft_limit: stats.bandwidth.daily.soft_limit, + percentage_used: stats.bandwidth.daily.percentage_used, + human_readable: HumanReadableBandwidth { + upload: format_bytes(stats.bandwidth.daily.upload_bytes), + download: format_bytes(stats.bandwidth.daily.download_bytes), + total: format_bytes(stats.bandwidth.daily.total_bytes), + limit: format_bytes(stats.bandwidth.daily.limit), + }, + }, + monthly: BandwidthPeriod { + upload_bytes: stats.bandwidth.monthly.upload_bytes, + download_bytes: stats.bandwidth.monthly.download_bytes, + total_bytes: stats.bandwidth.monthly.total_bytes, + limit: stats.bandwidth.monthly.limit, + soft_limit: stats.bandwidth.monthly.soft_limit, + percentage_used: stats.bandwidth.monthly.percentage_used, + human_readable: HumanReadableBandwidth { + upload: format_bytes(stats.bandwidth.monthly.upload_bytes), + download: format_bytes(stats.bandwidth.monthly.download_bytes), + total: format_bytes(stats.bandwidth.monthly.total_bytes), + limit: format_bytes(stats.bandwidth.monthly.limit), + }, + }, + }, + quotas: QuotaInfo { + storage_exceeded: stats.storage.bytes_used > stats.storage.bytes_limit, + bandwidth_exceeded: stats.bandwidth.monthly.total_bytes > stats.bandwidth.monthly.limit, + hard_blocked: stats.limits.hard_blocked, + soft_limit_exceeded: stats.limits.soft_limit_exceeded, + last_warning_at: stats.limits.last_warning_at.map(|dt| dt.to_rfc3339()), + grace_period_until: stats.limits.grace_period_until.map(|dt| dt.to_rfc3339()), + }, + warnings: stats.warnings, + }; + + HttpResponse::Ok().json(response) +} + +/// Get bandwidth usage history +pub async fn get_bandwidth_history( + req: HttpRequest, + app_state: web::Data>, + query: web::Query, +) -> impl Responder { + // Extract and verify token + let token = match extract_token(&req) { + Some(t) => t, + None => { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "Missing or invalid authorization header" + })); + } + }; + + let verified = match app_state.oauth.verify_token(&token).await { + Ok(v) if v.valid => v, + _ => { + return HttpResponse::Unauthorized().json(serde_json::json!({ + "error": "Invalid token" + })); + } + }; + + let account_hash = verified.account_hash; + + // Parse dates or use defaults + let end_date = query + .end_date + .as_ref() + .and_then(|s| NaiveDate::parse_from_str(s, "%Y-%m-%d").ok()) + .unwrap_or_else(|| Utc::now().date_naive()); + + let start_date = query + .start_date + .as_ref() + .and_then(|s| NaiveDate::parse_from_str(s, "%Y-%m-%d").ok()) + .unwrap_or_else(|| { + // Default to last 30 days + end_date - chrono::Duration::days(29) + }); + + let period = query.period.as_ref().map(|s| s.clone()).unwrap_or_else(|| "daily".to_string()); + + // Get bandwidth data from storage + let mysql_storage = match app_state + .storage + .as_any() + .downcast_ref::() + { + Some(s) => s, + None => { + error!("Storage is not MySQL"); + return HttpResponse::InternalServerError().json(serde_json::json!({ + "error": "Storage backend not available" + })); + } + }; + + use crate::storage::mysql_usage::MySqlUsageExt; + + // Collect daily data + let mut data = Vec::new(); + let mut total_upload = 0u64; + let mut total_download = 
0u64; + let mut total_upload_count = 0u32; + let mut total_download_count = 0u32; + + let mut current_date = start_date; + while current_date <= end_date { + let usage = match mysql_storage + .get_bandwidth_usage(&account_hash, current_date, current_date) + .await + { + Ok(u) => u, + Err(e) => { + error!("Failed to get bandwidth history: {}", e); + return HttpResponse::InternalServerError().json(serde_json::json!({ + "error": "Failed to retrieve bandwidth history" + })); + } + }; + + if period == "daily" || usage.total_bytes > 0 { + data.push(BandwidthHistoryEntry { + date: current_date.format("%Y-%m-%d").to_string(), + upload_bytes: usage.upload_bytes, + download_bytes: usage.download_bytes, + total_bytes: usage.total_bytes, + upload_count: usage.upload_count, + download_count: usage.download_count, + }); + } + + total_upload += usage.upload_bytes; + total_download += usage.download_bytes; + total_upload_count += usage.upload_count; + total_download_count += usage.download_count; + + current_date += chrono::Duration::days(1); + } + + // Aggregate to monthly if requested + if period == "monthly" { + let mut monthly_data: std::collections::HashMap = + std::collections::HashMap::new(); + + for entry in data { + let month = entry.date[..7].to_string(); // YYYY-MM + let monthly_entry = monthly_data.entry(month.clone()).or_insert(BandwidthHistoryEntry { + date: month, + upload_bytes: 0, + download_bytes: 0, + total_bytes: 0, + upload_count: 0, + download_count: 0, + }); + + monthly_entry.upload_bytes += entry.upload_bytes; + monthly_entry.download_bytes += entry.download_bytes; + monthly_entry.total_bytes += entry.total_bytes; + monthly_entry.upload_count += entry.upload_count; + monthly_entry.download_count += entry.download_count; + } + + data = monthly_data.into_values().collect(); + data.sort_by(|a, b| a.date.cmp(&b.date)); + } + + let response = BandwidthHistoryResponse { + account_hash, + period, + start_date: start_date.format("%Y-%m-%d").to_string(), + end_date: end_date.format("%Y-%m-%d").to_string(), + data, + totals: BandwidthTotals { + upload_bytes: total_upload, + download_bytes: total_download, + total_bytes: total_upload + total_download, + upload_count: total_upload_count, + download_count: total_download_count, + }, + }; + + HttpResponse::Ok().json(response) +} + +/// Health check for usage service +pub async fn usage_health_check( + app_state: web::Data>, +) -> impl Responder { + // Check if MySQL storage is available + let mysql_available = app_state + .storage + .as_any() + .downcast_ref::() + .is_some(); + + HttpResponse::Ok().json(serde_json::json!({ + "service": "usage", + "status": if mysql_available { "healthy" } else { "degraded" }, + "mysql_available": mysql_available, + "tracking_enabled": std::env::var("USAGE_TRACKING_ENABLED") + .unwrap_or_else(|_| "true".to_string()), + "timestamp": Utc::now().to_rfc3339(), + })) +} diff --git a/src/server/app_state.rs b/src/server/app_state.rs index d8de196..6f8baeb 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -7,6 +7,7 @@ use crate::server::notification_manager::NotificationManager; use crate::services::device_service::DeviceService; use crate::services::encryption_service::EncryptionService; use crate::services::file_service::FileService; +use crate::services::usage_service::{UsageService, UsageChecker, UsageConfig}; use crate::services::version_service::{VersionService, VersionServiceImpl}; use crate::storage::mysql::MySqlStorage; use crate::storage::mysql_watcher::MySqlWatcherExt; @@ -71,6 +72,8 @@ 
pub struct AppState { pub device: DeviceService, /// Version service for file version management pub version_service: VersionServiceImpl, + /// Usage service for quota and bandwidth management + pub usage_checker: Arc, /// Notification manager for broadcasting events pub notification_manager: Arc, /// Event bus for cross-instance broadcasting (noop by default) @@ -197,6 +200,7 @@ impl AppState { FileService, DeviceService, VersionServiceImpl, + Arc, ), AppError, > { @@ -240,6 +244,30 @@ impl AppState { // initialize version service let version_service = VersionServiceImpl::new(storage.clone(), file.clone()); + // initialize usage service + let usage_config = UsageConfig { + enabled: std::env::var("USAGE_TRACKING_ENABLED") + .unwrap_or_else(|_| "true".to_string()) + .parse() + .unwrap_or(true), + storage_limit_bytes: std::env::var("STORAGE_LIMIT_BYTES") + .unwrap_or_else(|_| "10737418240".to_string()) // 10GB + .parse() + .unwrap_or(10_737_418_240), + storage_soft_limit_ratio: 0.8, + bandwidth_limit_monthly_bytes: std::env::var("BANDWIDTH_LIMIT_MONTHLY_BYTES") + .unwrap_or_else(|_| "107374182400".to_string()) // 100GB + .parse() + .unwrap_or(107_374_182_400), + bandwidth_soft_limit_ratio: 0.8, + grace_period_hours: 24, + check_upload: true, + check_download: true, + block_on_hard_limit: true, + warn_on_soft_limit: true, + }; + let usage_checker: Arc = Arc::new(UsageService::new(storage.clone(), usage_config)); + Ok(( storage, notification_manager, @@ -249,6 +277,7 @@ impl AppState { file, device, version_service, + usage_checker, )) } @@ -425,6 +454,30 @@ impl AppState { // initialize event bus (RabbitMQ if enabled) let event_bus: Arc = Self::create_event_bus().await; + // initialize usage service + let usage_config = UsageConfig { + enabled: std::env::var("USAGE_TRACKING_ENABLED") + .unwrap_or_else(|_| "true".to_string()) + .parse() + .unwrap_or(true), + storage_limit_bytes: std::env::var("STORAGE_LIMIT_BYTES") + .unwrap_or_else(|_| "10737418240".to_string()) // 10GB + .parse() + .unwrap_or(10_737_418_240), + storage_soft_limit_ratio: 0.8, + bandwidth_limit_monthly_bytes: std::env::var("BANDWIDTH_LIMIT_MONTHLY_BYTES") + .unwrap_or_else(|_| "107374182400".to_string()) // 100GB + .parse() + .unwrap_or(107_374_182_400), + bandwidth_soft_limit_ratio: 0.8, + grace_period_hours: 24, + check_upload: true, + check_download: true, + block_on_hard_limit: true, + warn_on_soft_limit: true, + }; + let usage_checker: Arc = Arc::new(UsageService::new(storage.clone(), usage_config)); + Ok(Self { config: full_config.clone(), storage, @@ -433,6 +486,7 @@ impl AppState { file, device, version_service, + usage_checker, notification_manager: notification_manager.clone(), event_bus, auth_sessions: Arc::new(Mutex::new(HashMap::new())), @@ -453,6 +507,7 @@ impl AppState { file, device, version_service, + usage_checker, ) = Self::initialize_services(config.server.storage_path.as_ref()).await?; let state = Self { config: config.clone(), @@ -462,6 +517,7 @@ impl AppState { file, device, version_service, + usage_checker, notification_manager, event_bus, auth_sessions: Arc::new(Mutex::new(HashMap::new())), @@ -496,6 +552,7 @@ impl AppState { file, device, version_service, + usage_checker, ) = Self::initialize_services(config.storage_path.as_ref()).await?; // create AppState object @@ -507,6 +564,7 @@ impl AppState { file, device, version_service, + usage_checker, notification_manager: notification_manager.clone(), event_bus, auth_sessions: Arc::new(Mutex::new(HashMap::new())), diff --git a/src/server/startup.rs 
b/src/server/startup.rs index 354470a..dc6b2e9 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -140,7 +140,7 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc) -> R .set_service_status("", ServingStatus::Serving) .await; - // Build optimized gRPC server + // Build optimized gRPC server (tonic serves HTTP/2 for gRPC by default) let server = Server::builder() // Timeout configurations .timeout(Duration::from_secs(config.auth_token_expiry_hours as u64)) @@ -150,8 +150,6 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc) -> R // Performance optimizations .max_concurrent_streams(Some(config.max_concurrent_requests as u32)) .tcp_nodelay(true) - // Compression (methods not available in current tonic version) - // .accept_compressed(tonic::codec::CompressionEncoding::Gzip) // Add services .add_service(SyncServiceServer::new(sync_service)) .add_service(SyncClientServiceServer::new(sync_client_service)) @@ -266,6 +264,19 @@ async fn start_http_server(config: &ServerConfig, app_state: Arc) -> R .route("/api/info", web::get().to(handlers::api::api_info)) .route("/api/version", web::get().to(handlers::api::api_version)) .route("/api/status", web::get().to(handlers::api::api_status)) + // Usage tracking endpoints + .route( + "/api/usage/stats", + web::get().to(handlers::usage_handler::get_usage_stats), + ) + .route( + "/api/usage/bandwidth", + web::get().to(handlers::usage_handler::get_bandwidth_history), + ) + .route( + "/api/usage/health", + web::get().to(handlers::usage_handler::usage_health_check), + ) }) .workers(config.worker_threads) .keep_alive(Duration::from_secs(75)) diff --git a/src/services/mod.rs b/src/services/mod.rs index 93b7b41..d839c84 100755 --- a/src/services/mod.rs +++ b/src/services/mod.rs @@ -3,6 +3,7 @@ pub mod auth_service; pub mod device_service; pub mod encryption_service; pub mod file_service; +pub mod usage_service; pub mod version_service; // Public re-exports @@ -10,6 +11,7 @@ pub use auth_service::AuthService; pub use device_service::DeviceService; pub use encryption_service::EncryptionService; pub use file_service::FileService; +pub use usage_service::{UsageService, UsageChecker, UsageConfig}; pub use version_service::{VersionService, VersionServiceImpl}; use async_trait::async_trait; diff --git a/src/services/usage_service.rs b/src/services/usage_service.rs new file mode 100644 index 0000000..9683e18 --- /dev/null +++ b/src/services/usage_service.rs @@ -0,0 +1,642 @@ +use async_trait::async_trait; +use chrono::{DateTime, Datelike, Utc, NaiveDate}; +use std::sync::Arc; +use tracing::{debug, error, info, warn}; + +use crate::error::{AppError, SyncError}; +use crate::storage::{Storage, StorageError}; +use crate::storage::mysql_usage::{MySqlUsageExt, StorageUsageInfo, BandwidthUsageInfo, AccountLimits}; + +/// Usage operation types +#[derive(Debug, Clone)] +pub enum UsageOperation { + Upload { + bytes: u64, + file_id: u64, + revision: i64, + event_id: String, + }, + Download { + bytes: u64, + file_id: u64, + revision: i64, + device_hash: String, + event_id: String, + }, + Delete { + bytes: u64, + file_id: u64, + }, +} + +/// Operation result status +#[derive(Debug, Clone, PartialEq)] +pub enum OperationResult { + Success, + Failed, + Partial, +} + +/// Check result for usage operations +#[derive(Debug)] +pub struct CheckResult { + pub allowed: bool, + pub reason: Option, + pub usage_info: UsageInfo, + pub warnings: Vec, +} + +/// Current usage information +#[derive(Debug, Clone)] +pub struct UsageInfo { + pub 
storage_used: u64,
+    pub storage_limit: u64,
+    pub storage_soft_limit: u64,
+    pub bandwidth_used_today: u64,
+    pub bandwidth_used_month: u64,
+    pub bandwidth_limit_month: u64,
+    pub bandwidth_soft_limit_month: u64,
+    pub is_blocked: bool,
+}
+
+/// Usage statistics response
+#[derive(Debug, Clone)]
+pub struct UsageStats {
+    pub storage: StorageStats,
+    pub bandwidth: BandwidthStats,
+    pub limits: LimitInfo,
+    pub warnings: Vec<String>,
+}
+
+#[derive(Debug, Clone)]
+pub struct StorageStats {
+    pub bytes_used: u64,
+    pub bytes_limit: u64,
+    pub bytes_soft_limit: u64,
+    pub files_count: u32,
+    pub percentage_used: f64,
+}
+
+#[derive(Debug, Clone)]
+pub struct BandwidthStats {
+    pub daily: BandwidthPeriod,
+    pub monthly: BandwidthPeriod,
+}
+
+#[derive(Debug, Clone)]
+pub struct BandwidthPeriod {
+    pub upload_bytes: u64,
+    pub download_bytes: u64,
+    pub total_bytes: u64,
+    pub limit: u64,
+    pub soft_limit: u64,
+    pub percentage_used: f64,
+}
+
+#[derive(Debug, Clone)]
+pub struct LimitInfo {
+    pub hard_blocked: bool,
+    pub soft_limit_exceeded: bool,
+    pub last_warning_at: Option<DateTime<Utc>>,
+    pub grace_period_until: Option<DateTime<Utc>>,
+}
+
+/// Usage checker trait for flexible implementation
+#[async_trait]
+pub trait UsageChecker: Send + Sync {
+    async fn check_before_operation(
+        &self,
+        account_hash: &str,
+        operation: UsageOperation,
+    ) -> Result<CheckResult, AppError>;
+
+    async fn record_after_operation(
+        &self,
+        account_hash: &str,
+        operation: UsageOperation,
+        result: OperationResult,
+    ) -> Result<(), AppError>;
+
+    async fn get_usage_stats(&self, account_hash: &str) -> Result<UsageStats, AppError>;
+}
+
+/// Main usage service implementation
+pub struct UsageService {
+    storage: Arc<dyn Storage>,
+    config: UsageConfig,
+}
+
+#[derive(Debug, Clone)]
+pub struct UsageConfig {
+    pub enabled: bool,
+    pub storage_limit_bytes: u64,
+    pub storage_soft_limit_ratio: f64,
+    pub bandwidth_limit_monthly_bytes: u64,
+    pub bandwidth_soft_limit_ratio: f64,
+    pub grace_period_hours: u64,
+    pub check_upload: bool,
+    pub check_download: bool,
+    pub block_on_hard_limit: bool,
+    pub warn_on_soft_limit: bool,
+}
+
+impl Default for UsageConfig {
+    fn default() -> Self {
+        Self {
+            enabled: true,
+            storage_limit_bytes: 10_737_418_240, // 10GB
+            storage_soft_limit_ratio: 0.8, // 80%
+            bandwidth_limit_monthly_bytes: 107_374_182_400, // 100GB
+            bandwidth_soft_limit_ratio: 0.8, // 80%
+            grace_period_hours: 24,
+            check_upload: true,
+            check_download: true,
+            block_on_hard_limit: true,
+            warn_on_soft_limit: true,
+        }
+    }
+}
+
+impl UsageService {
+    pub fn new(storage: Arc<dyn Storage>, config: UsageConfig) -> Self {
+        Self { storage, config }
+    }
+
+    /// Check if operation should be allowed based on current usage
+    async fn check_storage_limit(
+        &self,
+        account_hash: &str,
+        additional_bytes: u64,
+    ) -> Result<(bool, Option<String>, Vec<String>), AppError> {
+        if !self.config.enabled {
+            return Ok((true, None, vec![]));
+        }
+
+        // Get current usage
+        let usage = self.storage
+            .as_any()
+            .downcast_ref::<MySqlStorage>()
+            .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?
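Note that app_state.rs builds this UsageConfig twice from the same three environment variables (USAGE_TRACKING_ENABLED, STORAGE_LIMIT_BYTES, BANDWIDTH_LIMIT_MONTHLY_BYTES), each with an unwrap_or_else/parse/unwrap_or chain. A minimal sketch of a shared constructor that would remove the duplication; from_env and env_parse are illustrative names, not part of this patch:

    impl UsageConfig {
        // Hypothetical helper: read an env var, fall back to the default on
        // absence or parse failure (same behavior as the repeated chain).
        fn env_parse<T: std::str::FromStr>(key: &str, default: T) -> T {
            std::env::var(key)
                .ok()
                .and_then(|v| v.parse().ok())
                .unwrap_or(default)
        }

        // Build a config from the environment, starting from Default.
        pub fn from_env() -> Self {
            Self {
                enabled: Self::env_parse("USAGE_TRACKING_ENABLED", true),
                storage_limit_bytes: Self::env_parse("STORAGE_LIMIT_BYTES", 10_737_418_240), // 10GB
                bandwidth_limit_monthly_bytes:
                    Self::env_parse("BANDWIDTH_LIMIT_MONTHLY_BYTES", 107_374_182_400), // 100GB
                ..Self::default()
            }
        }
    }

Both call sites in app_state.rs could then shrink to UsageConfig::from_env().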
+ .get_storage_usage(account_hash) + .await + .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; + + // Check if already blocked + if usage.hard_blocked && self.config.block_on_hard_limit { + return Ok(( + false, + Some("Account is blocked due to exceeded storage quota".to_string()), + vec![] + )); + } + + let mut warnings = Vec::new(); + let projected_usage = usage.bytes_used + additional_bytes; + + // Check hard limit + if projected_usage > usage.bytes_limit { + if self.config.block_on_hard_limit { + return Ok(( + false, + Some(format!( + "Storage limit exceeded. Current: {} bytes, Limit: {} bytes, Requested: {} bytes", + usage.bytes_used, usage.bytes_limit, additional_bytes + )), + warnings + )); + } else { + warnings.push(format!("Storage hard limit exceeded: {}%", + (projected_usage as f64 / usage.bytes_limit as f64 * 100.0) as u32)); + } + } + + // Check soft limit + if projected_usage > usage.bytes_soft_limit && self.config.warn_on_soft_limit { + warnings.push(format!( + "Storage usage warning: {}% of limit used", + (projected_usage as f64 / usage.bytes_limit as f64 * 100.0) as u32 + )); + + // Update warning timestamp + let _ = self.storage + .as_any() + .downcast_ref::() + .unwrap() + .update_last_warning(account_hash) + .await; + } + + Ok((true, None, warnings)) + } + + /// Check bandwidth limits + async fn check_bandwidth_limit( + &self, + account_hash: &str, + additional_bytes: u64, + is_upload: bool, + ) -> Result<(bool, Option, Vec), AppError> { + if !self.config.enabled { + return Ok((true, None, vec![])); + } + + let mysql_storage = self.storage + .as_any() + .downcast_ref::() + .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; + + // Get current month bandwidth + let now = Utc::now(); + let start_of_month = NaiveDate::from_ymd_opt(now.year(), now.month(), 1).unwrap(); + let end_of_month = if now.month() == 12 { + NaiveDate::from_ymd_opt(now.year() + 1, 1, 1).unwrap() + } else { + NaiveDate::from_ymd_opt(now.year(), now.month() + 1, 1).unwrap() + } - chrono::Duration::days(1); + + let bandwidth = mysql_storage + .get_bandwidth_usage(account_hash, start_of_month, end_of_month) + .await + .map_err(|e| AppError::Storage(format!("Failed to get bandwidth usage: {}", e)))?; + + let limits = mysql_storage + .get_account_limits(account_hash) + .await + .map_err(|e| AppError::Storage(format!("Failed to get account limits: {}", e)))?; + + let mut warnings = Vec::new(); + let projected_total = bandwidth.total_bytes + additional_bytes; + + // Check monthly bandwidth limit + if projected_total > limits.bandwidth_monthly_limit { + if self.config.block_on_hard_limit { + return Ok(( + false, + Some(format!( + "Monthly bandwidth limit exceeded. 
Current: {} bytes, Limit: {} bytes", + bandwidth.total_bytes, limits.bandwidth_monthly_limit + )), + warnings + )); + } else { + warnings.push(format!("Bandwidth hard limit exceeded: {}%", + (projected_total as f64 / limits.bandwidth_monthly_limit as f64 * 100.0) as u32)); + } + } + + // Check soft limit + if projected_total > limits.bandwidth_monthly_soft_limit && self.config.warn_on_soft_limit { + warnings.push(format!( + "Bandwidth usage warning: {}% of monthly limit used", + (projected_total as f64 / limits.bandwidth_monthly_limit as f64 * 100.0) as u32 + )); + } + + Ok((true, None, warnings)) + } + + /// Record storage change after operation + async fn update_storage_usage( + &self, + account_hash: &str, + bytes_delta: i64, + files_delta: i32, + ) -> Result<(), AppError> { + let mysql_storage = self.storage + .as_any() + .downcast_ref::() + .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; + + let (success, current, limit) = mysql_storage + .try_increase_storage(account_hash, bytes_delta, files_delta) + .await + .map_err(|e| AppError::Storage(format!("Failed to update storage usage: {}", e)))?; + + if !success && bytes_delta > 0 { + warn!( + "Failed to increase storage for {}: current={}, limit={}, requested={}", + account_hash, current, limit, bytes_delta + ); + + // Set blocked status if exceeded + if self.config.block_on_hard_limit { + let _ = mysql_storage.set_account_blocked(account_hash, true).await; + } + } + + Ok(()) + } +} + +#[async_trait] +impl UsageChecker for UsageService { + async fn check_before_operation( + &self, + account_hash: &str, + operation: UsageOperation, + ) -> Result { + if !self.config.enabled { + return Ok(CheckResult { + allowed: true, + reason: None, + usage_info: self.get_current_usage_info(account_hash).await?, + warnings: vec![], + }); + } + + let mut all_warnings = Vec::new(); + let mut blocked = false; + let mut block_reason = None; + + match &operation { + UsageOperation::Upload { bytes, event_id, .. } => { + if self.config.check_upload { + // Check storage limit + let (allowed, reason, warnings) = self.check_storage_limit(account_hash, *bytes).await?; + all_warnings.extend(warnings); + + if !allowed { + blocked = true; + block_reason = reason; + } + + // Check bandwidth limit + if !blocked { + let (allowed, reason, warnings) = self.check_bandwidth_limit(account_hash, *bytes, true).await?; + all_warnings.extend(warnings); + + if !allowed { + blocked = true; + block_reason = reason; + } + } + + debug!( + "Upload check for {}: event_id={}, bytes={}, allowed={}", + account_hash, event_id, bytes, !blocked + ); + } + }, + UsageOperation::Download { bytes, event_id, .. } => { + if self.config.check_download { + // Check bandwidth limit only for downloads + let (allowed, reason, warnings) = self.check_bandwidth_limit(account_hash, *bytes, false).await?; + all_warnings.extend(warnings); + + if !allowed { + blocked = true; + block_reason = reason; + } + + debug!( + "Download check for {}: event_id={}, bytes={}, allowed={}", + account_hash, event_id, bytes, !blocked + ); + } + }, + UsageOperation::Delete { .. 
} => { + // Deletes are always allowed + debug!("Delete operation for {} - always allowed", account_hash); + } + } + + let usage_info = self.get_current_usage_info(account_hash).await?; + + Ok(CheckResult { + allowed: !blocked, + reason: block_reason, + usage_info, + warnings: all_warnings, + }) + } + + async fn record_after_operation( + &self, + account_hash: &str, + operation: UsageOperation, + result: OperationResult, + ) -> Result<(), AppError> { + if !self.config.enabled { + return Ok(()); + } + + let mysql_storage = self.storage + .as_any() + .downcast_ref::() + .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; + + match operation { + UsageOperation::Upload { bytes, file_id, revision, event_id } => { + // Record transfer event + let status = match result { + OperationResult::Success => "success", + OperationResult::Failed => "failed", + OperationResult::Partial => "partial", + }; + + mysql_storage + .record_transfer_event( + &event_id, + account_hash, + file_id, + revision, + "upload", + "", // device_hash not needed for uploads + bytes, + status, + ) + .await + .map_err(|e| AppError::Storage(format!("Failed to record upload event: {}", e)))?; + + // Update storage usage if successful + if result == OperationResult::Success { + self.update_storage_usage(account_hash, bytes as i64, 1).await?; + } + + info!( + "Recorded upload: account={}, file_id={}, bytes={}, status={}", + account_hash, file_id, bytes, status + ); + }, + UsageOperation::Download { bytes, file_id, revision, device_hash, event_id } => { + // Record transfer event + let status = match result { + OperationResult::Success => "success", + OperationResult::Failed => "failed", + OperationResult::Partial => "partial", + }; + + mysql_storage + .record_transfer_event( + &event_id, + account_hash, + file_id, + revision, + "download", + &device_hash, + bytes, + status, + ) + .await + .map_err(|e| AppError::Storage(format!("Failed to record download event: {}", e)))?; + + info!( + "Recorded download: account={}, file_id={}, device={}, bytes={}, status={}", + account_hash, file_id, device_hash, bytes, status + ); + }, + UsageOperation::Delete { bytes, file_id } => { + // Update storage usage (decrease) + if result == OperationResult::Success { + self.update_storage_usage(account_hash, -(bytes as i64), -1).await?; + + info!( + "Recorded delete: account={}, file_id={}, bytes={}", + account_hash, file_id, bytes + ); + } + } + } + + Ok(()) + } + + async fn get_usage_stats(&self, account_hash: &str) -> Result { + let mysql_storage = self.storage + .as_any() + .downcast_ref::() + .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; + + // Get storage usage + let storage_info = mysql_storage + .get_storage_usage(account_hash) + .await + .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; + + // Get bandwidth usage (daily and monthly) + let today = Utc::now().date_naive(); + let daily_bandwidth = mysql_storage + .get_bandwidth_usage(account_hash, today, today) + .await + .map_err(|e| AppError::Storage(format!("Failed to get daily bandwidth: {}", e)))?; + + let start_of_month = NaiveDate::from_ymd_opt(today.year(), today.month(), 1).unwrap(); + let monthly_bandwidth = mysql_storage + .get_bandwidth_usage(account_hash, start_of_month, today) + .await + .map_err(|e| AppError::Storage(format!("Failed to get monthly bandwidth: {}", e)))?; + + // Get limits + let limits = mysql_storage + .get_account_limits(account_hash) + .await + .map_err(|e| 
AppError::Storage(format!("Failed to get account limits: {}", e)))?; + + // Calculate percentages and warnings + let storage_percentage = (storage_info.bytes_used as f64 / storage_info.bytes_limit as f64) * 100.0; + let bandwidth_percentage = (monthly_bandwidth.total_bytes as f64 / limits.bandwidth_monthly_limit as f64) * 100.0; + + let mut warnings = Vec::new(); + let mut soft_limit_exceeded = false; + + if storage_info.bytes_used > storage_info.bytes_soft_limit { + soft_limit_exceeded = true; + warnings.push(format!("Storage usage at {:.1}% of limit", storage_percentage)); + } + + if monthly_bandwidth.total_bytes > limits.bandwidth_monthly_soft_limit { + soft_limit_exceeded = true; + warnings.push(format!("Monthly bandwidth at {:.1}% of limit", bandwidth_percentage)); + } + + Ok(UsageStats { + storage: StorageStats { + bytes_used: storage_info.bytes_used, + bytes_limit: storage_info.bytes_limit, + bytes_soft_limit: storage_info.bytes_soft_limit, + files_count: storage_info.files_count, + percentage_used: storage_percentage, + }, + bandwidth: BandwidthStats { + daily: BandwidthPeriod { + upload_bytes: daily_bandwidth.upload_bytes, + download_bytes: daily_bandwidth.download_bytes, + total_bytes: daily_bandwidth.total_bytes, + limit: limits.bandwidth_monthly_limit / 30, // Daily approximation + soft_limit: limits.bandwidth_monthly_soft_limit / 30, + percentage_used: (daily_bandwidth.total_bytes as f64 / (limits.bandwidth_monthly_limit as f64 / 30.0)) * 100.0, + }, + monthly: BandwidthPeriod { + upload_bytes: monthly_bandwidth.upload_bytes, + download_bytes: monthly_bandwidth.download_bytes, + total_bytes: monthly_bandwidth.total_bytes, + limit: limits.bandwidth_monthly_limit, + soft_limit: limits.bandwidth_monthly_soft_limit, + percentage_used: bandwidth_percentage, + }, + }, + limits: LimitInfo { + hard_blocked: storage_info.hard_blocked, + soft_limit_exceeded, + last_warning_at: storage_info.last_warning_at, + grace_period_until: storage_info.grace_period_until, + }, + warnings, + }) + } +} + +impl UsageService { + async fn get_current_usage_info(&self, account_hash: &str) -> Result { + let mysql_storage = self.storage + .as_any() + .downcast_ref::() + .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; + + let storage_info = mysql_storage + .get_storage_usage(account_hash) + .await + .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; + + let today = Utc::now().date_naive(); + let start_of_month = NaiveDate::from_ymd_opt(today.year(), today.month(), 1).unwrap(); + + let daily_bandwidth = mysql_storage + .get_bandwidth_usage(account_hash, today, today) + .await + .unwrap_or(BandwidthUsageInfo { + upload_bytes: 0, + download_bytes: 0, + upload_count: 0, + download_count: 0, + total_bytes: 0, + }); + + let monthly_bandwidth = mysql_storage + .get_bandwidth_usage(account_hash, start_of_month, today) + .await + .unwrap_or(BandwidthUsageInfo { + upload_bytes: 0, + download_bytes: 0, + upload_count: 0, + download_count: 0, + total_bytes: 0, + }); + + let limits = mysql_storage + .get_account_limits(account_hash) + .await + .map_err(|e| AppError::Storage(format!("Failed to get account limits: {}", e)))?; + + Ok(UsageInfo { + storage_used: storage_info.bytes_used, + storage_limit: storage_info.bytes_limit, + storage_soft_limit: storage_info.bytes_soft_limit, + bandwidth_used_today: daily_bandwidth.total_bytes, + bandwidth_used_month: monthly_bandwidth.total_bytes, + bandwidth_limit_month: limits.bandwidth_monthly_limit, + 
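The daily figures reported by get_usage_stats are not a separate per-day quota: they are derived from the monthly cap by a flat division by 30 ("Daily approximation" above). A worked example with the 100GB default, as a self-contained sketch:

    let monthly_limit: u64 = 107_374_182_400; // 100GB default
    let daily_limit = monthly_limit / 30;     // 3_579_139_413 bytes, about 3.3 GiB
    let used_today: u64 = 1_073_741_824;      // say 1 GiB transferred today
    let percentage = used_today as f64 / (monthly_limit as f64 / 30.0) * 100.0;
    assert!((percentage - 30.0).abs() < 0.01); // reported as ~30% of the daily slice

Since months are not uniformly 30 days, the approximation slightly understates the daily slice in 31-day months and overstates it in February; only the monthly numbers are enforced.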
bandwidth_soft_limit_month: limits.bandwidth_monthly_soft_limit,
+            is_blocked: storage_info.hard_blocked,
+        })
+    }
+}
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index 9e92303..c4cbf79 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -7,6 +7,7 @@ mod mysql_account;
 mod mysql_auth;
 mod mysql_device;
 mod mysql_file;
+pub mod mysql_usage;
 pub mod mysql_watcher;
 
 // File storage abstraction layer
diff --git a/src/storage/mysql_usage.rs b/src/storage/mysql_usage.rs
new file mode 100644
index 0000000..cb57cc0
--- /dev/null
+++ b/src/storage/mysql_usage.rs
@@ -0,0 +1,550 @@
+use async_trait::async_trait;
+use chrono::{DateTime, Utc, NaiveDate};
+use sqlx::{MySql, Row};
+use tracing::{debug, error, info, warn};
+
+use super::{Result, StorageError};
+
+/// Usage tracking operations for MySQL storage
+#[async_trait]
+pub trait MySqlUsageExt: Send + Sync {
+    /// Initialize usage records for an account
+    async fn init_usage_for_account(&self, account_hash: &str) -> Result<()>;
+
+    /// Try to increase storage usage atomically
+    async fn try_increase_storage(
+        &self,
+        account_hash: &str,
+        bytes_delta: i64,
+        files_delta: i32,
+    ) -> Result<(bool, u64, u64)>; // (success, current_usage, limit)
+
+    /// Record transfer event
+    async fn record_transfer_event(
+        &self,
+        event_id: &str,
+        account_hash: &str,
+        file_id: u64,
+        revision: i64,
+        transfer_type: &str,
+        device_hash: &str,
+        bytes: u64,
+        status: &str,
+    ) -> Result<()>;
+
+    /// Update transfer event status
+    async fn update_transfer_status(
+        &self,
+        event_id: &str,
+        status: &str,
+        failure_reason: Option<&str>,
+    ) -> Result<()>;
+
+    /// Update daily bandwidth usage
+    async fn update_bandwidth_daily(
+        &self,
+        account_hash: &str,
+        date: NaiveDate,
+        upload_bytes: i64,
+        download_bytes: i64,
+        upload_count: i32,
+        download_count: i32,
+    ) -> Result<()>;
+
+    /// Get storage usage info
+    async fn get_storage_usage(&self, account_hash: &str) -> Result<StorageUsageInfo>;
+
+    /// Get bandwidth usage for date range
+    async fn get_bandwidth_usage(
+        &self,
+        account_hash: &str,
+        start_date: NaiveDate,
+        end_date: NaiveDate,
+    ) -> Result<BandwidthUsageInfo>;
+
+    /// Check if account is blocked
+    async fn is_account_blocked(&self, account_hash: &str) -> Result<bool>;
+
+    /// Set account block status
+    async fn set_account_blocked(&self, account_hash: &str, blocked: bool) -> Result<()>;
+
+    /// Update warning timestamp
+    async fn update_last_warning(&self, account_hash: &str) -> Result<()>;
+
+    /// Get account limits (with overrides)
+    async fn get_account_limits(&self, account_hash: &str) -> Result<AccountLimits>;
+}
+
+#[derive(Debug, Clone)]
+pub struct StorageUsageInfo {
+    pub bytes_used: u64,
+    pub bytes_limit: u64,
+    pub bytes_soft_limit: u64,
+    pub files_count: u32,
+    pub hard_blocked: bool,
+    pub last_warning_at: Option<DateTime<Utc>>,
+    pub grace_period_until: Option<DateTime<Utc>>,
+}
+
+#[derive(Debug, Clone)]
+pub struct BandwidthUsageInfo {
+    pub upload_bytes: u64,
+    pub download_bytes: u64,
+    pub upload_count: u32,
+    pub download_count: u32,
+    pub total_bytes: u64,
+}
+
+#[derive(Debug, Clone)]
+pub struct AccountLimits {
+    pub storage_bytes_limit: u64,
+    pub storage_bytes_soft_limit: u64,
+    pub bandwidth_monthly_limit: u64,
+    pub bandwidth_monthly_soft_limit: u64,
+    pub has_overrides: bool,
+}
+
+#[async_trait]
+impl MySqlUsageExt for super::MySqlStorage {
+    async fn init_usage_for_account(&self, account_hash: &str) -> Result<()> {
+        debug!("Initializing usage records for account: {}", account_hash);
+
+        // Create usage_storage record if not exists
+        let query = r#"
+            INSERT IGNORE INTO
usage_storage (account_hash) + VALUES (?) + "#; + + sqlx::query(query) + .bind(account_hash) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to init usage_storage: {}", e)))?; + + // Initialize current month bandwidth record + let current_month = chrono::Utc::now().format("%Y-%m").to_string(); + let query = r#" + INSERT IGNORE INTO usage_bandwidth_monthly (account_hash, usage_month) + VALUES (?, ?) + "#; + + sqlx::query(query) + .bind(account_hash) + .bind(¤t_month) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to init bandwidth_monthly: {}", e)))?; + + Ok(()) + } + + async fn try_increase_storage( + &self, + account_hash: &str, + bytes_delta: i64, + files_delta: i32, + ) -> Result<(bool, u64, u64)> { + debug!( + "Trying to update storage for {}: bytes_delta={}, files_delta={}", + account_hash, bytes_delta, files_delta + ); + + // Use stored procedure for atomic operation + let mut result = sqlx::query( + r#"CALL update_storage_usage(?, ?, ?, @success, @current_usage, @limit)"# + ) + .bind(account_hash) + .bind(bytes_delta) + .bind(files_delta) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to call update_storage_usage: {}", e)))?; + + // Get output parameters + let row = sqlx::query(r#"SELECT @success as success, @current_usage as current_usage, @limit as `limit`"#) + .fetch_one(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to get procedure output: {}", e)))?; + + let success: bool = row.try_get("success").unwrap_or(false); + let current_usage: i64 = row.try_get("current_usage").unwrap_or(0); + let limit: i64 = row.try_get("limit").unwrap_or(0); + + debug!( + "Storage update result: success={}, current={}, limit={}", + success, current_usage, limit + ); + + Ok((success, current_usage as u64, limit as u64)) + } + + async fn record_transfer_event( + &self, + event_id: &str, + account_hash: &str, + file_id: u64, + revision: i64, + transfer_type: &str, + device_hash: &str, + bytes: u64, + status: &str, + ) -> Result<()> { + debug!( + "Recording transfer event: id={}, type={}, bytes={}, status={}", + event_id, transfer_type, bytes, status + ); + + let query = r#" + INSERT INTO transfer_events ( + event_id, account_hash, file_id, revision, + transfer_type, device_hash, bytes_transferred, status, + initiated_at, completed_at + ) VALUES (?, ?, ?, ?, ?, ?, ?, ?, NOW(), IF(? 
!= 'pending', NOW(), NULL)) + ON DUPLICATE KEY UPDATE + status = VALUES(status), + completed_at = VALUES(completed_at), + updated_at = NOW() + "#; + + sqlx::query(query) + .bind(event_id) + .bind(account_hash) + .bind(file_id as i64) + .bind(revision) + .bind(transfer_type) + .bind(device_hash) + .bind(bytes as i64) + .bind(status) + .bind(status) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to record transfer event: {}", e)))?; + + // Update bandwidth if status is success + if status == "success" { + let today = chrono::Utc::now().date_naive(); + let (upload_bytes, download_bytes) = if transfer_type == "upload" { + (bytes as i64, 0i64) + } else { + (0i64, bytes as i64) + }; + let (upload_count, download_count) = if transfer_type == "upload" { + (1, 0) + } else { + (0, 1) + }; + + self.update_bandwidth_daily( + account_hash, + today, + upload_bytes, + download_bytes, + upload_count, + download_count, + ).await?; + } + + Ok(()) + } + + async fn update_transfer_status( + &self, + event_id: &str, + status: &str, + failure_reason: Option<&str>, + ) -> Result<()> { + debug!("Updating transfer event status: id={}, status={}", event_id, status); + + let query = r#" + UPDATE transfer_events + SET status = ?, + completed_at = NOW(), + failure_reason = ? + WHERE event_id = ? + "#; + + sqlx::query(query) + .bind(status) + .bind(failure_reason) + .bind(event_id) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to update transfer status: {}", e)))?; + + Ok(()) + } + + async fn update_bandwidth_daily( + &self, + account_hash: &str, + date: NaiveDate, + upload_bytes: i64, + download_bytes: i64, + upload_count: i32, + download_count: i32, + ) -> Result<()> { + debug!( + "Updating daily bandwidth for {} on {}: up={}, down={}", + account_hash, date, upload_bytes, download_bytes + ); + + let query = r#" + INSERT INTO usage_bandwidth_daily ( + account_hash, usage_date, + upload_bytes, download_bytes, + upload_count, download_count + ) VALUES (?, ?, ?, ?, ?, ?) + ON DUPLICATE KEY UPDATE + upload_bytes = upload_bytes + VALUES(upload_bytes), + download_bytes = download_bytes + VALUES(download_bytes), + upload_count = upload_count + VALUES(upload_count), + download_count = download_count + VALUES(download_count), + updated_at = NOW() + "#; + + sqlx::query(query) + .bind(account_hash) + .bind(date) + .bind(upload_bytes) + .bind(download_bytes) + .bind(upload_count) + .bind(download_count) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to update daily bandwidth: {}", e)))?; + + // Also update monthly aggregation + let month = date.format("%Y-%m").to_string(); + let query = r#" + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, + upload_bytes, download_bytes, + upload_count, download_count + ) VALUES (?, ?, ?, ?, ?, ?) 
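Both upsert arms in update_bandwidth_daily (the daily insert above and the monthly ON DUPLICATE KEY UPDATE clause that follows) rely on the VALUES(col) function inside the update list. That syntax still works, but MySQL deprecated it in 8.0.20 in favor of a row alias (available from 8.0.19). A sketch of the equivalent aliased statement for the daily table, assuming the same schema and a MySQL 8.0.19+ server:

    // Alternative to VALUES(col) in ON DUPLICATE KEY UPDATE (MySQL >= 8.0.19).
    const UPSERT_DAILY_ALIASED: &str = r#"
        INSERT INTO usage_bandwidth_daily (
            account_hash, usage_date,
            upload_bytes, download_bytes,
            upload_count, download_count
        ) VALUES (?, ?, ?, ?, ?, ?) AS new
        ON DUPLICATE KEY UPDATE
            upload_bytes   = upload_bytes   + new.upload_bytes,
            download_bytes = download_bytes + new.download_bytes,
            upload_count   = upload_count   + new.upload_count,
            download_count = download_count + new.download_count,
            updated_at     = NOW()
    "#;

The bind order and semantics are unchanged; only the way the new row's values are referenced differs.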
+ ON DUPLICATE KEY UPDATE + upload_bytes = upload_bytes + VALUES(upload_bytes), + download_bytes = download_bytes + VALUES(download_bytes), + upload_count = upload_count + VALUES(upload_count), + download_count = download_count + VALUES(download_count), + updated_at = NOW() + "#; + + sqlx::query(query) + .bind(account_hash) + .bind(&month) + .bind(upload_bytes) + .bind(download_bytes) + .bind(upload_count) + .bind(download_count) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to update monthly bandwidth: {}", e)))?; + + Ok(()) + } + + async fn get_storage_usage(&self, account_hash: &str) -> Result { + let query = r#" + SELECT + bytes_used, bytes_limit, bytes_soft_limit, + files_count, hard_blocked, + last_warning_at, grace_period_until + FROM usage_storage + WHERE account_hash = ? + "#; + + let row = sqlx::query(query) + .bind(account_hash) + .fetch_optional(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to get storage usage: {}", e)))?; + + match row { + Some(row) => { + Ok(StorageUsageInfo { + bytes_used: row.try_get::("bytes_used").unwrap_or(0) as u64, + bytes_limit: row.try_get::("bytes_limit").unwrap_or(10737418240) as u64, + bytes_soft_limit: row.try_get::("bytes_soft_limit").unwrap_or(8589934592) as u64, + files_count: row.try_get::("files_count").unwrap_or(0) as u32, + hard_blocked: row.try_get("hard_blocked").unwrap_or(false), + last_warning_at: row.try_get("last_warning_at").ok(), + grace_period_until: row.try_get("grace_period_until").ok(), + }) + } + None => { + // Initialize if not exists + self.init_usage_for_account(account_hash).await?; + Ok(StorageUsageInfo { + bytes_used: 0, + bytes_limit: 10737418240, // 10GB + bytes_soft_limit: 8589934592, // 8GB + files_count: 0, + hard_blocked: false, + last_warning_at: None, + grace_period_until: None, + }) + } + } + } + + async fn get_bandwidth_usage( + &self, + account_hash: &str, + start_date: NaiveDate, + end_date: NaiveDate, + ) -> Result { + let query = r#" + SELECT + COALESCE(SUM(upload_bytes), 0) as upload_bytes, + COALESCE(SUM(download_bytes), 0) as download_bytes, + COALESCE(SUM(upload_count), 0) as upload_count, + COALESCE(SUM(download_count), 0) as download_count + FROM usage_bandwidth_daily + WHERE account_hash = ? + AND usage_date BETWEEN ? AND ? + "#; + + let row = sqlx::query(query) + .bind(account_hash) + .bind(start_date) + .bind(end_date) + .fetch_one(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to get bandwidth usage: {}", e)))?; + + let upload_bytes = row.try_get::("upload_bytes").unwrap_or(0) as u64; + let download_bytes = row.try_get::("download_bytes").unwrap_or(0) as u64; + + Ok(BandwidthUsageInfo { + upload_bytes, + download_bytes, + upload_count: row.try_get::("upload_count").unwrap_or(0) as u32, + download_count: row.try_get::("download_count").unwrap_or(0) as u32, + total_bytes: upload_bytes + download_bytes, + }) + } + + async fn is_account_blocked(&self, account_hash: &str) -> Result { + let query = r#" + SELECT hard_blocked + FROM usage_storage + WHERE account_hash = ? 
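get_bandwidth_usage, just above, wraps every SUM in COALESCE, so a date range with no rows yields zeros rather than NULLs. A sketch of a month-to-date call against this trait; the function name is illustrative, and MySqlStorage is assumed to be the re-exported concrete handle this impl targets:

    use chrono::{Datelike, Utc};
    use crate::storage::mysql_usage::MySqlUsageExt;

    async fn month_to_date_bytes(
        storage: &crate::storage::MySqlStorage,
        account_hash: &str,
    ) -> crate::storage::Result<u64> {
        let today = Utc::now().date_naive();
        let start = today.with_day(1).expect("day 1 is always valid");
        let usage = storage
            .get_bandwidth_usage(account_hash, start, today)
            .await?;
        Ok(usage.total_bytes) // upload + download over the range, zero if no rows
    }

This is the same window check_bandwidth_limit and get_usage_stats compute for themselves from NaiveDate::from_ymd_opt.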
+ "#; + + let row = sqlx::query(query) + .bind(account_hash) + .fetch_optional(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to check block status: {}", e)))?; + + Ok(row.map(|r| r.try_get("hard_blocked").unwrap_or(false)).unwrap_or(false)) + } + + async fn set_account_blocked(&self, account_hash: &str, blocked: bool) -> Result<()> { + let query = r#" + UPDATE usage_storage + SET hard_blocked = ?, + updated_at = NOW() + WHERE account_hash = ? + "#; + + sqlx::query(query) + .bind(blocked) + .bind(account_hash) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to set block status: {}", e)))?; + + if blocked { + warn!("Account {} has been blocked due to quota exceeded", account_hash); + } else { + info!("Account {} has been unblocked", account_hash); + } + + Ok(()) + } + + async fn update_last_warning(&self, account_hash: &str) -> Result<()> { + let query = r#" + UPDATE usage_storage + SET last_warning_at = NOW(), + updated_at = NOW() + WHERE account_hash = ? + "#; + + sqlx::query(query) + .bind(account_hash) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to update warning timestamp: {}", e)))?; + + Ok(()) + } + + async fn get_account_limits(&self, account_hash: &str) -> Result { + // Check for overrides first + let query = r#" + SELECT + o.storage_bytes_limit, + o.storage_bytes_soft_limit, + o.bandwidth_monthly_limit, + o.bandwidth_monthly_soft_limit, + s.bytes_limit as default_storage_limit, + s.bytes_soft_limit as default_storage_soft_limit, + m.bandwidth_limit as default_bandwidth_limit, + m.bandwidth_soft_limit as default_bandwidth_soft_limit + FROM usage_storage s + LEFT JOIN account_plan_overrides o ON s.account_hash = o.account_hash + AND (o.effective_until IS NULL OR o.effective_until > NOW()) + AND o.effective_from <= NOW() + LEFT JOIN usage_bandwidth_monthly m ON s.account_hash = m.account_hash + AND m.usage_month = DATE_FORMAT(CURDATE(), '%Y-%m') + WHERE s.account_hash = ? 
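The LEFT JOINs above give three tiers per limit: an active per-account override (effective_from/effective_until bracketing NOW()), the account's stored default, and finally a hard-coded fallback. The unwrap chain that follows implements that precedence; factored out, it is just first-Some-wins. An illustrative helper, not part of the patch:

    // First Some wins: override, then stored default, then the fallback constant.
    fn effective_limit(
        override_val: Option<i64>,
        stored_default: Option<i64>,
        fallback: u64,
    ) -> u64 {
        override_val
            .or(stored_default)
            .map(|v| v as u64)
            .unwrap_or(fallback)
    }

    // e.g. storage limit with no override and no stored default:
    // effective_limit(None, None, 10_737_418_240) == 10_737_418_240 (the 10GB fallback)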
+ "#; + + let row = sqlx::query(query) + .bind(account_hash) + .fetch_optional(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("Failed to get account limits: {}", e)))?; + + match row { + Some(row) => { + let has_overrides = row.try_get::, _>("storage_bytes_limit").unwrap_or(None).is_some(); + + Ok(AccountLimits { + storage_bytes_limit: row.try_get::, _>("storage_bytes_limit") + .unwrap_or(None) + .or_else(|| row.try_get::, _>("default_storage_limit").unwrap_or(None)) + .unwrap_or(10737418240) as u64, + storage_bytes_soft_limit: row.try_get::, _>("storage_bytes_soft_limit") + .unwrap_or(None) + .or_else(|| row.try_get::, _>("default_storage_soft_limit").unwrap_or(None)) + .unwrap_or(8589934592) as u64, + bandwidth_monthly_limit: row.try_get::, _>("bandwidth_monthly_limit") + .unwrap_or(None) + .or_else(|| row.try_get::, _>("default_bandwidth_limit").unwrap_or(None)) + .unwrap_or(107374182400) as u64, + bandwidth_monthly_soft_limit: row.try_get::, _>("bandwidth_monthly_soft_limit") + .unwrap_or(None) + .or_else(|| row.try_get::, _>("default_bandwidth_soft_limit").unwrap_or(None)) + .unwrap_or(85899345920) as u64, + has_overrides, + }) + } + None => { + // Default limits + Ok(AccountLimits { + storage_bytes_limit: 10737418240, // 10GB + storage_bytes_soft_limit: 8589934592, // 8GB + bandwidth_monthly_limit: 107374182400, // 100GB + bandwidth_monthly_soft_limit: 85899345920, // 80GB + has_overrides: false, + }) + } + } + } +} + + + From cae291aeaf80f013407bae8e29b1d0017232aba2 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 23 Sep 2025 13:45:50 -0600 Subject: [PATCH 32/71] Merge 443/50051 --- src/auth/oauth.rs | 9 +- src/handlers/api.rs | 3 +- src/handlers/file/delete.rs | 11 +- src/handlers/file/download.rs | 29 ++-- src/handlers/file/upload.rs | 27 ++-- src/handlers/usage_handler.rs | 85 ++++++----- src/server/app_state.rs | 8 +- src/services/mod.rs | 2 +- src/services/usage_service.rs | 249 ++++++++++++++++++------------- src/storage/mysql_usage.rs | 266 ++++++++++++++++++++-------------- 10 files changed, 400 insertions(+), 289 deletions(-) diff --git a/src/auth/oauth.rs b/src/auth/oauth.rs index 149f005..8e5fe33 100644 --- a/src/auth/oauth.rs +++ b/src/auth/oauth.rs @@ -105,12 +105,11 @@ impl OAuthService { pub fn new(storage: Arc) -> Self { // OAuth client configuration // client_id should match the registered client in OAuth provider - let client_id = std::env::var("OAUTH_CLIENT_ID") - .unwrap_or_else(|_| "cosmic_sync_client".to_string()); + let client_id = + std::env::var("OAUTH_CLIENT_ID").unwrap_or_else(|_| "cosmic_sync_client".to_string()); // For public clients (no client_secret required for PKCE) - let client_secret = std::env::var("OAUTH_CLIENT_SECRET") - .unwrap_or_else(|_| "".to_string()); + let client_secret = std::env::var("OAUTH_CLIENT_SECRET").unwrap_or_else(|_| "".to_string()); // Redirect URI must exactly match the registered value let redirect_uri = std::env::var("OAUTH_REDIRECT_URI") @@ -456,7 +455,7 @@ impl OAuthService { ("redirect_uri", self.redirect_uri.clone()), ("grant_type", "authorization_code".to_string()), ]; - + // Only include client_secret if it's not empty (for public clients) if !self.client_secret.is_empty() { params.push(("client_secret", self.client_secret.clone())); diff --git a/src/handlers/api.rs b/src/handlers/api.rs index ef01cf5..4d58682 100644 --- a/src/handlers/api.rs +++ b/src/handlers/api.rs @@ -5,7 +5,8 @@ use serde_json::json; /// Get API information pub async fn api_info() -> Result { - let 
auth_http_status_url = std::env::var("AUTH_HTTP_STATUS_URL").unwrap_or_else(|_| "".to_string()); + let auth_http_status_url = + std::env::var("AUTH_HTTP_STATUS_URL").unwrap_or_else(|_| "".to_string()); Ok(HttpResponse::Ok().json(json!({ "name": "Cosmic Sync Server", "version": env!("CARGO_PKG_VERSION"), diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 712e9d6..4a17b78 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -2,9 +2,9 @@ use tonic::{Response, Status}; use tracing::{debug, error, info, warn}; use super::super::file_handler::FileHandler; +use crate::services::usage_service::{OperationResult, UsageOperation}; use crate::sync::{DeleteFileRequest, DeleteFileResponse}; use crate::utils::response; -use crate::services::usage_service::{UsageOperation, OperationResult}; pub async fn handle_delete_file( handler: &FileHandler, @@ -48,9 +48,12 @@ pub async fn handle_delete_file( } }; - debug!("Executing file deletion: file_id={}, size={}", file_id, file_size); + debug!( + "Executing file deletion: file_id={}, size={}", + file_id, file_size + ); let delete_result = handler.app_state.file.delete_file(file_id).await; - + // Record storage decrease after deletion if delete_result.is_ok() && file_size > 0 { if let Err(e) = handler @@ -69,7 +72,7 @@ pub async fn handle_delete_file( error!("Failed to record storage decrease after deletion: {}", e); } } - + match delete_result { Ok(_) => { info!( diff --git a/src/handlers/file/download.rs b/src/handlers/file/download.rs index 5245e4b..177d9dc 100644 --- a/src/handlers/file/download.rs +++ b/src/handlers/file/download.rs @@ -2,9 +2,9 @@ use tonic::{Response, Status}; use tracing::{debug, error, info, warn}; use super::super::file_handler::FileHandler; +use crate::services::usage_service::{OperationResult, UsageOperation}; use crate::sync::{DownloadFileRequest, DownloadFileResponse}; use crate::utils::response; -use crate::services::usage_service::{UsageOperation, OperationResult}; use base64::Engine as _; fn parse_account_key(s: &str) -> Option<[u8; 32]> { @@ -80,7 +80,7 @@ pub async fn handle_download_file( // Check bandwidth quota before download let event_id = nanoid::nanoid!(16); let file_size = file_info.size as u64; - + let usage_check = handler .app_state .usage_checker @@ -95,20 +95,25 @@ pub async fn handle_download_file( }, ) .await; - + match usage_check { Ok(check_result) => { if !check_result.allowed { error!( "Download blocked due to bandwidth quota: {}", - check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + check_result + .reason + .as_ref() + .unwrap_or(&"Unknown reason".to_string()) ); - return Ok(Response::new(response::file_download_error( - &format!("Bandwidth quota exceeded: {}", - check_result.reason.unwrap_or_else(|| "Monthly bandwidth limit reached".to_string())) - ))); + return Ok(Response::new(response::file_download_error(&format!( + "Bandwidth quota exceeded: {}", + check_result + .reason + .unwrap_or_else(|| "Monthly bandwidth limit reached".to_string()) + )))); } - + // Log warnings if any for warning in &check_result.warnings { warn!("Bandwidth warning for {}: {}", server_account_hash, warning); @@ -122,13 +127,13 @@ pub async fn handle_download_file( // Get file data let download_result = handler.app_state.file.get_file_data(file_id).await; - + // Record download usage after attempt let operation_result = match &download_result { Ok(Some(_)) => OperationResult::Success, _ => OperationResult::Failed, }; - + if let Err(e) = handler .app_state 
.usage_checker @@ -148,7 +153,7 @@ pub async fn handle_download_file( // Log error but don't fail the download error!("Failed to record download usage: {}", e); } - + match download_result { Ok(Some(data)) => { let (enc_path, enc_name) = if let Some(key) = account_key.as_ref() { diff --git a/src/handlers/file/upload.rs b/src/handlers/file/upload.rs index 02e3580..eb957f5 100644 --- a/src/handlers/file/upload.rs +++ b/src/handlers/file/upload.rs @@ -1,9 +1,9 @@ use tonic::{Response, Status}; use tracing::{debug, error, warn}; +use crate::services::usage_service::{OperationResult, UsageOperation}; use crate::sync::{UploadFileRequest, UploadFileResponse}; use crate::utils::response; -use crate::services::usage_service::{UsageOperation, OperationResult}; // use crate::services::file_service::FileService; // not used directly use super::super::file_handler::FileHandler; @@ -77,20 +77,25 @@ pub async fn handle_upload_file( }, ) .await; - + match usage_check { Ok(check_result) => { if !check_result.allowed { error!( "Upload blocked due to quota: {}", - check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + check_result + .reason + .as_ref() + .unwrap_or(&"Unknown reason".to_string()) ); - return Ok(Response::new(response::file_upload_error( - &format!("Storage quota exceeded: {}", - check_result.reason.unwrap_or_else(|| "Storage limit reached".to_string())) - ))); + return Ok(Response::new(response::file_upload_error(&format!( + "Storage quota exceeded: {}", + check_result + .reason + .unwrap_or_else(|| "Storage limit reached".to_string()) + )))); } - + // Log warnings if any for warning in &check_result.warnings { warn!("Usage warning for {}: {}", server_account_hash, warning); @@ -153,13 +158,13 @@ pub async fn handle_upload_file( .file .store_file(&file_info, &req.file_data) .await; - + // 8.1. 
Record usage after operation let operation_result = match &store_result { Ok(_) => OperationResult::Success, Err(_) => OperationResult::Failed, }; - + if let Err(e) = handler .app_state .usage_checker @@ -178,7 +183,7 @@ pub async fn handle_upload_file( // Log error but don't fail the upload error!("Failed to record usage after upload: {}", e); } - + match store_result { Ok(_) => { // Publish cross-instance file upload event (masking path and name) diff --git a/src/handlers/usage_handler.rs b/src/handlers/usage_handler.rs index 9646697..5c8a609 100644 --- a/src/handlers/usage_handler.rs +++ b/src/handlers/usage_handler.rs @@ -110,15 +110,15 @@ fn format_bytes(bytes: u64) -> String { if bytes == 0 { return "0 B".to_string(); } - + let mut size = bytes as f64; let mut unit_idx = 0; - + while size >= 1024.0 && unit_idx < UNITS.len() - 1 { size /= 1024.0; unit_idx += 1; } - + if unit_idx == 0 { format!("{} {}", bytes, UNITS[unit_idx]) } else { @@ -154,7 +154,7 @@ pub async fn get_usage_stats( })); } }; - + let verified = match app_state.oauth.verify_token(&token).await { Ok(v) if v.valid => v, _ => { @@ -163,9 +163,9 @@ pub async fn get_usage_stats( })); } }; - + let account_hash = verified.account_hash; - + // Get usage stats let stats = match app_state.usage_checker.get_usage_stats(&account_hash).await { Ok(s) => s, @@ -176,7 +176,7 @@ pub async fn get_usage_stats( })); } }; - + // Convert to response format let response = UsageStatsResponse { account_hash: account_hash.clone(), @@ -232,7 +232,7 @@ pub async fn get_usage_stats( }, warnings: stats.warnings, }; - + HttpResponse::Ok().json(response) } @@ -251,7 +251,7 @@ pub async fn get_bandwidth_history( })); } }; - + let verified = match app_state.oauth.verify_token(&token).await { Ok(v) if v.valid => v, _ => { @@ -260,16 +260,16 @@ pub async fn get_bandwidth_history( })); } }; - + let account_hash = verified.account_hash; - + // Parse dates or use defaults let end_date = query .end_date .as_ref() .and_then(|s| NaiveDate::parse_from_str(s, "%Y-%m-%d").ok()) .unwrap_or_else(|| Utc::now().date_naive()); - + let start_date = query .start_date .as_ref() @@ -278,9 +278,13 @@ pub async fn get_bandwidth_history( // Default to last 30 days end_date - chrono::Duration::days(29) }); - - let period = query.period.as_ref().map(|s| s.clone()).unwrap_or_else(|| "daily".to_string()); - + + let period = query + .period + .as_ref() + .map(|s| s.clone()) + .unwrap_or_else(|| "daily".to_string()); + // Get bandwidth data from storage let mysql_storage = match app_state .storage @@ -295,16 +299,16 @@ pub async fn get_bandwidth_history( })); } }; - + use crate::storage::mysql_usage::MySqlUsageExt; - + // Collect daily data let mut data = Vec::new(); let mut total_upload = 0u64; let mut total_download = 0u64; let mut total_upload_count = 0u32; let mut total_download_count = 0u32; - + let mut current_date = start_date; while current_date <= end_date { let usage = match mysql_storage @@ -319,7 +323,7 @@ pub async fn get_bandwidth_history( })); } }; - + if period == "daily" || usage.total_bytes > 0 { data.push(BandwidthHistoryEntry { date: current_date.format("%Y-%m-%d").to_string(), @@ -330,42 +334,45 @@ pub async fn get_bandwidth_history( download_count: usage.download_count, }); } - + total_upload += usage.upload_bytes; total_download += usage.download_bytes; total_upload_count += usage.upload_count; total_download_count += usage.download_count; - + current_date += chrono::Duration::days(1); } - + // Aggregate to monthly if requested if period == "monthly" { - 
let mut monthly_data: std::collections::HashMap = + let mut monthly_data: std::collections::HashMap = std::collections::HashMap::new(); - + for entry in data { let month = entry.date[..7].to_string(); // YYYY-MM - let monthly_entry = monthly_data.entry(month.clone()).or_insert(BandwidthHistoryEntry { - date: month, - upload_bytes: 0, - download_bytes: 0, - total_bytes: 0, - upload_count: 0, - download_count: 0, - }); - + let monthly_entry = + monthly_data + .entry(month.clone()) + .or_insert(BandwidthHistoryEntry { + date: month, + upload_bytes: 0, + download_bytes: 0, + total_bytes: 0, + upload_count: 0, + download_count: 0, + }); + monthly_entry.upload_bytes += entry.upload_bytes; monthly_entry.download_bytes += entry.download_bytes; monthly_entry.total_bytes += entry.total_bytes; monthly_entry.upload_count += entry.upload_count; monthly_entry.download_count += entry.download_count; } - + data = monthly_data.into_values().collect(); data.sort_by(|a, b| a.date.cmp(&b.date)); } - + let response = BandwidthHistoryResponse { account_hash, period, @@ -380,21 +387,19 @@ pub async fn get_bandwidth_history( download_count: total_download_count, }, }; - + HttpResponse::Ok().json(response) } /// Health check for usage service -pub async fn usage_health_check( - app_state: web::Data>, -) -> impl Responder { +pub async fn usage_health_check(app_state: web::Data>) -> impl Responder { // Check if MySQL storage is available let mysql_available = app_state .storage .as_any() .downcast_ref::() .is_some(); - + HttpResponse::Ok().json(serde_json::json!({ "service": "usage", "status": if mysql_available { "healthy" } else { "degraded" }, diff --git a/src/server/app_state.rs b/src/server/app_state.rs index 6f8baeb..646e545 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -7,7 +7,7 @@ use crate::server::notification_manager::NotificationManager; use crate::services::device_service::DeviceService; use crate::services::encryption_service::EncryptionService; use crate::services::file_service::FileService; -use crate::services::usage_service::{UsageService, UsageChecker, UsageConfig}; +use crate::services::usage_service::{UsageChecker, UsageConfig, UsageService}; use crate::services::version_service::{VersionService, VersionServiceImpl}; use crate::storage::mysql::MySqlStorage; use crate::storage::mysql_watcher::MySqlWatcherExt; @@ -266,7 +266,8 @@ impl AppState { block_on_hard_limit: true, warn_on_soft_limit: true, }; - let usage_checker: Arc = Arc::new(UsageService::new(storage.clone(), usage_config)); + let usage_checker: Arc = + Arc::new(UsageService::new(storage.clone(), usage_config)); Ok(( storage, @@ -476,7 +477,8 @@ impl AppState { block_on_hard_limit: true, warn_on_soft_limit: true, }; - let usage_checker: Arc = Arc::new(UsageService::new(storage.clone(), usage_config)); + let usage_checker: Arc = + Arc::new(UsageService::new(storage.clone(), usage_config)); Ok(Self { config: full_config.clone(), diff --git a/src/services/mod.rs b/src/services/mod.rs index d839c84..dc9ed44 100755 --- a/src/services/mod.rs +++ b/src/services/mod.rs @@ -11,7 +11,7 @@ pub use auth_service::AuthService; pub use device_service::DeviceService; pub use encryption_service::EncryptionService; pub use file_service::FileService; -pub use usage_service::{UsageService, UsageChecker, UsageConfig}; +pub use usage_service::{UsageChecker, UsageConfig, UsageService}; pub use version_service::{VersionService, VersionServiceImpl}; use async_trait::async_trait; diff --git a/src/services/usage_service.rs 
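The monthly roll-up in get_bandwidth_history collects entries into a HashMap keyed by the YYYY-MM prefix and then sorts the resulting Vec by date. A BTreeMap keyed by the same string produces the entries already ordered, since YYYY-MM sorts lexicographically as it sorts chronologically. A sketch reusing the BandwidthHistoryEntry shape from this handler:

    use std::collections::BTreeMap;

    fn roll_up_monthly(daily: Vec<BandwidthHistoryEntry>) -> Vec<BandwidthHistoryEntry> {
        let mut monthly: BTreeMap<String, BandwidthHistoryEntry> = BTreeMap::new();
        for entry in daily {
            let month = entry.date[..7].to_string(); // YYYY-MM
            let slot = monthly.entry(month.clone()).or_insert(BandwidthHistoryEntry {
                date: month,
                upload_bytes: 0,
                download_bytes: 0,
                total_bytes: 0,
                upload_count: 0,
                download_count: 0,
            });
            slot.upload_bytes += entry.upload_bytes;
            slot.download_bytes += entry.download_bytes;
            slot.total_bytes += entry.total_bytes;
            slot.upload_count += entry.upload_count;
            slot.download_count += entry.download_count;
        }
        monthly.into_values().collect() // iterates in key order: already sorted
    }

Either structure is fine at this scale; the BTreeMap variant simply drops the explicit sort_by step.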
b/src/services/usage_service.rs index 9683e18..04e03a3 100644 --- a/src/services/usage_service.rs +++ b/src/services/usage_service.rs @@ -1,11 +1,13 @@ use async_trait::async_trait; -use chrono::{DateTime, Datelike, Utc, NaiveDate}; +use chrono::{DateTime, Datelike, NaiveDate, Utc}; use std::sync::Arc; use tracing::{debug, error, info, warn}; use crate::error::{AppError, SyncError}; +use crate::storage::mysql_usage::{ + AccountLimits, BandwidthUsageInfo, MySqlUsageExt, StorageUsageInfo, +}; use crate::storage::{Storage, StorageError}; -use crate::storage::mysql_usage::{MySqlUsageExt, StorageUsageInfo, BandwidthUsageInfo, AccountLimits}; /// Usage operation types #[derive(Debug, Clone)] @@ -109,14 +111,14 @@ pub trait UsageChecker: Send + Sync { account_hash: &str, operation: UsageOperation, ) -> Result; - + async fn record_after_operation( &self, account_hash: &str, operation: UsageOperation, result: OperationResult, ) -> Result<(), AppError>; - + async fn get_usage_stats(&self, account_hash: &str) -> Result; } @@ -144,10 +146,10 @@ impl Default for UsageConfig { fn default() -> Self { Self { enabled: true, - storage_limit_bytes: 10_737_418_240, // 10GB - storage_soft_limit_ratio: 0.8, // 80% - bandwidth_limit_monthly_bytes: 107_374_182_400, // 100GB - bandwidth_soft_limit_ratio: 0.8, // 80% + storage_limit_bytes: 10_737_418_240, // 10GB + storage_soft_limit_ratio: 0.8, // 80% + bandwidth_limit_monthly_bytes: 107_374_182_400, // 100GB + bandwidth_soft_limit_ratio: 0.8, // 80% grace_period_hours: 24, check_upload: true, check_download: true, @@ -161,7 +163,7 @@ impl UsageService { pub fn new(storage: Arc, config: UsageConfig) -> Self { Self { storage, config } } - + /// Check if operation should be allowed based on current usage async fn check_storage_limit( &self, @@ -171,28 +173,29 @@ impl UsageService { if !self.config.enabled { return Ok((true, None, vec![])); } - + // Get current usage - let usage = self.storage + let usage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))? 
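The as_any().downcast_ref::<MySqlStorage>() sequence being reformatted here recurs in every method of UsageService: check_storage_limit, check_bandwidth_limit, update_storage_usage, record_after_operation, get_usage_stats, and get_current_usage_info each repeat it. A small private accessor would confine both the downcast and the rustfmt churn to one place; a sketch assuming the same AppError and MySqlStorage types already imported by these methods:

    impl UsageService {
        // Illustrative helper (not in the patch): one place for the MySQL downcast.
        fn mysql(&self) -> Result<&MySqlStorage, AppError> {
            self.storage
                .as_any()
                .downcast_ref::<MySqlStorage>()
                .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))
        }
    }

    // Call sites then shrink to: let mysql_storage = self.mysql()?;

It would also remove the one bare .unwrap() on the downcast in check_storage_limit's warning path.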
.get_storage_usage(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; - + // Check if already blocked if usage.hard_blocked && self.config.block_on_hard_limit { return Ok(( false, Some("Account is blocked due to exceeded storage quota".to_string()), - vec![] + vec![], )); } - + let mut warnings = Vec::new(); let projected_usage = usage.bytes_used + additional_bytes; - + // Check hard limit if projected_usage > usage.bytes_limit { if self.config.block_on_hard_limit { @@ -205,30 +208,33 @@ impl UsageService { warnings )); } else { - warnings.push(format!("Storage hard limit exceeded: {}%", - (projected_usage as f64 / usage.bytes_limit as f64 * 100.0) as u32)); + warnings.push(format!( + "Storage hard limit exceeded: {}%", + (projected_usage as f64 / usage.bytes_limit as f64 * 100.0) as u32 + )); } } - + // Check soft limit if projected_usage > usage.bytes_soft_limit && self.config.warn_on_soft_limit { warnings.push(format!( "Storage usage warning: {}% of limit used", (projected_usage as f64 / usage.bytes_limit as f64 * 100.0) as u32 )); - + // Update warning timestamp - let _ = self.storage + let _ = self + .storage .as_any() .downcast_ref::() .unwrap() .update_last_warning(account_hash) .await; } - + Ok((true, None, warnings)) } - + /// Check bandwidth limits async fn check_bandwidth_limit( &self, @@ -239,12 +245,13 @@ impl UsageService { if !self.config.enabled { return Ok((true, None, vec![])); } - - let mysql_storage = self.storage + + let mysql_storage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; - + // Get current month bandwidth let now = Utc::now(); let start_of_month = NaiveDate::from_ymd_opt(now.year(), now.month(), 1).unwrap(); @@ -253,20 +260,20 @@ impl UsageService { } else { NaiveDate::from_ymd_opt(now.year(), now.month() + 1, 1).unwrap() } - chrono::Duration::days(1); - + let bandwidth = mysql_storage .get_bandwidth_usage(account_hash, start_of_month, end_of_month) .await .map_err(|e| AppError::Storage(format!("Failed to get bandwidth usage: {}", e)))?; - + let limits = mysql_storage .get_account_limits(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get account limits: {}", e)))?; - + let mut warnings = Vec::new(); let projected_total = bandwidth.total_bytes + additional_bytes; - + // Check monthly bandwidth limit if projected_total > limits.bandwidth_monthly_limit { if self.config.block_on_hard_limit { @@ -276,14 +283,16 @@ impl UsageService { "Monthly bandwidth limit exceeded. 
Current: {} bytes, Limit: {} bytes", bandwidth.total_bytes, limits.bandwidth_monthly_limit )), - warnings + warnings, )); } else { - warnings.push(format!("Bandwidth hard limit exceeded: {}%", - (projected_total as f64 / limits.bandwidth_monthly_limit as f64 * 100.0) as u32)); + warnings.push(format!( + "Bandwidth hard limit exceeded: {}%", + (projected_total as f64 / limits.bandwidth_monthly_limit as f64 * 100.0) as u32 + )); } } - + // Check soft limit if projected_total > limits.bandwidth_monthly_soft_limit && self.config.warn_on_soft_limit { warnings.push(format!( @@ -291,10 +300,10 @@ impl UsageService { (projected_total as f64 / limits.bandwidth_monthly_limit as f64 * 100.0) as u32 )); } - + Ok((true, None, warnings)) } - + /// Record storage change after operation async fn update_storage_usage( &self, @@ -302,28 +311,29 @@ impl UsageService { bytes_delta: i64, files_delta: i32, ) -> Result<(), AppError> { - let mysql_storage = self.storage + let mysql_storage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; - + let (success, current, limit) = mysql_storage .try_increase_storage(account_hash, bytes_delta, files_delta) .await .map_err(|e| AppError::Storage(format!("Failed to update storage usage: {}", e)))?; - + if !success && bytes_delta > 0 { warn!( "Failed to increase storage for {}: current={}, limit={}, requested={}", account_hash, current, limit, bytes_delta ); - + // Set blocked status if exceeded if self.config.block_on_hard_limit { let _ = mysql_storage.set_account_blocked(account_hash, true).await; } } - + Ok(()) } } @@ -343,65 +353,74 @@ impl UsageChecker for UsageService { warnings: vec![], }); } - + let mut all_warnings = Vec::new(); let mut blocked = false; let mut block_reason = None; - + match &operation { - UsageOperation::Upload { bytes, event_id, .. } => { + UsageOperation::Upload { + bytes, event_id, .. + } => { if self.config.check_upload { // Check storage limit - let (allowed, reason, warnings) = self.check_storage_limit(account_hash, *bytes).await?; + let (allowed, reason, warnings) = + self.check_storage_limit(account_hash, *bytes).await?; all_warnings.extend(warnings); - + if !allowed { blocked = true; block_reason = reason; } - + // Check bandwidth limit if !blocked { - let (allowed, reason, warnings) = self.check_bandwidth_limit(account_hash, *bytes, true).await?; + let (allowed, reason, warnings) = self + .check_bandwidth_limit(account_hash, *bytes, true) + .await?; all_warnings.extend(warnings); - + if !allowed { blocked = true; block_reason = reason; } } - + debug!( "Upload check for {}: event_id={}, bytes={}, allowed={}", account_hash, event_id, bytes, !blocked ); } - }, - UsageOperation::Download { bytes, event_id, .. } => { + } + UsageOperation::Download { + bytes, event_id, .. + } => { if self.config.check_download { // Check bandwidth limit only for downloads - let (allowed, reason, warnings) = self.check_bandwidth_limit(account_hash, *bytes, false).await?; + let (allowed, reason, warnings) = self + .check_bandwidth_limit(account_hash, *bytes, false) + .await?; all_warnings.extend(warnings); - + if !allowed { blocked = true; block_reason = reason; } - + debug!( "Download check for {}: event_id={}, bytes={}, allowed={}", account_hash, event_id, bytes, !blocked ); } - }, + } UsageOperation::Delete { .. 
} => { // Deletes are always allowed debug!("Delete operation for {} - always allowed", account_hash); } } - + let usage_info = self.get_current_usage_info(account_hash).await?; - + Ok(CheckResult { allowed: !blocked, reason: block_reason, @@ -409,7 +428,7 @@ impl UsageChecker for UsageService { warnings: all_warnings, }) } - + async fn record_after_operation( &self, account_hash: &str, @@ -419,21 +438,27 @@ impl UsageChecker for UsageService { if !self.config.enabled { return Ok(()); } - - let mysql_storage = self.storage + + let mysql_storage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; - + match operation { - UsageOperation::Upload { bytes, file_id, revision, event_id } => { + UsageOperation::Upload { + bytes, + file_id, + revision, + event_id, + } => { // Record transfer event let status = match result { OperationResult::Success => "success", OperationResult::Failed => "failed", OperationResult::Partial => "partial", }; - + mysql_storage .record_transfer_event( &event_id, @@ -446,26 +471,35 @@ impl UsageChecker for UsageService { status, ) .await - .map_err(|e| AppError::Storage(format!("Failed to record upload event: {}", e)))?; - + .map_err(|e| { + AppError::Storage(format!("Failed to record upload event: {}", e)) + })?; + // Update storage usage if successful if result == OperationResult::Success { - self.update_storage_usage(account_hash, bytes as i64, 1).await?; + self.update_storage_usage(account_hash, bytes as i64, 1) + .await?; } - + info!( "Recorded upload: account={}, file_id={}, bytes={}, status={}", account_hash, file_id, bytes, status ); - }, - UsageOperation::Download { bytes, file_id, revision, device_hash, event_id } => { + } + UsageOperation::Download { + bytes, + file_id, + revision, + device_hash, + event_id, + } => { // Record transfer event let status = match result { OperationResult::Success => "success", OperationResult::Failed => "failed", OperationResult::Partial => "partial", }; - + mysql_storage .record_transfer_event( &event_id, @@ -478,18 +512,21 @@ impl UsageChecker for UsageService { status, ) .await - .map_err(|e| AppError::Storage(format!("Failed to record download event: {}", e)))?; - + .map_err(|e| { + AppError::Storage(format!("Failed to record download event: {}", e)) + })?; + info!( "Recorded download: account={}, file_id={}, device={}, bytes={}, status={}", account_hash, file_id, device_hash, bytes, status ); - }, + } UsageOperation::Delete { bytes, file_id } => { // Update storage usage (decrease) if result == OperationResult::Success { - self.update_storage_usage(account_hash, -(bytes as i64), -1).await?; - + self.update_storage_usage(account_hash, -(bytes as i64), -1) + .await?; + info!( "Recorded delete: account={}, file_id={}, bytes={}", account_hash, file_id, bytes @@ -497,58 +534,67 @@ impl UsageChecker for UsageService { } } } - + Ok(()) } - + async fn get_usage_stats(&self, account_hash: &str) -> Result { - let mysql_storage = self.storage + let mysql_storage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; - + // Get storage usage let storage_info = mysql_storage .get_storage_usage(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; - + // Get bandwidth usage (daily and monthly) let today = Utc::now().date_naive(); let daily_bandwidth = mysql_storage .get_bandwidth_usage(account_hash, today, today) .await .map_err(|e| 
AppError::Storage(format!("Failed to get daily bandwidth: {}", e)))?; - + let start_of_month = NaiveDate::from_ymd_opt(today.year(), today.month(), 1).unwrap(); let monthly_bandwidth = mysql_storage .get_bandwidth_usage(account_hash, start_of_month, today) .await .map_err(|e| AppError::Storage(format!("Failed to get monthly bandwidth: {}", e)))?; - + // Get limits let limits = mysql_storage .get_account_limits(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get account limits: {}", e)))?; - + // Calculate percentages and warnings - let storage_percentage = (storage_info.bytes_used as f64 / storage_info.bytes_limit as f64) * 100.0; - let bandwidth_percentage = (monthly_bandwidth.total_bytes as f64 / limits.bandwidth_monthly_limit as f64) * 100.0; - + let storage_percentage = + (storage_info.bytes_used as f64 / storage_info.bytes_limit as f64) * 100.0; + let bandwidth_percentage = + (monthly_bandwidth.total_bytes as f64 / limits.bandwidth_monthly_limit as f64) * 100.0; + let mut warnings = Vec::new(); let mut soft_limit_exceeded = false; - + if storage_info.bytes_used > storage_info.bytes_soft_limit { soft_limit_exceeded = true; - warnings.push(format!("Storage usage at {:.1}% of limit", storage_percentage)); + warnings.push(format!( + "Storage usage at {:.1}% of limit", + storage_percentage + )); } - + if monthly_bandwidth.total_bytes > limits.bandwidth_monthly_soft_limit { soft_limit_exceeded = true; - warnings.push(format!("Monthly bandwidth at {:.1}% of limit", bandwidth_percentage)); + warnings.push(format!( + "Monthly bandwidth at {:.1}% of limit", + bandwidth_percentage + )); } - + Ok(UsageStats { storage: StorageStats { bytes_used: storage_info.bytes_used, @@ -562,9 +608,11 @@ impl UsageChecker for UsageService { upload_bytes: daily_bandwidth.upload_bytes, download_bytes: daily_bandwidth.download_bytes, total_bytes: daily_bandwidth.total_bytes, - limit: limits.bandwidth_monthly_limit / 30, // Daily approximation + limit: limits.bandwidth_monthly_limit / 30, // Daily approximation soft_limit: limits.bandwidth_monthly_soft_limit / 30, - percentage_used: (daily_bandwidth.total_bytes as f64 / (limits.bandwidth_monthly_limit as f64 / 30.0)) * 100.0, + percentage_used: (daily_bandwidth.total_bytes as f64 + / (limits.bandwidth_monthly_limit as f64 / 30.0)) + * 100.0, }, monthly: BandwidthPeriod { upload_bytes: monthly_bandwidth.upload_bytes, @@ -588,19 +636,20 @@ impl UsageChecker for UsageService { impl UsageService { async fn get_current_usage_info(&self, account_hash: &str) -> Result { - let mysql_storage = self.storage + let mysql_storage = self + .storage .as_any() .downcast_ref::() .ok_or_else(|| AppError::Internal("Storage is not MySQL".to_string()))?; - + let storage_info = mysql_storage .get_storage_usage(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get storage usage: {}", e)))?; - + let today = Utc::now().date_naive(); let start_of_month = NaiveDate::from_ymd_opt(today.year(), today.month(), 1).unwrap(); - + let daily_bandwidth = mysql_storage .get_bandwidth_usage(account_hash, today, today) .await @@ -611,7 +660,7 @@ impl UsageService { download_count: 0, total_bytes: 0, }); - + let monthly_bandwidth = mysql_storage .get_bandwidth_usage(account_hash, start_of_month, today) .await @@ -622,12 +671,12 @@ impl UsageService { download_count: 0, total_bytes: 0, }); - + let limits = mysql_storage .get_account_limits(account_hash) .await .map_err(|e| AppError::Storage(format!("Failed to get account limits: {}", e)))?; - + Ok(UsageInfo { 
storage_used: storage_info.bytes_used, storage_limit: storage_info.bytes_limit, diff --git a/src/storage/mysql_usage.rs b/src/storage/mysql_usage.rs index cb57cc0..d5f5729 100644 --- a/src/storage/mysql_usage.rs +++ b/src/storage/mysql_usage.rs @@ -1,5 +1,5 @@ use async_trait::async_trait; -use chrono::{DateTime, Utc, NaiveDate}; +use chrono::{DateTime, NaiveDate, Utc}; use sqlx::{MySql, Row}; use tracing::{debug, error, info, warn}; @@ -10,7 +10,7 @@ use super::{Result, StorageError}; pub trait MySqlUsageExt: Send + Sync { /// Initialize usage records for an account async fn init_usage_for_account(&self, account_hash: &str) -> Result<()>; - + /// Try to increase storage usage atomically async fn try_increase_storage( &self, @@ -18,7 +18,7 @@ pub trait MySqlUsageExt: Send + Sync { bytes_delta: i64, files_delta: i32, ) -> Result<(bool, u64, u64)>; // (success, current_usage, limit) - + /// Record transfer event async fn record_transfer_event( &self, @@ -31,7 +31,7 @@ pub trait MySqlUsageExt: Send + Sync { bytes: u64, status: &str, ) -> Result<()>; - + /// Update transfer event status async fn update_transfer_status( &self, @@ -39,7 +39,7 @@ pub trait MySqlUsageExt: Send + Sync { status: &str, failure_reason: Option<&str>, ) -> Result<()>; - + /// Update daily bandwidth usage async fn update_bandwidth_daily( &self, @@ -50,10 +50,10 @@ pub trait MySqlUsageExt: Send + Sync { upload_count: i32, download_count: i32, ) -> Result<()>; - + /// Get storage usage info async fn get_storage_usage(&self, account_hash: &str) -> Result; - + /// Get bandwidth usage for date range async fn get_bandwidth_usage( &self, @@ -61,16 +61,16 @@ pub trait MySqlUsageExt: Send + Sync { start_date: NaiveDate, end_date: NaiveDate, ) -> Result; - + /// Check if account is blocked async fn is_account_blocked(&self, account_hash: &str) -> Result; - + /// Set account block status async fn set_account_blocked(&self, account_hash: &str, blocked: bool) -> Result<()>; - + /// Update warning timestamp async fn update_last_warning(&self, account_hash: &str) -> Result<()>; - + /// Get account limits (with overrides) async fn get_account_limits(&self, account_hash: &str) -> Result; } @@ -108,36 +108,38 @@ pub struct AccountLimits { impl MySqlUsageExt for super::MySqlStorage { async fn init_usage_for_account(&self, account_hash: &str) -> Result<()> { debug!("Initializing usage records for account: {}", account_hash); - + // Create usage_storage record if not exists let query = r#" INSERT IGNORE INTO usage_storage (account_hash) VALUES (?) "#; - + sqlx::query(query) .bind(account_hash) .execute(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to init usage_storage: {}", e)))?; - + // Initialize current month bandwidth record let current_month = chrono::Utc::now().format("%Y-%m").to_string(); let query = r#" INSERT IGNORE INTO usage_bandwidth_monthly (account_hash, usage_month) VALUES (?, ?) 
"#; - + sqlx::query(query) .bind(account_hash) .bind(¤t_month) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to init bandwidth_monthly: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to init bandwidth_monthly: {}", e)) + })?; + Ok(()) } - + async fn try_increase_storage( &self, account_hash: &str, @@ -148,36 +150,39 @@ impl MySqlUsageExt for super::MySqlStorage { "Trying to update storage for {}: bytes_delta={}, files_delta={}", account_hash, bytes_delta, files_delta ); - + // Use stored procedure for atomic operation - let mut result = sqlx::query( - r#"CALL update_storage_usage(?, ?, ?, @success, @current_usage, @limit)"# + let mut result = + sqlx::query(r#"CALL update_storage_usage(?, ?, ?, @success, @current_usage, @limit)"#) + .bind(account_hash) + .bind(bytes_delta) + .bind(files_delta) + .execute(self.get_sqlx_pool()) + .await + .map_err(|e| { + StorageError::Database(format!("Failed to call update_storage_usage: {}", e)) + })?; + + // Get output parameters + let row = sqlx::query( + r#"SELECT @success as success, @current_usage as current_usage, @limit as `limit`"#, ) - .bind(account_hash) - .bind(bytes_delta) - .bind(files_delta) - .execute(self.get_sqlx_pool()) + .fetch_one(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to call update_storage_usage: {}", e)))?; - - // Get output parameters - let row = sqlx::query(r#"SELECT @success as success, @current_usage as current_usage, @limit as `limit`"#) - .fetch_one(self.get_sqlx_pool()) - .await - .map_err(|e| StorageError::Database(format!("Failed to get procedure output: {}", e)))?; - + .map_err(|e| StorageError::Database(format!("Failed to get procedure output: {}", e)))?; + let success: bool = row.try_get("success").unwrap_or(false); let current_usage: i64 = row.try_get("current_usage").unwrap_or(0); let limit: i64 = row.try_get("limit").unwrap_or(0); - + debug!( "Storage update result: success={}, current={}, limit={}", success, current_usage, limit ); - + Ok((success, current_usage as u64, limit as u64)) } - + async fn record_transfer_event( &self, event_id: &str, @@ -193,7 +198,7 @@ impl MySqlUsageExt for super::MySqlStorage { "Recording transfer event: id={}, type={}, bytes={}, status={}", event_id, transfer_type, bytes, status ); - + let query = r#" INSERT INTO transfer_events ( event_id, account_hash, file_id, revision, @@ -205,7 +210,7 @@ impl MySqlUsageExt for super::MySqlStorage { completed_at = VALUES(completed_at), updated_at = NOW() "#; - + sqlx::query(query) .bind(event_id) .bind(account_hash) @@ -218,8 +223,10 @@ impl MySqlUsageExt for super::MySqlStorage { .bind(status) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to record transfer event: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to record transfer event: {}", e)) + })?; + // Update bandwidth if status is success if status == "success" { let today = chrono::Utc::now().date_naive(); @@ -233,7 +240,7 @@ impl MySqlUsageExt for super::MySqlStorage { } else { (0, 1) }; - + self.update_bandwidth_daily( account_hash, today, @@ -241,20 +248,24 @@ impl MySqlUsageExt for super::MySqlStorage { download_bytes, upload_count, download_count, - ).await?; + ) + .await?; } - + Ok(()) } - + async fn update_transfer_status( &self, event_id: &str, status: &str, failure_reason: Option<&str>, ) -> Result<()> { - debug!("Updating transfer event status: id={}, status={}", event_id, status); - + debug!( + 
"Updating transfer event status: id={}, status={}", + event_id, status + ); + let query = r#" UPDATE transfer_events SET status = ?, @@ -262,18 +273,20 @@ impl MySqlUsageExt for super::MySqlStorage { failure_reason = ? WHERE event_id = ? "#; - + sqlx::query(query) .bind(status) .bind(failure_reason) .bind(event_id) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to update transfer status: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to update transfer status: {}", e)) + })?; + Ok(()) } - + async fn update_bandwidth_daily( &self, account_hash: &str, @@ -287,7 +300,7 @@ impl MySqlUsageExt for super::MySqlStorage { "Updating daily bandwidth for {} on {}: up={}, down={}", account_hash, date, upload_bytes, download_bytes ); - + let query = r#" INSERT INTO usage_bandwidth_daily ( account_hash, usage_date, @@ -301,7 +314,7 @@ impl MySqlUsageExt for super::MySqlStorage { download_count = download_count + VALUES(download_count), updated_at = NOW() "#; - + sqlx::query(query) .bind(account_hash) .bind(date) @@ -311,8 +324,10 @@ impl MySqlUsageExt for super::MySqlStorage { .bind(download_count) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to update daily bandwidth: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to update daily bandwidth: {}", e)) + })?; + // Also update monthly aggregation let month = date.format("%Y-%m").to_string(); let query = r#" @@ -328,7 +343,7 @@ impl MySqlUsageExt for super::MySqlStorage { download_count = download_count + VALUES(download_count), updated_at = NOW() "#; - + sqlx::query(query) .bind(account_hash) .bind(&month) @@ -338,11 +353,13 @@ impl MySqlUsageExt for super::MySqlStorage { .bind(download_count) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to update monthly bandwidth: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to update monthly bandwidth: {}", e)) + })?; + Ok(()) } - + async fn get_storage_usage(&self, account_hash: &str) -> Result { let query = r#" SELECT @@ -352,32 +369,32 @@ impl MySqlUsageExt for super::MySqlStorage { FROM usage_storage WHERE account_hash = ? 
"#; - + let row = sqlx::query(query) .bind(account_hash) .fetch_optional(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to get storage usage: {}", e)))?; - + match row { - Some(row) => { - Ok(StorageUsageInfo { - bytes_used: row.try_get::("bytes_used").unwrap_or(0) as u64, - bytes_limit: row.try_get::("bytes_limit").unwrap_or(10737418240) as u64, - bytes_soft_limit: row.try_get::("bytes_soft_limit").unwrap_or(8589934592) as u64, - files_count: row.try_get::("files_count").unwrap_or(0) as u32, - hard_blocked: row.try_get("hard_blocked").unwrap_or(false), - last_warning_at: row.try_get("last_warning_at").ok(), - grace_period_until: row.try_get("grace_period_until").ok(), - }) - } + Some(row) => Ok(StorageUsageInfo { + bytes_used: row.try_get::("bytes_used").unwrap_or(0) as u64, + bytes_limit: row.try_get::("bytes_limit").unwrap_or(10737418240) as u64, + bytes_soft_limit: row + .try_get::("bytes_soft_limit") + .unwrap_or(8589934592) as u64, + files_count: row.try_get::("files_count").unwrap_or(0) as u32, + hard_blocked: row.try_get("hard_blocked").unwrap_or(false), + last_warning_at: row.try_get("last_warning_at").ok(), + grace_period_until: row.try_get("grace_period_until").ok(), + }), None => { // Initialize if not exists self.init_usage_for_account(account_hash).await?; Ok(StorageUsageInfo { bytes_used: 0, - bytes_limit: 10737418240, // 10GB - bytes_soft_limit: 8589934592, // 8GB + bytes_limit: 10737418240, // 10GB + bytes_soft_limit: 8589934592, // 8GB files_count: 0, hard_blocked: false, last_warning_at: None, @@ -386,7 +403,7 @@ impl MySqlUsageExt for super::MySqlStorage { } } } - + async fn get_bandwidth_usage( &self, account_hash: &str, @@ -403,7 +420,7 @@ impl MySqlUsageExt for super::MySqlStorage { WHERE account_hash = ? AND usage_date BETWEEN ? AND ? "#; - + let row = sqlx::query(query) .bind(account_hash) .bind(start_date) @@ -411,10 +428,10 @@ impl MySqlUsageExt for super::MySqlStorage { .fetch_one(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to get bandwidth usage: {}", e)))?; - + let upload_bytes = row.try_get::("upload_bytes").unwrap_or(0) as u64; let download_bytes = row.try_get::("download_bytes").unwrap_or(0) as u64; - + Ok(BandwidthUsageInfo { upload_bytes, download_bytes, @@ -423,23 +440,25 @@ impl MySqlUsageExt for super::MySqlStorage { total_bytes: upload_bytes + download_bytes, }) } - + async fn is_account_blocked(&self, account_hash: &str) -> Result { let query = r#" SELECT hard_blocked FROM usage_storage WHERE account_hash = ? "#; - + let row = sqlx::query(query) .bind(account_hash) .fetch_optional(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to check block status: {}", e)))?; - - Ok(row.map(|r| r.try_get("hard_blocked").unwrap_or(false)).unwrap_or(false)) + + Ok(row + .map(|r| r.try_get("hard_blocked").unwrap_or(false)) + .unwrap_or(false)) } - + async fn set_account_blocked(&self, account_hash: &str, blocked: bool) -> Result<()> { let query = r#" UPDATE usage_storage @@ -447,23 +466,26 @@ impl MySqlUsageExt for super::MySqlStorage { updated_at = NOW() WHERE account_hash = ? 
"#; - + sqlx::query(query) .bind(blocked) .bind(account_hash) .execute(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to set block status: {}", e)))?; - + if blocked { - warn!("Account {} has been blocked due to quota exceeded", account_hash); + warn!( + "Account {} has been blocked due to quota exceeded", + account_hash + ); } else { info!("Account {} has been unblocked", account_hash); } - + Ok(()) } - + async fn update_last_warning(&self, account_hash: &str) -> Result<()> { let query = r#" UPDATE usage_storage @@ -471,16 +493,18 @@ impl MySqlUsageExt for super::MySqlStorage { updated_at = NOW() WHERE account_hash = ? "#; - + sqlx::query(query) .bind(account_hash) .execute(self.get_sqlx_pool()) .await - .map_err(|e| StorageError::Database(format!("Failed to update warning timestamp: {}", e)))?; - + .map_err(|e| { + StorageError::Database(format!("Failed to update warning timestamp: {}", e)) + })?; + Ok(()) } - + async fn get_account_limits(&self, account_hash: &str) -> Result { // Check for overrides first let query = r#" @@ -501,50 +525,68 @@ impl MySqlUsageExt for super::MySqlStorage { AND m.usage_month = DATE_FORMAT(CURDATE(), '%Y-%m') WHERE s.account_hash = ? "#; - + let row = sqlx::query(query) .bind(account_hash) .fetch_optional(self.get_sqlx_pool()) .await .map_err(|e| StorageError::Database(format!("Failed to get account limits: {}", e)))?; - + match row { Some(row) => { - let has_overrides = row.try_get::, _>("storage_bytes_limit").unwrap_or(None).is_some(); - + let has_overrides = row + .try_get::, _>("storage_bytes_limit") + .unwrap_or(None) + .is_some(); + Ok(AccountLimits { - storage_bytes_limit: row.try_get::, _>("storage_bytes_limit") + storage_bytes_limit: row + .try_get::, _>("storage_bytes_limit") .unwrap_or(None) - .or_else(|| row.try_get::, _>("default_storage_limit").unwrap_or(None)) + .or_else(|| { + row.try_get::, _>("default_storage_limit") + .unwrap_or(None) + }) .unwrap_or(10737418240) as u64, - storage_bytes_soft_limit: row.try_get::, _>("storage_bytes_soft_limit") + storage_bytes_soft_limit: row + .try_get::, _>("storage_bytes_soft_limit") .unwrap_or(None) - .or_else(|| row.try_get::, _>("default_storage_soft_limit").unwrap_or(None)) + .or_else(|| { + row.try_get::, _>("default_storage_soft_limit") + .unwrap_or(None) + }) .unwrap_or(8589934592) as u64, - bandwidth_monthly_limit: row.try_get::, _>("bandwidth_monthly_limit") + bandwidth_monthly_limit: row + .try_get::, _>("bandwidth_monthly_limit") .unwrap_or(None) - .or_else(|| row.try_get::, _>("default_bandwidth_limit").unwrap_or(None)) - .unwrap_or(107374182400) as u64, - bandwidth_monthly_soft_limit: row.try_get::, _>("bandwidth_monthly_soft_limit") + .or_else(|| { + row.try_get::, _>("default_bandwidth_limit") + .unwrap_or(None) + }) + .unwrap_or(107374182400) + as u64, + bandwidth_monthly_soft_limit: row + .try_get::, _>("bandwidth_monthly_soft_limit") .unwrap_or(None) - .or_else(|| row.try_get::, _>("default_bandwidth_soft_limit").unwrap_or(None)) - .unwrap_or(85899345920) as u64, + .or_else(|| { + row.try_get::, _>("default_bandwidth_soft_limit") + .unwrap_or(None) + }) + .unwrap_or(85899345920) + as u64, has_overrides, }) } None => { // Default limits Ok(AccountLimits { - storage_bytes_limit: 10737418240, // 10GB - storage_bytes_soft_limit: 8589934592, // 8GB - bandwidth_monthly_limit: 107374182400, // 100GB - bandwidth_monthly_soft_limit: 85899345920, // 80GB + storage_bytes_limit: 10737418240, // 10GB + storage_bytes_soft_limit: 8589934592, // 8GB + 
bandwidth_monthly_limit: 107374182400, // 100GB + bandwidth_monthly_soft_limit: 85899345920, // 80GB has_overrides: false, }) } } } } - - - From e55115da36724fd23802ffd10140693a2e975c8d Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 24 Sep 2025 12:57:46 -0600 Subject: [PATCH 33/71] Merge 443/50051 --- src/server/startup.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/server/startup.rs b/src/server/startup.rs index dc6b2e9..f842e47 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -150,6 +150,7 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc<AppState>) -> R // Performance optimizations .max_concurrent_streams(Some(config.max_concurrent_requests as u32)) .tcp_nodelay(true) + .accept_http1(false) // Add services .add_service(SyncServiceServer::new(sync_service)) .add_service(SyncClientServiceServer::new(sync_client_service)) From 4b8668a0c9b962f4bd1e5f2291aaea95cd9c77d0 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 24 Sep 2025 15:19:04 -0600 Subject: [PATCH 34/71] Merge 443/50051 --- src/handlers/health.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/handlers/health.rs b/src/handlers/health.rs index c52599e..c00d6fa 100644 --- a/src/handlers/health.rs +++ b/src/handlers/health.rs @@ -5,6 +5,7 @@ use actix_web::{web, HttpResponse, Result as ActixResult}; use serde_json::json; use tonic::{Request, Response, Status}; use tracing::info; +use std::sync::Arc; #[tonic::async_trait] impl HealthHandler for SyncServiceImpl { @@ -36,7 +37,7 @@ pub async fn health_check() -> ActixResult<HttpResponse> { /// HTTP readiness check endpoint pub async fn readiness_check( - app_state: web::Data<AppState>, + app_state: web::Data<Arc<AppState>>, ) -> ActixResult<HttpResponse> { // Perform basic dependency checks let storage_ok = app_state.storage.health_check().await.unwrap_or(false); @@ -74,7 +75,7 @@ pub async fn liveness_check() -> ActixResult<HttpResponse> { /// Detailed health for external debugging pub async fn health_details( - app_state: web::Data<AppState>, + app_state: web::Data<Arc<AppState>>, ) -> ActixResult<HttpResponse> { let cfg = crate::server::app_state::AppState::get_config(); From c19311556fa7deedb9fdb40d24e93c12dd44382a Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 24 Sep 2025 15:37:02 -0600 Subject: [PATCH 35/71] Merge 443/50051 --- src/handlers/health.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/handlers/health.rs b/src/handlers/health.rs index c00d6fa..86b05b9 100644 --- a/src/handlers/health.rs +++ b/src/handlers/health.rs @@ -3,9 +3,9 @@ use crate::server::service::SyncServiceImpl; use crate::sync::{HealthCheckRequest, HealthCheckResponse}; use actix_web::{web, HttpResponse, Result as ActixResult}; use serde_json::json; +use std::sync::Arc; use tonic::{Request, Response, Status}; use tracing::info; -use std::sync::Arc; #[tonic::async_trait] impl HealthHandler for SyncServiceImpl { From a035a88cefd16cca2fcc03361a6c186b741a2c48 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 24 Sep 2025 16:04:25 -0600 Subject: [PATCH 36/71] Merge 443/50051 --- src/server/startup.rs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/server/startup.rs b/src/server/startup.rs index 92f8c46..e97c0e7 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -191,6 +191,7 @@ async fn start_http_server(config: &ServerConfig, app_state: Arc<AppState>) -> R // Clone app state for the closure let app_state_clone = app_state.clone(); + let app_state_data = web::Data::new(app_state_clone.clone()); // Build HTTP server with middleware HttpServer::new(move || { @@ -199,7
+200,7 @@ async fn start_http_server(config: &ServerConfig, app_state: Arc) -> R App::new() // App data - .app_data(web::Data::new(app_state_clone.clone())) + .app_data(app_state_data.clone()) .app_data(web::Data::new(auth_handler)) // Middleware stack (optimized order) .wrap(middleware::Compress::default()) From 509b93cdf9299076364d0cde00475913b8ed38cc Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 11:03:31 -0600 Subject: [PATCH 37/71] Fix http size issue --- src/server/startup.rs | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/src/server/startup.rs b/src/server/startup.rs index e97c0e7..f83ad8b 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -2,6 +2,7 @@ use std::net::SocketAddr; use std::sync::Arc; use std::time::Duration; use tokio::signal; +use tonic::codec::CompressionEncoding; use tonic::transport::Server; use tracing::{error, info, instrument, warn}; @@ -135,6 +136,19 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc) -> R let sync_service = SyncServiceImpl::new(app_state.clone()); let sync_client_service = SyncClientServiceImpl::new(app_state.clone()); + // Wrap services with compression and message size limits + let sync_service = SyncServiceServer::new(sync_service) + .accept_compressed(CompressionEncoding::Gzip) + .send_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(64 * 1024 * 1024) + .max_encoding_message_size(64 * 1024 * 1024); + + let sync_client_service = SyncClientServiceServer::new(sync_client_service) + .accept_compressed(CompressionEncoding::Gzip) + .send_compressed(CompressionEncoding::Gzip) + .max_decoding_message_size(64 * 1024 * 1024) + .max_encoding_message_size(64 * 1024 * 1024); + let (mut health_reporter, health_service) = health_reporter(); health_reporter .set_service_status("", ServingStatus::Serving) @@ -151,8 +165,8 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc) -> R .max_concurrent_streams(Some(config.max_concurrent_requests as u32)) .tcp_nodelay(true) .accept_http1(false) - .add_service(SyncServiceServer::new(sync_service)) - .add_service(SyncClientServiceServer::new(sync_client_service)) + .add_service(sync_service) + .add_service(sync_client_service) .add_service(health_service); // Add reflection service in development From 1dac2127cff66f0c11374b6d465fe8494bee1a79 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 12:31:45 -0600 Subject: [PATCH 38/71] Fix http size issue --- Cargo.lock | 1173 ++++++++++++++++++++--------------------- proto/sync.proto | 33 ++ src/server/service.rs | 121 +++++ 3 files changed, 725 insertions(+), 602 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index c1cd42e..1be69a5 100755 --- a/Cargo.lock +++ b/Cargo.lock @@ -8,7 +8,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "5f7b0a21988c1bf877cf4759ef5ddaac04c1c9fe808c9142ecb78ba97d97a28a" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "bytes", "futures-core", "futures-sink", @@ -36,9 +36,9 @@ dependencies = [ [[package]] name = "actix-http" -version = "3.10.0" +version = "3.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0fa882656b67966045e4152c634051e70346939fced7117d5f0b52146a7c74c9" +checksum = "44cceded2fb55f3c4b67068fa64962e2ca59614edc5b03167de9ff82ae803da0" dependencies = [ "actix-codec", "actix-rt", @@ -46,7 +46,7 @@ dependencies = [ "actix-tls", "actix-utils", "base64 0.22.1", - "bitflags 2.9.0", + "bitflags 2.9.4", 
"brotli", "bytes", "bytestring", @@ -55,7 +55,7 @@ dependencies = [ "flate2", "foldhash", "futures-core", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "httparse", "httpdate", @@ -65,7 +65,7 @@ dependencies = [ "mime", "percent-encoding", "pin-project-lite", - "rand 0.9.1", + "rand 0.9.2", "sha1", "smallvec", "tokio", @@ -81,7 +81,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e01ed3140b2f8d422c68afa1ed2e85d996ea619c988ac834d255db32138655cb" dependencies = [ "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -101,9 +101,9 @@ dependencies = [ [[package]] name = "actix-rt" -version = "2.10.0" +version = "2.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "24eda4e2a6e042aa4e55ac438a2ae052d3b5da0ecf83d7411e1a368946925208" +checksum = "92589714878ca59a7626ea19734f0e07a6a875197eec751bb5d3f99e64998c63" dependencies = [ "futures-core", "tokio", @@ -111,9 +111,9 @@ dependencies = [ [[package]] name = "actix-server" -version = "2.5.1" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6398974fd4284f4768af07965701efbbb5fdc0616bff20cade1bb14b77675e24" +checksum = "a65064ea4a457eaf07f2fba30b4c695bf43b721790e9530d26cb6f9019ff7502" dependencies = [ "actix-rt", "actix-service", @@ -121,7 +121,7 @@ dependencies = [ "futures-core", "futures-util", "mio", - "socket2 0.5.9", + "socket2 0.5.10", "tokio", "tracing", ] @@ -167,9 +167,9 @@ dependencies = [ [[package]] name = "actix-web" -version = "4.10.2" +version = "4.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2e3b15b3dc6c6ed996e4032389e9849d4ab002b1e92fbfe85b5f307d1479b4d" +checksum = "a597b77b5c6d6a1e1097fddde329a83665e25c5437c696a3a9a4aa514a614dea" dependencies = [ "actix-codec", "actix-http", @@ -203,7 +203,7 @@ dependencies = [ "serde_json", "serde_urlencoded", "smallvec", - "socket2 0.5.9", + "socket2 0.5.10", "time", "tracing", "url", @@ -218,23 +218,23 @@ dependencies = [ "actix-router", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "addr2line" -version = "0.24.2" +version = "0.25.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "dfbe277e56a376000877090da837660b4427aad530e3028d44e0bffe4f89a1c1" +checksum = "1b5d307320b3181d6d7954e663bd7c774a838b8220fe0593c86d9fb09f498b4b" dependencies = [ "gimli", ] [[package]] name = "adler2" -version = "2.0.0" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "512761e0bb2578dd7380c6baaa0f4ce03e84f95e960231d1dec8bf4d7d6e2627" +checksum = "320119579fcad9c21884f5c4861d16174d0e06250625266f50fe6898340abefa" [[package]] name = "aead" @@ -273,15 +273,15 @@ dependencies = [ [[package]] name = "ahash" -version = "0.8.11" +version = "0.8.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e89da841a80418a9b391ebaea17f5c112ffaaa96f621d2c285b5174da76b9011" +checksum = "5a15f179cd60c4584b8a8c596927aadc462e27f2ca70c04e0071964a73ba7a75" dependencies = [ "cfg-if", - "getrandom 0.2.15", + "getrandom 0.3.3", "once_cell", "version_check", - "zerocopy 0.7.35", + "zerocopy", ] [[package]] @@ -362,12 +362,6 @@ dependencies = [ "url", ] -[[package]] -name = "android-tzdata" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e999941b234f3131b00bc13c22d06e8c5ff726d1b6318ac7eb276997bbb4fef0" - [[package]] name = "android_system_properties" version = "0.1.5" @@ -379,9 +373,9 @@ dependencies = [ 
[[package]] name = "anyhow" -version = "1.0.98" +version = "1.0.100" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e16d2d3311acee920a9eb8d33b8cbc1787ce4a264e85f964c2404b969bdcd487" +checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "arc-swap" @@ -401,7 +395,7 @@ dependencies = [ "nom", "num-traits", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.16", "time", ] @@ -413,7 +407,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] @@ -425,7 +419,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -442,9 +436,9 @@ dependencies = [ [[package]] name = "async-executor" -version = "1.13.2" +version = "1.13.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bb812ffb58524bdd10860d7d974e2f01cc0950c2438a74ee5ec2e2280c6c4ffa" +checksum = "497c00e0fd83a72a79a39fcbd8e3e2f055d6f6c7e025f3b3d91f4f8e76527fb8" dependencies = [ "async-task", "concurrent-queue", @@ -462,7 +456,7 @@ checksum = "13f937e26114b93193065fd44f507aa2e9169ad0cdabbb996920b1fe1ddea7ba" dependencies = [ "async-channel", "async-executor", - "async-io 2.5.0", + "async-io 2.6.0", "async-lock 3.4.1", "blocking", "futures-lite 2.6.1", @@ -501,20 +495,20 @@ dependencies = [ [[package]] name = "async-io" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19634d6336019ef220f09fd31168ce5c184b295cbf80345437cc36094ef223ca" +checksum = "456b8a8feb6f42d237746d4b3e9a178494627745c3c56c6ea55d92ba50d026fc" dependencies = [ - "async-lock 3.4.1", + "autocfg", "cfg-if", "concurrent-queue", "futures-io", "futures-lite 2.6.1", "parking", - "polling 3.10.0", - "rustix 1.0.8", + "polling 3.11.0", + "rustix 1.1.2", "slab", - "windows-sys 0.60.2", + "windows-sys 0.61.1", ] [[package]] @@ -568,7 +562,7 @@ checksum = "c7c24de15d275a1ecfd47a380fb4d5ec9bfe0933f309ed5e705b775596a3574d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -579,13 +573,13 @@ checksum = "8b75356056920673b02621b35afd0f7dda9306d03c79a30f5c56c44cf256e3de" [[package]] name = "async-trait" -version = "0.1.88" +version = "0.1.89" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e539d3fca749fcee5236ab05e93a52867dd549cc157c8cb7f99595f3cedffdb5" +checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -605,15 +599,15 @@ checksum = "1505bd5d3d116872e7271a6d4e16d81d0c8570876c8de68093a09ac269d8aac0" [[package]] name = "autocfg" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ace50bade8e6234aa140d9a2f552bbee1db4d353f69b8217bc503490fc1a9f26" +checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8" [[package]] name = "aws-config" -version = "1.8.1" +version = "1.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c18d005c70d2b9c0c1ea8876c039db0ec7fb71164d25c73ccea21bf41fd02171" +checksum = "8bc1b40fb26027769f16960d2f4a6bc20c4bb755d403e552c8c1a73af433c246" dependencies = [ "aws-credential-types", "aws-runtime", @@ -641,9 +635,9 @@ dependencies = [ [[package]] name = 
"aws-credential-types" -version = "1.2.3" +version = "1.2.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "687bc16bc431a8533fe0097c7f0182874767f920989d7260950172ae8e3c4465" +checksum = "d025db5d9f52cbc413b167136afb3d8aeea708c0d8884783cf6253be5e22f6f2" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", @@ -653,9 +647,9 @@ dependencies = [ [[package]] name = "aws-lc-rs" -version = "1.13.1" +version = "1.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93fcc8f365936c834db5514fc45aee5b1202d677e6b40e48468aaaa8183ca8c7" +checksum = "879b6c89592deb404ba4dc0ae6b58ffd1795c78991cbb5b8bc441c48a070440d" dependencies = [ "aws-lc-sys", "zeroize", @@ -663,22 +657,23 @@ dependencies = [ [[package]] name = "aws-lc-sys" -version = "0.29.0" +version = "0.32.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "61b1d86e7705efe1be1b569bab41d4fa1e14e220b60a160f78de2db687add079" +checksum = "ee74396bee4da70c2e27cf94762714c911725efe69d9e2672f998512a67a4ce4" dependencies = [ "bindgen", "cc", "cmake", "dunce", "fs_extra", + "libloading", ] [[package]] name = "aws-runtime" -version = "1.5.8" +version = "1.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2" +checksum = "c034a1bc1d70e16e7f4e4caf7e9f7693e4c9c24cd91cf17c2a0b21abaebc7c8b" dependencies = [ "aws-credential-types", "aws-sigv4", @@ -701,9 +696,9 @@ dependencies = [ [[package]] name = "aws-sdk-s3" -version = "1.96.0" +version = "1.106.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e25d24de44b34dcdd5182ac4e4c6f07bcec2661c505acef94c0d293b65505fe" +checksum = "2c230530df49ed3f2b7b4d9c8613b72a04cdac6452eede16d587fc62addfabac" dependencies = [ "aws-credential-types", "aws-runtime", @@ -735,9 +730,9 @@ dependencies = [ [[package]] name = "aws-sdk-secretsmanager" -version = "1.78.0" +version = "1.88.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "04f96b46a81d4e6e213f6e614dbc22b8babec9c5f7a0f48afed47219d88ec6c0" +checksum = "1656cc8753202f255a1bcc6e06f9e768f30968684022fd0dd2f8912cad00fcef" dependencies = [ "aws-credential-types", "aws-runtime", @@ -757,9 +752,9 @@ dependencies = [ [[package]] name = "aws-sdk-sso" -version = "1.74.0" +version = "1.84.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e0a69de9c1b9272da2872af60c7402683e7f45c06267735b4332deacb203239b" +checksum = "357a841807f6b52cb26123878b3326921e2a25faca412fabdd32bd35b7edd5d3" dependencies = [ "aws-credential-types", "aws-runtime", @@ -779,9 +774,9 @@ dependencies = [ [[package]] name = "aws-sdk-ssooidc" -version = "1.75.0" +version = "1.86.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f0b161d836fac72bdd5ac1a4cd1cdc38ab888c7af26cfd95f661be4409505e63" +checksum = "9d1cc7fb324aa12eb4404210e6381195c5b5e9d52c2682384f295f38716dd3c7" dependencies = [ "aws-credential-types", "aws-runtime", @@ -801,9 +796,9 @@ dependencies = [ [[package]] name = "aws-sdk-sts" -version = "1.76.0" +version = "1.86.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb1cd79a3412751a341a28e2cd0d6fa4345241976da427b075a0c0cd5409f886" +checksum = "e7d835f123f307cafffca7b9027c14979f1d403b417d8541d67cf252e8a21e35" dependencies = [ "aws-credential-types", "aws-runtime", @@ -824,9 +819,9 @@ dependencies = [ [[package]] name = "aws-sigv4" -version = "1.3.3" +version = "1.3.4" 
source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ddfb9021f581b71870a17eac25b52335b82211cdc092e02b6876b2bcefa61666" +checksum = "084c34162187d39e3740cb635acd73c4e3a551a36146ad6fe8883c929c9f876c" dependencies = [ "aws-credential-types", "aws-smithy-eventstream", @@ -863,9 +858,9 @@ dependencies = [ [[package]] name = "aws-smithy-checksums" -version = "0.63.4" +version = "0.63.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "244f00666380d35c1c76b90f7b88a11935d11b84076ac22a4c014ea0939627af" +checksum = "56d2df0314b8e307995a3b86d44565dfe9de41f876901a7d71886c756a25979f" dependencies = [ "aws-smithy-http", "aws-smithy-types", @@ -883,9 +878,9 @@ dependencies = [ [[package]] name = "aws-smithy-eventstream" -version = "0.60.9" +version = "0.60.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "338a3642c399c0a5d157648426110e199ca7fd1c689cc395676b81aa563700c4" +checksum = "182b03393e8c677347fb5705a04a9392695d47d20ef0a2f8cfe28c8e6b9b9778" dependencies = [ "aws-smithy-types", "bytes", @@ -894,9 +889,9 @@ dependencies = [ [[package]] name = "aws-smithy-http" -version = "0.62.1" +version = "0.62.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "99335bec6cdc50a346fda1437f9fefe33abf8c99060739a546a16457f2862ca9" +checksum = "7c4dacf2d38996cf729f55e7a762b30918229917eca115de45dfa8dfb97796c9" dependencies = [ "aws-smithy-eventstream", "aws-smithy-runtime-api", @@ -915,38 +910,39 @@ dependencies = [ [[package]] name = "aws-smithy-http-client" -version = "1.0.6" +version = "1.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f108f1ca850f3feef3009bdcc977be201bca9a91058864d9de0684e64514bee0" +checksum = "147e8eea63a40315d704b97bf9bc9b8c1402ae94f89d5ad6f7550d963309da1b" dependencies = [ "aws-smithy-async", "aws-smithy-runtime-api", "aws-smithy-types", - "h2 0.3.26", - "h2 0.4.11", + "h2 0.3.27", + "h2 0.4.12", "http 0.2.12", "http 1.3.1", "http-body 0.4.6", "hyper 0.14.32", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-rustls 0.24.2", "hyper-rustls 0.27.7", "hyper-util", "pin-project-lite", "rustls 0.21.12", - "rustls 0.23.28", + "rustls 0.23.32", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", + "tokio-rustls 0.26.4", "tower 0.5.2", "tracing", ] [[package]] name = "aws-smithy-json" -version = "0.61.4" +version = "0.61.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a16e040799d29c17412943bdbf488fd75db04112d0c0d4b9290bacf5ae0014b9" +checksum = "eaa31b350998e703e9826b2104dd6f63be0508666e1aba88137af060e8944047" dependencies = [ "aws-smithy-types", ] @@ -972,9 +968,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime" -version = "1.8.4" +version = "1.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c3aaec682eb189e43c8a19c3dab2fe54590ad5f2cc2d26ab27608a20f2acf81c" +checksum = "4fa63ad37685ceb7762fa4d73d06f1d5493feb88e3f27259b9ed277f4c01b185" dependencies = [ "aws-smithy-async", "aws-smithy-http", @@ -996,9 +992,9 @@ dependencies = [ [[package]] name = "aws-smithy-runtime-api" -version = "1.8.3" +version = "1.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9852b9226cb60b78ce9369022c0df678af1cac231c882d5da97a0c4e03be6e67" +checksum = "07f5e0fc8a6b3f2303f331b94504bbf754d85488f402d6f1dd7a6080f99afe56" dependencies = [ "aws-smithy-async", "aws-smithy-types", @@ -1048,9 +1044,9 @@ dependencies = [ [[package]] name = "aws-types" -version = "1.3.7" +version = 
"1.3.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a322fec39e4df22777ed3ad8ea868ac2f94cd15e1a55f6ee8d8d6305057689a" +checksum = "b069d19bf01e46298eaedd7c6f283fe565a59263e53eebec945f3e6398f42390" dependencies = [ "aws-credential-types", "aws-smithy-async", @@ -1107,9 +1103,9 @@ dependencies = [ [[package]] name = "backtrace" -version = "0.3.74" +version = "0.3.76" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8d82cb332cdfaed17ae235a638438ac4d4839913cc2af585c3c6746e8f8bee1a" +checksum = "bb531853791a215d7c62a30daf0dde835f381ab5de4589cfe7c649d2cbe92bd6" dependencies = [ "addr2line", "cfg-if", @@ -1117,7 +1113,7 @@ dependencies = [ "miniz_oxide", "object", "rustc-demangle", - "windows-targets 0.52.6", + "windows-link", ] [[package]] @@ -1156,22 +1152,20 @@ dependencies = [ [[package]] name = "base64ct" -version = "1.7.3" +version = "1.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e25b6adfb930f02d1981565a6e5d9c547ac15a96606256d3b59040e5cd4ca3" +checksum = "55248b47b0caf0546f7988906588779981c43bb1bc9d0c44087278f80cdb44ba" [[package]] name = "bindgen" -version = "0.69.5" +version = "0.72.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271383c67ccabffb7381723dea0672a673f292304fcb45c01cc648c7a8d58088" +checksum = "993776b509cfb49c750f11b8f07a46fa23e0a1386ffc01fb1e7d343efc387895" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "cexpr", "clang-sys", - "itertools", - "lazy_static", - "lazycell", + "itertools 0.13.0", "log", "prettyplease", "proc-macro2", @@ -1179,8 +1173,7 @@ dependencies = [ "regex", "rustc-hash", "shlex", - "syn 2.0.100", - "which", + "syn 2.0.106", ] [[package]] @@ -1191,9 +1184,9 @@ checksum = "bef38d45163c2f1dde094a7dfd33ccf595c92905c8f8f4fdc18d06fb1037718a" [[package]] name = "bitflags" -version = "2.9.0" +version = "2.9.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5c8214115b7bf84099f1309324e63141d4c5d7cc26862f97a0a857dbefe165bd" +checksum = "2261d10cca569e4643e526d8dc2e62e433cc8aba21ab764233731f8d369bf394" dependencies = [ "serde", ] @@ -1231,9 +1224,9 @@ dependencies = [ [[package]] name = "brotli" -version = "7.0.0" +version = "8.0.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc97b8f16f944bba54f0433f07e30be199b6dc2bd25937444bbad560bcea29bd" +checksum = "4bd8b9603c7aa97359dbd97ecf258968c95f3adddd6db2f7e7a5bef101c84560" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1242,9 +1235,9 @@ dependencies = [ [[package]] name = "brotli-decompressor" -version = "4.0.3" +version = "5.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a334ef7c9e23abf0ce748e8cd309037da93e606ad52eb372e4ce327a0dcfbdfd" +checksum = "874bb8112abecc98cbd6d81ea4fa7e94fb9449648c93cc89aa40c81c24d7de03" dependencies = [ "alloc-no-stdlib", "alloc-stdlib", @@ -1252,9 +1245,9 @@ dependencies = [ [[package]] name = "bumpalo" -version = "3.17.0" +version = "3.19.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1628fb46dfa0b37568d12e5edd512553eccf6a22a78e8bde00bb4aed84d5bdbf" +checksum = "46c5e41b57b8bba42a04676d81cb89e9ee8e859a1a66f80a5a72e1cb76b34d43" [[package]] name = "byteorder" @@ -1280,9 +1273,9 @@ dependencies = [ [[package]] name = "bytestring" -version = "1.4.0" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e465647ae23b2823b0753f50decb2d5a86d2bb2cac04788fafd1f80e45378e5f" 
+checksum = "113b4343b5f6617e7ad401ced8de3cc8b012e73a594347c307b90db3e9271289" dependencies = [ "bytes", ] @@ -1298,10 +1291,11 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.19" +version = "1.2.39" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e3a13707ac958681c13b39b458c073d0d9bc8a22cb1b2f4c8e55eb72c13f362" +checksum = "e1354349954c6fc9cb0deab020f27f783cf0b604e8bb754dc4658ecf0d29c35f" dependencies = [ + "find-msvc-tools", "jobserver", "libc", "shlex", @@ -1318,17 +1312,16 @@ dependencies = [ [[package]] name = "cfg-if" -version = "1.0.0" +version = "1.0.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "baf1de4339761588bc0619e3cbc0120ee582ebb74b53b4efbf79117bd2da40fd" +checksum = "2fd1289c04a9ea8cb22300a459a72a385d7c73d3259e2ed7dcb2af674838cfa9" [[package]] name = "chrono" -version = "0.4.40" +version = "0.4.42" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a7964611d71df112cb1730f2ee67324fcf4d0fc6606acbbe9bfe06df124637c" +checksum = "145052bdd345b87320e369255277e3fb5152762ad123a901ef5c262dd38fe8d2" dependencies = [ - "android-tzdata", "iana-time-zone", "js-sys", "num-traits", @@ -1527,9 +1520,9 @@ dependencies = [ [[package]] name = "crc" -version = "3.2.1" +version = "3.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "69e6e4d7b33a94f0991c26729976b10ebde1d34c3ee82408fb536164fa10d636" +checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675" dependencies = [ "crc-catalog", ] @@ -1549,15 +1542,15 @@ dependencies = [ "crc", "digest", "libc", - "rand 0.9.1", + "rand 0.9.2", "regex", ] [[package]] name = "crc32fast" -version = "1.4.2" +version = "1.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3" +checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511" dependencies = [ "cfg-if", ] @@ -1683,14 +1676,14 @@ checksum = "8034092389675178f570469e6c3b0465d3d30b4505c294a6550db47f3c17ad18" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "deranged" -version = "0.4.0" +version = "0.5.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c9e6a11ca8224451684bc0d7d5a7adbf8f2fd6887261a1cfc3c0432f9d4068e" +checksum = "a41953f86f8a05768a6cda24def994fd2f424b04ec5c719cf89989779f199071" dependencies = [ "powerfmt", ] @@ -1705,7 +1698,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -1725,7 +1718,7 @@ checksum = "bda628edc44c4bb645fbe0f758797143e4e07926f7ebf4e9bdfbd3d2ce621df3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "unicode-xid", ] @@ -1758,7 +1751,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -1843,12 +1836,12 @@ checksum = "877a4ace8713b0bcf2a4e7eec82529c029f1d0619886d18145fea96c3ffe5c0f" [[package]] name = "errno" -version = "0.3.11" +version = "0.3.14" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "976dd42dc7e85965fe702eb8164f21f450704bdde31faefd6471dba214cb594e" +checksum = "39cab71617ae0d63f51a36d69f866391735b51691dbda63cf6f96d042b63efeb" dependencies = [ "libc", - "windows-sys 0.59.0", + "windows-sys 0.61.1", ] [[package]] @@ -1923,6 +1916,12 @@ dependencies = [ "subtle", ] +[[package]] +name = 
"find-msvc-tools" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959" + [[package]] name = "fixedbitset" version = "0.4.2" @@ -1937,9 +1936,9 @@ checksum = "b7ac824320a75a52197e8f2d787f6a38b6718bb6897a35142d749af3c0e8f4fe" [[package]] name = "flate2" -version = "1.1.1" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7ced92e76e966ca2fd84c8f7aa01a4aea65b0eb6648d72f7c8f3e2764a67fece" +checksum = "4a3d7db9596fecd151c5f638c0ee5d5bd487b6e0ea232e5dc96d5250f6f94b1d" dependencies = [ "crc32fast", "miniz_oxide", @@ -1970,9 +1969,9 @@ checksum = "d9c4f5dac5e15c24eb999c26181a6ca40b39fe946cbe4c263c7209467bc83af2" [[package]] name = "form_urlencoded" -version = "1.2.1" +version = "1.2.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e13624c2627564efccf4934284bdd98cbaa14e79b0b5a141218e507b3a823456" +checksum = "cb4cb245038516f5f85277875cdaa4f7d2c9a0fa0468de06ed190163b1581fcf" dependencies = [ "percent-encoding", ] @@ -2078,7 +2077,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -2123,27 +2122,27 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.15" +version = "0.2.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c4567c8db10ae91089c99af84c68c38da3ec2f087c3f82960bcdbf3656b6f4d7" +checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" dependencies = [ "cfg-if", "js-sys", "libc", - "wasi 0.11.0+wasi-snapshot-preview1", + "wasi 0.11.1+wasi-snapshot-preview1", "wasm-bindgen", ] [[package]] name = "getrandom" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "73fea8450eea4bac3940448fb7ae50d91f034f941199fcd9d909a5a07aa455f0" +checksum = "26145e563e54f2cadc477553f1ec5ee650b00862f0a58bcd12cbdc5f0ea2d2f4" dependencies = [ "cfg-if", "libc", "r-efi", - "wasi 0.14.2+wasi-0.2.4", + "wasi 0.14.7+wasi-0.2.4", ] [[package]] @@ -2158,15 +2157,15 @@ dependencies = [ [[package]] name = "gimli" -version = "0.31.1" +version = "0.32.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07e28edb80900c19c28f1072f2e8aeca7fa06b23cd4169cefe1af5aa3260783f" +checksum = "e629b9b98ef3dd8afe6ca2bd0f89306cec16d43d907889945bc5d6687f2f13c7" [[package]] name = "glob" -version = "0.3.2" +version = "0.3.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a8d1add55171497b4705a648c6b583acafb01d58050a51727785f0b2c8e0a2b2" +checksum = "0cc23270f6e1808e30a928bdc84dea0b9b4136a8bc82338574f23baf47bbd280" [[package]] name = "group" @@ -2181,9 +2180,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.3.26" +version = "0.3.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8" +checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d" dependencies = [ "bytes", "fnv", @@ -2191,7 +2190,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.9.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -2200,9 +2199,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.11" +version = "0.4.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"17da50a276f1e01e0ba6c029e47b7100754904ee8a278f886546e98575380785" +checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" dependencies = [ "atomic-waker", "bytes", @@ -2210,7 +2209,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.9.0", + "indexmap 2.11.4", "slab", "tokio", "tokio-util", @@ -2235,15 +2234,21 @@ dependencies = [ [[package]] name = "hashbrown" -version = "0.15.2" +version = "0.15.5" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf151400ff0baff5465007dd2f3e717f3fe502074ca563069ce3a6629d07b289" +checksum = "9229cfe53dfd69f0609a49f65461bd93001ea1ef889cd5529dd176593f5338a1" dependencies = [ "allocator-api2", "equivalent", "foldhash", ] +[[package]] +name = "hashbrown" +version = "0.16.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5419bdc4f6a9207fbeba6d11b604d481addf78ecd10c11ad51e76c2f6482748d" + [[package]] name = "hashlink" version = "0.8.4" @@ -2276,9 +2281,9 @@ checksum = "d231dfb89cfffdbc30e7fc41579ed6066ad03abda9e567ccafae602b97ec5024" [[package]] name = "hermit-abi" -version = "0.5.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f154ce46856750ed433c8649605bf7ed2de3bc35fd9d2a9f30cddd873c80cb08" +checksum = "fc0fef456e4baa96da950455cd02c081ca953b141298e41db3fc7e36b1da849c" [[package]] name = "hex" @@ -2402,14 +2407,14 @@ dependencies = [ "futures-channel", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "httparse", "httpdate", "itoa", "pin-project-lite", - "socket2 0.5.9", + "socket2 0.5.10", "tokio", "tower-service", "tracing", @@ -2418,19 +2423,21 @@ dependencies = [ [[package]] name = "hyper" -version = "1.6.0" +version = "1.7.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cc2b571658e38e0c01b1fdca3bbbe93c00d3d71693ff2770043f8c29bc7d6f80" +checksum = "eb3aa54a13a0dfe7fbe3a59e0c76093041720fdc77b110cc0fc260fafb4dc51e" dependencies = [ + "atomic-waker", "bytes", "futures-channel", - "futures-util", - "h2 0.4.11", + "futures-core", + "h2 0.4.12", "http 1.3.1", "http-body 1.0.1", "httparse", "itoa", "pin-project-lite", + "pin-utils", "smallvec", "tokio", "want", @@ -2459,13 +2466,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e3c93eb611681b207e1fe55d5a71ecf91572ec8a6705cdb6857f7d8d5242cf58" dependencies = [ "http 1.3.1", - "hyper 1.6.0", + "hyper 1.7.0", "hyper-util", - "rustls 0.23.28", + "rustls 0.23.32", "rustls-native-certs 0.8.1", "rustls-pki-types", "tokio", - "tokio-rustls 0.26.2", + "tokio-rustls 0.26.4", "tower-service", ] @@ -2483,20 +2490,23 @@ dependencies = [ [[package]] name = "hyper-util" -version = "0.1.15" +version = "0.1.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f66d5bd4c6f02bf0542fad85d626775bab9258cf795a4256dcaf3161114d1df" +checksum = "3c6995591a8f1380fcb4ba966a252a4b29188d51d2b89e3a252f5305be65aea8" dependencies = [ + "base64 0.22.1", "bytes", "futures-channel", "futures-core", "futures-util", "http 1.3.1", "http-body 1.0.1", - "hyper 1.6.0", + "hyper 1.7.0", + "ipnet", "libc", + "percent-encoding", "pin-project-lite", - "socket2 0.5.9", + "socket2 0.6.0", "tokio", "tower-service", "tracing", @@ -2504,9 +2514,9 @@ dependencies = [ [[package]] name = "iana-time-zone" -version = "0.1.63" +version = "0.1.64" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"b0c919e5debc312ad217002b8048a17b7d83f80703865bbfcfebb0458b0b27d8" +checksum = "33e57f83510bb73707521ebaffa789ec8caf86f9657cad665b092b581d40e9fb" dependencies = [ "android_system_properties", "core-foundation-sys", @@ -2528,21 +2538,22 @@ dependencies = [ [[package]] name = "icu_collections" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "db2fa452206ebee18c4b5c2274dbf1de17008e874b4dc4f0aea9d01ca79e4526" +checksum = "200072f5d0e3614556f94a9930d5dc3e0662a652823904c3a75dc3b0af7fee47" dependencies = [ "displaydoc", + "potential_utf", "yoke", "zerofrom", "zerovec", ] [[package]] -name = "icu_locid" -version = "1.5.0" +name = "icu_locale_core" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13acbb8371917fc971be86fc8057c41a64b521c184808a698c02acc242dbf637" +checksum = "0cde2700ccaed3872079a65fb1a78f6c0a36c91570f28755dda67bc8f7d9f00a" dependencies = [ "displaydoc", "litemap", @@ -2551,31 +2562,11 @@ dependencies = [ "zerovec", ] -[[package]] -name = "icu_locid_transform" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "01d11ac35de8e40fdeda00d9e1e9d92525f3f9d887cdd7aa81d727596788b54e" -dependencies = [ - "displaydoc", - "icu_locid", - "icu_locid_transform_data", - "icu_provider", - "tinystr", - "zerovec", -] - -[[package]] -name = "icu_locid_transform_data" -version = "1.5.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7515e6d781098bf9f7205ab3fc7e9709d34554ae0b21ddbcb5febfa4bc7df11d" - [[package]] name = "icu_normalizer" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "19ce3e0da2ec68599d193c93d088142efd7f9c5d6fc9b803774855747dc6a84f" +checksum = "436880e8e18df4d7bbc06d58432329d6458cc84531f7ac5f024e93deadb37979" dependencies = [ "displaydoc", "icu_collections", @@ -2583,72 +2574,59 @@ dependencies = [ "icu_properties", "icu_provider", "smallvec", - "utf16_iter", - "utf8_iter", - "write16", "zerovec", ] [[package]] name = "icu_normalizer_data" -version = "1.5.1" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5e8338228bdc8ab83303f16b797e177953730f601a96c25d10cb3ab0daa0cb7" +checksum = "00210d6893afc98edb752b664b8890f0ef174c8adbb8d0be9710fa66fbbf72d3" [[package]] name = "icu_properties" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "93d6020766cfc6302c15dbbc9c8778c37e62c14427cb7f6e601d849e092aeef5" +checksum = "016c619c1eeb94efb86809b015c58f479963de65bdb6253345c1a1276f22e32b" dependencies = [ "displaydoc", "icu_collections", - "icu_locid_transform", + "icu_locale_core", "icu_properties_data", "icu_provider", - "tinystr", + "potential_utf", + "zerotrie", "zerovec", ] [[package]] name = "icu_properties_data" -version = "1.5.1" +version = "2.0.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "85fb8799753b75aee8d2a21d7c14d9f38921b54b3dbda10f5a3c7a7b82dba5e2" +checksum = "298459143998310acd25ffe6810ed544932242d3f07083eee1084d83a71bd632" [[package]] name = "icu_provider" -version = "1.5.0" +version = "2.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6ed421c8a8ef78d3e2dbc98a973be2f3770cb42b606e3ab18d6237c4dfde68d9" +checksum = "03c80da27b5f4187909049ee2d72f276f0d9f99a42c306bd0131ecfe04d8e5af" dependencies = [ "displaydoc", - "icu_locid", - "icu_provider_macros", + 
"icu_locale_core", "stable_deref_trait", "tinystr", "writeable", "yoke", "zerofrom", + "zerotrie", "zerovec", ] -[[package]] -name = "icu_provider_macros" -version = "1.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1ec89e9337638ecdc08744df490b221a7399bf8d164eb52a665454e60e075ad6" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", -] - [[package]] name = "idna" -version = "1.0.3" +version = "1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "686f825264d630750a544639377bae737628043f20d38bbc029e8f29ea968a7e" +checksum = "3b0875f23caa03898994f6ddc501886a45c7d3d62d04d2d90788d47be1b1e4de" dependencies = [ "idna_adapter", "smallvec", @@ -2657,9 +2635,9 @@ dependencies = [ [[package]] name = "idna_adapter" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "daca1df1c957320b2cf139ac61e7bd64fed304c5040df000a745aa1de3b4ef71" +checksum = "3acae9609540aa318d1bc588455225fb2085b9ed0c4f6bd0d9d5bcd86f1a0344" dependencies = [ "icu_normalizer", "icu_properties", @@ -2683,12 +2661,12 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.9.0" +version = "2.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cea70ddb795996207ad57735b50c5982d8844f38ba9ee5f1aedcfb708a2aa11e" +checksum = "4b0f83760fb341a774ed326568e19f5a863af4a952def8c39f9ab92fd95b88e5" dependencies = [ "equivalent", - "hashbrown 0.15.2", + "hashbrown 0.16.0", ] [[package]] @@ -2721,6 +2699,17 @@ dependencies = [ "windows-sys 0.48.0", ] +[[package]] +name = "io-uring" +version = "0.7.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "046fa2d4d00aea763528b4950358d0ead425372445dc8ff86312b3c69ff7727b" +dependencies = [ + "bitflags 2.9.4", + "cfg-if", + "libc", +] + [[package]] name = "ipnet" version = "2.11.0" @@ -2729,9 +2718,18 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" -version = "0.10.5" +version = "0.12.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b0fd2260e829bddf4cb6ea802289de2f86d6a7a690192fbe91b3f46e0f2c8473" +checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +dependencies = [ + "either", +] + +[[package]] +name = "itertools" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] @@ -2744,19 +2742,19 @@ checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c" [[package]] name = "jobserver" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "38f262f097c174adebe41eb73d66ae9c06b2844fb0da69969647bbddd9b0538a" +checksum = "9afb3de4395d6b3e67a780b6de64b51c978ecf11cb9a462c66be7d4ca9039d33" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", "libc", ] [[package]] name = "js-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1cfaf33c695fc6e08064efbc1f72ec937429614f25eef83af942d0e227c3a28f" +checksum = "ec48937a97411dcb524a265206ccd4c90bb711fca92b2792c407f268825b9305" dependencies = [ "once_cell", "wasm-bindgen", @@ -2814,33 +2812,38 @@ dependencies = [ "spin 0.9.8", ] -[[package]] -name = "lazycell" -version = "1.3.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"830d08ce1d1d941e6b30645f1a0eb5643013d835ce3779a5fc208261dbe10f55" - [[package]] name = "libc" -version = "0.2.172" +version = "0.2.176" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d750af042f7ef4f724306de029d18836c26c1765a54a6a3f094cbd23a7267ffa" +checksum = "58f929b4d672ea937a23a1ab494143d968337a5f47e56d0815df1e0890ddf174" [[package]] name = "libloading" -version = "0.8.7" +version = "0.8.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6a793df0d7afeac54f95b471d3af7f0d4fb975699f972341a4b76988d49cdf0c" +checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667" dependencies = [ "cfg-if", - "windows-targets 0.53.2", + "windows-targets 0.53.4", ] [[package]] name = "libm" -version = "0.2.13" +version = "0.2.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9627da5196e5d8ed0b0495e61e518847578da83483c37288316d9b2e03a7f72" +checksum = "f9fbbcab51052fe104eb5e5d351cf728d30a5be1fe14d9be8a3b097481fb97de" + +[[package]] +name = "libredox" +version = "0.1.10" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "416f7e718bdb06000964960ffa43b4335ad4012ae8b99060261aa4a8088d5ccb" +dependencies = [ + "bitflags 2.9.4", + "libc", + "redox_syscall", +] [[package]] name = "libsqlite3-sys" @@ -2861,21 +2864,15 @@ checksum = "ef53942eb7bf7ff43a617b3e2c1c4a5ecf5944a7c1bc12d7ee39bbb15e5c1519" [[package]] name = "linux-raw-sys" -version = "0.4.15" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d26c52dbd32dccf2d10cac7725f8eae5296885fb5703b261f7d0a0739ec807ab" - -[[package]] -name = "linux-raw-sys" -version = "0.9.4" +version = "0.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cd945864f07fe9f5371a27ad7b52a172b4b499999f1d97574c9fa68373937e12" +checksum = "df1d3c3b53da64cf5760482273a98e575c651a67eec7f77df96b5b642de8f039" [[package]] name = "litemap" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "23fb14cb19457329c82206317a5663005a4d404783dc74f4252769b0d5f42856" +checksum = "241eaef5fd12c88705a01fc1066c48c4b36e0dd4377dcdc7ec3942cea7a69956" [[package]] name = "local-channel" @@ -2896,9 +2893,9 @@ checksum = "4d873d7c67ce09b42110d801813efbc9364414e356be9935700d368351657487" [[package]] name = "lock_api" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "07af8b9cdd281b7915f413fa73f29ebd5d55d0d3f0155584dade1ff18cea1b17" +checksum = "96936507f153605bddfcda068dd804796c84324ed2510809e5b2a624c81da765" dependencies = [ "autocfg", "scopeguard", @@ -2906,9 +2903,9 @@ dependencies = [ [[package]] name = "log" -version = "0.4.27" +version = "0.4.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "13dc2df351e3202783a1fe0d44375f7295ffb4049267b0f3018346dc122a1d94" +checksum = "34080505efa8e45a4b816c349525ebe327ceaa8559756f0356cba97ef3bf7432" [[package]] name = "lru" @@ -2916,7 +2913,7 @@ version = "0.12.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "234cf4f4a04dc1f57e24b96cc0cd600cf2af460d4161ac5ecdd0af8e1f3b2a38" dependencies = [ - "hashbrown 0.15.2", + "hashbrown 0.15.5", ] [[package]] @@ -2927,11 +2924,11 @@ checksum = "ffbee8634e0d45d258acb448e7eaab3fce7a0a467395d4d9f228e3c1f01fb2e4" [[package]] name = "matchers" -version = "0.1.0" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"8263075bb86c5a1b1427b5ae862e8889656f126e9f77c484496e8b47cf5c5558" +checksum = "d1525a2a28c7f4fa0fc98bb91ae755d1e2d1505079e05539e35bc876b5d65ae9" dependencies = [ - "regex-automata 0.1.10", + "regex-automata", ] [[package]] @@ -2958,9 +2955,9 @@ checksum = "490cc448043f947bae3cbee9c203358d62dbee0db12107a74be5c30ccfd09771" [[package]] name = "memchr" -version = "2.7.4" +version = "2.7.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "78ca9ab1a0babb1e7d5695e3530886289c18cf2f87ec19a575a0abdce112e3a3" +checksum = "f52b00d39961fc5b2736ea853c9cc86238e165017a493d1d5c8eac6bdc4cc273" [[package]] name = "mime" @@ -2976,30 +2973,30 @@ checksum = "68354c5c6bd36d73ff3feceb05efa59b6acb7626617f4962be322a825e61f79a" [[package]] name = "miniz_oxide" -version = "0.8.8" +version = "0.8.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3be647b768db090acb35d5ec5db2b0e1f1de11133ca123b9eacf5137868f892a" +checksum = "1fa76a2c86f704bdb222d66965fb3d63269ce38518b83cb0575fca855ebb6316" dependencies = [ "adler2", ] [[package]] name = "mio" -version = "1.0.3" +version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2886843bf800fba2e3377cff24abf6379b4c4d5c6681eaf9ea5b0d15090450bd" +checksum = "78bed444cc8a2160f01cbcf811ef18cac863ad68ae8ca62092e8db51d51c761c" dependencies = [ "libc", "log", - "wasi 0.11.0+wasi-snapshot-preview1", - "windows-sys 0.52.0", + "wasi 0.11.1+wasi-snapshot-preview1", + "windows-sys 0.59.0", ] [[package]] name = "multimap" -version = "0.8.3" +version = "0.10.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e5ce46fe64a9d73be07dcbe690a38ce1b293be448fd8ce1e6c1b8062c9f72c6a" +checksum = "1d87ecb2933e8aeadb3e3a02b828fed80a7528047e68b4f424523a0981a3a084" [[package]] name = "mutually_exclusive_features" @@ -3028,12 +3025,11 @@ dependencies = [ [[package]] name = "nu-ansi-term" -version = "0.46.0" +version = "0.50.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77a8165726e8236064dbb45459242600304b42a5ea24ee2948e18e023bf7ba84" +checksum = "d4a28e057d01f97e61255210fcff094d74ed0466038633e95017f5beb68e4399" dependencies = [ - "overload", - "winapi", + "windows-sys 0.52.0", ] [[package]] @@ -3105,7 +3101,7 @@ version = "1.17.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "91df4bbde75afed763b708b7eee1e8e7651e02d97f6d5dd763e89367e957b23b" dependencies = [ - "hermit-abi 0.5.1", + "hermit-abi 0.5.2", "libc", ] @@ -3117,7 +3113,7 @@ checksum = "c38841cdd844847e3e7c8d29cef9dcfed8877f8f56f9071f77843ecf3baf937f" dependencies = [ "base64 0.13.1", "chrono", - "getrandom 0.2.15", + "getrandom 0.2.16", "http 0.2.12", "rand 0.8.5", "reqwest", @@ -3131,9 +3127,9 @@ dependencies = [ [[package]] name = "object" -version = "0.36.7" +version = "0.37.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "62948e14d923ea95ea2c7c86c71013138b66525b86bdc08d2dcc262bdb497b87" +checksum = "ff76201f031d8863c38aa7f905eca4f53abbfa15f609db4277d44cd8938f33fe" dependencies = [ "memchr", ] @@ -3171,12 +3167,6 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1a80800c0488c3a21695ea981a54918fbb37abf04f4d0720c453632255e2ff0e" -[[package]] -name = "overload" -version = "0.1.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b15813163c1d831bf4a13c3610c05c0d03b39feb07f7e09fa234dac9b15aaf39" - [[package]] name = "p12-keystore" version = "0.1.5" @@ 
-3191,11 +3181,11 @@ dependencies = [ "hmac", "pkcs12", "pkcs5", - "rand 0.9.1", + "rand 0.9.2", "rc2", "sha1", "sha2", - "thiserror 2.0.12", + "thiserror 2.0.16", "x509-parser", ] @@ -3218,9 +3208,9 @@ checksum = "f38d5652c16fde515bb1ecef450ab0f6a219d619a7274976324d5e377f7dceba" [[package]] name = "parking_lot" -version = "0.12.3" +version = "0.12.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f1bf18183cf54e8d6059647fc3063646a1801cf30896933ec2311622cc4b9a27" +checksum = "70d58bf43669b5795d1576d0641cfb6fbb2057bf629506267a92807158584a13" dependencies = [ "lock_api", "parking_lot_core", @@ -3228,9 +3218,9 @@ dependencies = [ [[package]] name = "parking_lot_core" -version = "0.9.10" +version = "0.9.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e401f977ab385c9e4e3ab30627d6f26d00e2c73eef317493c4ec6d468726cf8" +checksum = "bc838d2a56b5b1a6c25f55575dfc605fabb63bb2365f6c2353ef9159aa69e4a5" dependencies = [ "cfg-if", "libc", @@ -3276,9 +3266,9 @@ dependencies = [ [[package]] name = "percent-encoding" -version = "2.3.1" +version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3148f5046208a5d56bcfc03053e3ca6334e51da8dfb19b6cdc8b306fae3283e" +checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" @@ -3287,7 +3277,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" dependencies = [ "fixedbitset", - "indexmap 2.9.0", + "indexmap 2.11.4", ] [[package]] @@ -3307,7 +3297,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -3324,9 +3314,9 @@ checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" [[package]] name = "pinky-swear" -version = "6.2.0" +version = "6.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6cfae3ead413ca051a681152bd266438d3bfa301c9bdf836939a14c721bb2a21" +checksum = "b1ea6e230dd3a64d61bcb8b79e597d3ab6b4c94ec7a234ce687dd718b4f2e657" dependencies = [ "doc-comment", "flume", @@ -3430,16 +3420,16 @@ dependencies = [ [[package]] name = "polling" -version = "3.10.0" +version = "3.11.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b5bd19146350fe804f7cb2669c851c03d69da628803dab0d98018142aaa5d829" +checksum = "5d0e4f59085d47d8241c88ead0f274e8a0cb551f3625263c05eb8dd897c34218" dependencies = [ "cfg-if", "concurrent-queue", - "hermit-abi 0.5.1", + "hermit-abi 0.5.2", "pin-project-lite", - "rustix 1.0.8", - "windows-sys 0.60.2", + "rustix 1.1.2", + "windows-sys 0.61.1", ] [[package]] @@ -3454,6 +3444,15 @@ dependencies = [ "universal-hash", ] +[[package]] +name = "potential_utf" +version = "0.1.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84df19adbe5b5a0782edcab45899906947ab039ccf4573713735ee7de1e6b08a" +dependencies = [ + "zerovec", +] + [[package]] name = "powerfmt" version = "0.2.0" @@ -3466,24 +3465,24 @@ version = "0.2.21" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "85eae3c4ed2f50dcfe72643da4befc30deadb458a9b590d720cde2f2b1e97da9" dependencies = [ - "zerocopy 0.8.24", + "zerocopy", ] [[package]] name = "prettyplease" -version = "0.2.34" +version = "0.2.37" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"6837b9e10d61f45f987d50808f83d1ee3d206c66acf650c3e4ae2e1f6ddedf55" +checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "proc-macro2" -version = "1.0.95" +version = "1.0.101" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "02b3e5e68a3a1a02aad3ec490a98007cbc13c37cbe84a3cd7b8e406d76e7f778" +checksum = "89ae43fd86e4158d6db51ad8e2b80f313af9cc74f5c0e03ccb87de09998732de" dependencies = [ "unicode-ident", ] @@ -3506,7 +3505,7 @@ checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" dependencies = [ "bytes", "heck 0.5.0", - "itertools", + "itertools 0.12.1", "log", "multimap", "once_cell", @@ -3515,7 +3514,7 @@ dependencies = [ "prost", "prost-types", "regex", - "syn 2.0.100", + "syn 2.0.106", "tempfile", ] @@ -3526,10 +3525,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" dependencies = [ "anyhow", - "itertools", + "itertools 0.12.1", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -3552,9 +3551,9 @@ dependencies = [ [[package]] name = "r-efi" -version = "5.2.0" +version = "5.3.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "74765f6d916ee2faa39bc8e68e4f3ed8949b48cccdac59983d287a7cb71ce9c5" +checksum = "69cdb34c158ceb288df11e18b4bd39de994f6657d83847bdffdbd7f346754b0f" [[package]] name = "rand" @@ -3569,9 +3568,9 @@ dependencies = [ [[package]] name = "rand" -version = "0.9.1" +version = "0.9.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9fbfd9d094a40bf3ae768db9361049ace4c0e04a4fd6b359518bd7b73a73dd97" +checksum = "6db2770f06117d490610c7488547d543617b21bfa07796d7a12f6f1bd53850d1" dependencies = [ "rand_chacha 0.9.0", "rand_core 0.9.3", @@ -3603,7 +3602,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.15", + "getrandom 0.2.16", ] [[package]] @@ -3612,7 +3611,7 @@ version = "0.9.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "99d9a13982dcf210057a8a78572b2217b667c3beacbf3a0d8b454f6f82837d38" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", ] [[package]] @@ -3661,62 +3660,47 @@ dependencies = [ [[package]] name = "redox_syscall" -version = "0.5.11" +version = "0.5.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d2f103c6d277498fbceb16e84d317e2a400f160f46904d5f5410848c829511a3" +checksum = "5407465600fb0548f1442edf71dd20683c6ed326200ace4b1ef0763521bb3b77" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", ] [[package]] name = "regex" -version = "1.11.1" +version = "1.11.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b544ef1b4eac5dc2db33ea63606ae9ffcfac26c1416a2806ae0bf5f56b201191" +checksum = "8b5288124840bee7b386bc413c487869b360b2b4ec421ea56425128692f2a82c" dependencies = [ "aho-corasick", "memchr", - "regex-automata 0.4.9", - "regex-syntax 0.8.5", -] - -[[package]] -name = "regex-automata" -version = "0.1.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6c230d73fb8d8c1b9c0b3135c5142a8acee3a0558fb8db5cf1cb65f8d7862132" -dependencies = [ - "regex-syntax 0.6.29", + "regex-automata", + "regex-syntax", ] [[package]] name = "regex-automata" -version = 
"0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "809e8dc61f6de73b46c85f4c96486310fe304c434cfa43669d7b40f711150908" +checksum = "833eb9ce86d40ef33cb1306d8accf7bc8ec2bfea4355cbdebb3df68b40925cad" dependencies = [ "aho-corasick", "memchr", - "regex-syntax 0.8.5", + "regex-syntax", ] [[package]] name = "regex-lite" -version = "0.1.6" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "53a49587ad06b26609c52e423de037e7f57f20d53535d66e08c695f347df952a" - -[[package]] -name = "regex-syntax" -version = "0.6.29" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f162c6dd7b008981e4d40210aca20b4bd0f9b60ca9271061b07f78537722f2e1" +checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30" [[package]] name = "regex-syntax" -version = "0.8.5" +version = "0.8.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2b15c43186be67a4fd63bee50d0303afffcef381492ebe2c5d87f324e1b8815c" +checksum = "caf4aa5b0f434c91fe5c7f1ecb6a5ece2130b02ad2a590589dda5146df959001" [[package]] name = "reqwest" @@ -3729,7 +3713,7 @@ dependencies = [ "encoding_rs", "futures-core", "futures-util", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -3795,7 +3779,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.15", + "getrandom 0.2.16", "libc", "untrusted 0.9.0", "windows-sys 0.52.0", @@ -3823,15 +3807,15 @@ dependencies = [ [[package]] name = "rustc-demangle" -version = "0.1.24" +version = "0.1.26" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "719b953e2095829ee67db738b3bfa9fa368c94900df327b3f07fe6e794d2fe1f" +checksum = "56f7d92ca342cea22a06f2121d944b4fd82af56988c270852495420f961d4ace" [[package]] name = "rustc-hash" -version = "1.1.0" +version = "2.1.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08d43f7aa6b08d49f382cde6a7982047c3426db949b1424bc4b7ec9ae12c6ce2" +checksum = "357703d41365b4b27c590e3ed91eabb1b663f07c4c084095e60cbed4362dff0d" [[package]] name = "rustc_version" @@ -3867,28 +3851,15 @@ dependencies = [ [[package]] name = "rustix" -version = "0.38.44" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fdb5bc1ae2baa591800df16c9ca78619bf65c0488b41b96ccec5d11220d8c154" -dependencies = [ - "bitflags 2.9.0", - "errno", - "libc", - "linux-raw-sys 0.4.15", - "windows-sys 0.59.0", -] - -[[package]] -name = "rustix" -version = "1.0.8" +version = "1.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8" +checksum = "cd15f8a2c5551a84d56efdc1cd049089e409ac19a3072d5037a17fd70719ff3e" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "errno", "libc", - "linux-raw-sys 0.9.4", - "windows-sys 0.60.2", + "linux-raw-sys 0.11.0", + "windows-sys 0.61.1", ] [[package]] @@ -3931,15 +3902,15 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.28" +version = "0.23.32" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7160e3e10bf4535308537f3c4e1641468cd0e485175d6163087c0393c7d46643" +checksum = "cd3c25631629d034ce7cd9940adc9d45762d46de2b0f57193c4443b92c6d4d40" dependencies = [ "aws-lc-rs", "once_cell", "ring 0.17.14", "rustls-pki-types", - "rustls-webpki 0.103.3", + "rustls-webpki 0.103.6", "subtle", "zeroize", ] @@ -3951,10 
+3922,10 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "70cc376c6ba1823ae229bacf8ad93c136d93524eab0e4e5e0e4f96b9c4e5b212" dependencies = [ "log", - "rustls 0.23.28", + "rustls 0.23.32", "rustls-native-certs 0.7.3", "rustls-pki-types", - "rustls-webpki 0.103.3", + "rustls-webpki 0.103.6", ] [[package]] @@ -3991,7 +3962,7 @@ dependencies = [ "openssl-probe", "rustls-pki-types", "schannel", - "security-framework 3.2.0", + "security-framework 3.5.0", ] [[package]] @@ -4044,9 +4015,9 @@ dependencies = [ [[package]] name = "rustls-webpki" -version = "0.103.3" +version = "0.103.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e4a72fe2bcf7a6ac6fd7d0b9e5cb68aeb7d4c0a0271730218b3e92d43b4eb435" +checksum = "8572f3c2cb9934231157b45499fc41e1f58c589fdfb81a844ba873265e80f8eb" dependencies = [ "aws-lc-rs", "ring 0.17.14", @@ -4056,9 +4027,9 @@ dependencies = [ [[package]] name = "rustversion" -version = "1.0.20" +version = "1.0.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eded382c5f5f786b989652c49544c4877d9f015cc22e145a5ea8ea66c2921cd2" +checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d" [[package]] name = "ryu" @@ -4077,11 +4048,11 @@ dependencies = [ [[package]] name = "schannel" -version = "0.1.27" +version = "0.1.28" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1f29ebaa345f945cec9fbbc532eb307f0fdad8161f281b6369539c8d84876b3d" +checksum = "891d81b926048e76efe18581bf793546b4c0eaf8448d72be8de2bbee5fd166e1" dependencies = [ - "windows-sys 0.59.0", + "windows-sys 0.61.1", ] [[package]] @@ -4131,7 +4102,7 @@ version = "2.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "897b2245f0b511c87893af39b033e5ca9cce68824c4d7e7630b5a1d339658d02" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "core-foundation 0.9.4", "core-foundation-sys", "libc", @@ -4140,11 +4111,11 @@ dependencies = [ [[package]] name = "security-framework" -version = "3.2.0" +version = "3.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "271720403f46ca04f7ba6f55d438f8bd878d6b8ca0a1046e8228c4145bcbb316" +checksum = "cc198e42d9b7510827939c9a15f5062a0c913f3371d765977e586d2fe6c16f4a" dependencies = [ - "bitflags 2.9.0", + "bitflags 2.9.4", "core-foundation 0.10.1", "core-foundation-sys", "libc", @@ -4153,9 +4124,9 @@ dependencies = [ [[package]] name = "security-framework-sys" -version = "2.14.0" +version = "2.15.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "49db231d56a190491cb4aeda9527f1ad45345af50b0851622a7adb8c03b01c32" +checksum = "cc1f0cbffaac4852523ce30d8bd3c5cdc873501d96ff467ca09b6767bb8cd5c0" dependencies = [ "core-foundation-sys", "libc", @@ -4163,50 +4134,62 @@ dependencies = [ [[package]] name = "semver" -version = "1.0.26" +version = "1.0.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "56e6fa9c48d24d85fb3de5ad847117517440f6beceb7798af16b4a87d616b8d0" +checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2" [[package]] name = "serde" -version = "1.0.219" +version = "1.0.227" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5f0e2c6ed6606019b4e29e69dbaba95b11854410e5347d525002456dbbb786b6" +checksum = "80ece43fc6fbed4eb5392ab50c07334d3e577cbf40997ee896fe7af40bba4245" +dependencies = [ + "serde_core", + "serde_derive", +] + +[[package]] +name = "serde_core" +version = "1.0.227" +source = 
"registry+https://github.com/rust-lang/crates.io-index" +checksum = "7a576275b607a2c86ea29e410193df32bc680303c82f31e275bbfcafe8b33be5" dependencies = [ "serde_derive", ] [[package]] name = "serde_derive" -version = "1.0.219" +version = "1.0.227" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5b0276cf7f2c73365f7157c8123c21cd9a50fbbd844757af28ca1f5925fc2a00" +checksum = "51e694923b8824cf0e9b382adf0f60d4e05f348f357b38833a3fa5ed7c2ede04" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "serde_json" -version = "1.0.140" +version = "1.0.145" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "20068b6e96dc6c9bd23e01df8827e6c7e1f2fddd43c21810382803c136b99373" +checksum = "402a6f66d8c709116cf22f558eab210f5a50187f702eb4d7e5ef38d9a7f1c79c" dependencies = [ "itoa", "memchr", "ryu", "serde", + "serde_core", ] [[package]] name = "serde_path_to_error" -version = "0.1.17" +version = "0.1.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "59fab13f937fa393d08645bf3a84bdfe86e296747b506ada67bb15f10f218b2a" +checksum = "10a9ff822e371bb5403e391ecd83e182e0e77ba7f6fe0160b795797109d1b457" dependencies = [ "itoa", "serde", + "serde_core", ] [[package]] @@ -4240,9 +4223,9 @@ checksum = "bbfa15b3dddfee50a0fff136974b3e1bde555604ba463834a7eb7deb6417705d" [[package]] name = "sha2" -version = "0.10.8" +version = "0.10.9" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "793db75ad2bcafc3ffa7c68b215fee268f537982cd901d132f89c6343f3a3dc8" +checksum = "a7507d819769d01a365ab707794a4084392c824f54a7a6a7862f8c3d0892b283" dependencies = [ "cfg-if", "cpufeatures", @@ -4266,9 +4249,9 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64" [[package]] name = "signal-hook-registry" -version = "1.4.2" +version = "1.4.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a9e9e0b4211b72e7b8b6e85c807d36c212bdb33ea8587f7569562a84df5465b1" +checksum = "b2a4719bff48cee6b39d12c020eeb490953ad2443b7055bd0b21fca26bd8c28b" dependencies = [ "libc", ] @@ -4301,24 +4284,21 @@ checksum = "297f631f50729c8c99b84667867963997ec0b50f32b2a7dbcab828ef0541e8bb" dependencies = [ "num-bigint", "num-traits", - "thiserror 2.0.12", + "thiserror 2.0.16", "time", ] [[package]] name = "slab" -version = "0.4.9" +version = "0.4.11" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8f92a496fb766b417c996b9c5e57daf2f7ad3b0bebe1ccfca4856390e3d3bb67" -dependencies = [ - "autocfg", -] +checksum = "7a2ae44ef20feb57a68b23d846850f861394c2e02dc425a50098ae8c90267589" [[package]] name = "smallvec" -version = "1.15.0" +version = "1.15.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8917285742e9f3e1683f0a9c4e6b57960b7314d0b08d30d1ecd426713ee2eee9" +checksum = "67b1b7a3b5fe4f1376887184045fcf45c69e92af734b7aaddc05fb777b6fbd03" [[package]] name = "socket2" @@ -4332,14 +4312,24 @@ dependencies = [ [[package]] name = "socket2" -version = "0.5.9" +version = "0.5.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4f5fd57c80058a56cf5c777ab8a126398ece8e442983605d280a44ce79d0edef" +checksum = "e22376abed350d73dd1cd119b57ffccad95b4e585a7cda43e286245ce23c0678" dependencies = [ "libc", "windows-sys 0.52.0", ] +[[package]] +name = "socket2" +version = "0.6.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"233504af464074f9d066d7b5416c5f9b894a5862a6506e306f7b816cdd6f1807" +dependencies = [ + "libc", + "windows-sys 0.59.0", +] + [[package]] name = "spin" version = "0.5.2" @@ -4420,7 +4410,7 @@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap 2.9.0", + "indexmap 2.11.4", "log", "memchr", "once_cell", @@ -4489,7 +4479,7 @@ checksum = "1ed31390216d20e538e447a7a9b959e06ed9fc51c37b514b46eb758016ecd418" dependencies = [ "atoi", "base64 0.21.7", - "bitflags 2.9.0", + "bitflags 2.9.4", "byteorder", "bytes", "chrono", @@ -4533,7 +4523,7 @@ checksum = "7c824eb80b894f926f89a0b9da0c7f435d27cdd35b8c655b114e58223918577e" dependencies = [ "atoi", "base64 0.21.7", - "bitflags 2.9.0", + "bitflags 2.9.4", "byteorder", "chrono", "crc", @@ -4626,9 +4616,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.100" +version = "2.0.106" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b09a44accad81e1ba1cd74a32461ba89dee89095ba17b32f5d03683b1b1fc2a0" +checksum = "ede7c438028d4436d71104916910f5bb611972c5cfd7f89b8300a8186e6fada6" dependencies = [ "proc-macro2", "quote", @@ -4643,13 +4633,13 @@ checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" [[package]] name = "synstructure" -version = "0.13.1" +version = "0.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8af7666ab7b6390ab78131fb5b0fce11d6b7a6951602017c35fa82800708971" +checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -4687,15 +4677,15 @@ dependencies = [ [[package]] name = "tempfile" -version = "3.19.1" +version = "3.23.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7437ac7763b9b123ccf33c338a5cc1bac6f69b45a136c19bdd8a65e3916435bf" +checksum = "2d31c77bdf42a745371d260a26ca7163f1e0924b64afa0b688e61b5a9fa02f16" dependencies = [ "fastrand 2.3.0", - "getrandom 0.3.2", + "getrandom 0.3.3", "once_cell", - "rustix 1.0.8", - "windows-sys 0.59.0", + "rustix 1.1.2", + "windows-sys 0.61.1", ] [[package]] @@ -4709,11 +4699,11 @@ dependencies = [ [[package]] name = "thiserror" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "567b8a2dae586314f7be2a752ec7474332959c6460e02bde30d702a66d488708" +checksum = "3467d614147380f2e4e374161426ff399c91084acd2363eaf549172b3d5e60c0" dependencies = [ - "thiserror-impl 2.0.12", + "thiserror-impl 2.0.16", ] [[package]] @@ -4724,18 +4714,18 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "thiserror-impl" -version = "2.0.12" +version = "2.0.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7f7cf42b4507d8ea322120659672cf1b9dbb93f8f2d4ecfd6e51350ff5b17a1d" +checksum = "6c5e1be1c48b9172ee610da68fd9cd2770e7a4056cb3fc98710ee6906f0c7960" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -4749,9 +4739,9 @@ dependencies = [ [[package]] name = "time" -version = "0.3.41" +version = "0.3.44" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a7619e19bc266e0f9c5e6686659d394bc57973859340060a69221e57dbc0c40" +checksum = "91e7d9e3bb61134e77bde20dd4825b97c010155709965fedf0f49bb138e52a9d" dependencies = [ "deranged", "itoa", @@ -4764,15 +4754,15 @@ dependencies = [ [[package]] name = "time-core" -version = 
"0.1.4" +version = "0.1.6" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9e9a38711f559d9e3ce1cdb06dd7c5b8ea546bc90052da6d06bb76da74bb07c" +checksum = "40868e7c1d2f0b8d73e4a8c7f0ff63af4f6d19be117e90bd73eb1d62cf831c6b" [[package]] name = "time-macros" -version = "0.2.22" +version = "0.2.24" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3526739392ec93fd8b359c8e98514cb3e8e021beb4e5f597b00a0221f8ed8a49" +checksum = "30cfb0125f12d9c277f35663a0a33f8c30190f4e4574868a330595412d34ebf3" dependencies = [ "num-conv", "time-core", @@ -4780,9 +4770,9 @@ dependencies = [ [[package]] name = "tinystr" -version = "0.7.6" +version = "0.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9117f5d4db391c1cf6927e7bea3db74b9a1c1add8f7eda9ffd5364f40f57b82f" +checksum = "5d4f6d1145dcb577acf783d4e601bc1d76a13337bb54e6233add580b07344c8b" dependencies = [ "displaydoc", "zerovec", @@ -4790,9 +4780,9 @@ dependencies = [ [[package]] name = "tinyvec" -version = "1.9.0" +version = "1.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "09b3661f17e86524eccd4371ab0429194e0d7c008abb45f7a7495b1719463c71" +checksum = "bfa5fdc3bce6191a1dbc8c02d5c8bffcf557bafa17c124c5264a458f1b0613fa" dependencies = [ "tinyvec_macros", ] @@ -4805,27 +4795,29 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.44.2" +version = "1.47.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e6b88822cbe49de4185e3a4cbf8321dd487cf5fe0c5c65695fef6346371e9c48" +checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" dependencies = [ "backtrace", "bytes", + "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "socket2 0.5.9", + "slab", + "socket2 0.6.0", "tokio-macros", - "windows-sys 0.52.0", + "windows-sys 0.59.0", ] [[package]] name = "tokio-io-timeout" -version = "1.2.0" +version = "1.2.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30b74022ada614a1b4834de765f9bb43877f910cc8ce4be40e89042c9223a8bf" +checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" dependencies = [ "pin-project-lite", "tokio", @@ -4839,7 +4831,7 @@ checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -4887,11 +4879,11 @@ dependencies = [ [[package]] name = "tokio-rustls" -version = "0.26.2" +version = "0.26.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8e727b36a1a0e8b74c376ac2211e40c2c8af09fb4013c60d910495810f008e9b" +checksum = "1729aa945f29d91ba541258c8df89027d5792d85a8841fb65e8bf0f4ede4ef61" dependencies = [ - "rustls 0.23.28", + "rustls 0.23.32", "tokio", ] @@ -4921,9 +4913,9 @@ dependencies = [ [[package]] name = "tokio-util" -version = "0.7.14" +version = "0.7.16" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6b9590b93e6fcc1739458317cccd391ad3955e2bde8913edf6f95f9e65a8f034" +checksum = "14307c986784f72ef81c89db7d9e28d6ac26d16213b109ea501696195e6e3ce5" dependencies = [ "bytes", "futures-core", @@ -4944,7 +4936,7 @@ dependencies = [ "base64 0.21.7", "bytes", "flate2", - "h2 0.3.26", + "h2 0.3.27", "http 0.2.12", "http-body 0.4.6", "hyper 0.14.32", @@ -4973,7 +4965,7 @@ dependencies = [ "proc-macro2", "prost-build", "quote", - "syn 2.0.100", + "syn 2.0.106", ] 
[[package]] @@ -5071,20 +5063,20 @@ dependencies = [ [[package]] name = "tracing-attributes" -version = "0.1.28" +version = "0.1.30" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "395ae124c09f9e6918a2310af6038fba074bcf474ac352496d5910dd59a2226d" +checksum = "81383ab64e72a7a8b8e13130c49e3dab29def6d0c7d76a03087b3cf71c5c6903" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "tracing-core" -version = "0.1.33" +version = "0.1.34" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e672c95779cf947c5311f83787af4fa8fffd12fb27e4993211a84bdfd9610f9c" +checksum = "b9d12581f227e93f094d3af2ae690a574abb8a2b9b7a96e7cfe9647b2b617678" dependencies = [ "once_cell", "valuable", @@ -5113,14 +5105,14 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.19" +version = "0.3.20" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e8189decb5ac0fa7bc8b96b7cb9b2701d60d48805aca84a238004d665fcc4008" +checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" dependencies = [ "matchers", "nu-ansi-term", "once_cell", - "regex", + "regex-automata", "serde", "serde_json", "sharded-slab", @@ -5152,9 +5144,9 @@ checksum = "5c1cb5db39152898a79168971543b1cb5020dff7fe43c8dc468b0885f5e29df5" [[package]] name = "unicode-ident" -version = "1.0.18" +version = "1.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5a5f39404a5da50712a4c1eecf25e90dd62b613502b7e925fd4e4d19b5c96512" +checksum = "f63a545481291138910575129486daeaf8ac54aee4387fe7906919f7830c7d9d" [[package]] name = "unicode-normalization" @@ -5213,9 +5205,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.4" +version = "2.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "32f8b686cadd1473f4bd0117a5d28d36b1ade384ea9b5069a1c40aefed7fda60" +checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" dependencies = [ "form_urlencoded", "idna", @@ -5229,12 +5221,6 @@ version = "2.1.3" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "daf8dba3b7eb870caf1ddeed7bc9d2a049f3cfdfae7cb521b087cc33ae4c49da" -[[package]] -name = "utf16_iter" -version = "1.0.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8232dd3cdaed5356e0f716d285e4b40b932ac434100fe9b7e0e8e935b9e6246" - [[package]] name = "utf8_iter" version = "1.0.4" @@ -5243,12 +5229,14 @@ checksum = "b6c140620e7ffbb22c2dee59cafe6084a59b5ffc27a8859a5f0d494b5d52b6be" [[package]] name = "uuid" -version = "1.16.0" +version = "1.18.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "458f7a779bf54acc9f347480ac654f68407d3aab21269a6e3c9f922acd9e2da9" +checksum = "2f87b8aa10b915a06587d0dec516c282ff295b475d94abf425d62b57710070a2" dependencies = [ - "getrandom 0.3.2", + "getrandom 0.3.3", + "js-sys", "serde", + "wasm-bindgen", ] [[package]] @@ -5292,17 +5280,26 @@ dependencies = [ [[package]] name = "wasi" -version = "0.11.0+wasi-snapshot-preview1" +version = "0.11.1+wasi-snapshot-preview1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9c8d87e72b64a3b4db28d11ce29237c246188f4f51057d65a7eab63b7987e423" +checksum = "ccf3ec651a847eb01de73ccad15eb7d99f80485de043efb2f370cd654f4ea44b" [[package]] name = "wasi" -version = "0.14.2+wasi-0.2.4" +version = "0.14.7+wasi-0.2.4" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "9683f9a5a998d873c0d21fcbe3c083009670149a8fab228644b8bd36b2c48cb3" +checksum = "883478de20367e224c0090af9cf5f9fa85bed63a95c1abf3afc5c083ebc06e8c" dependencies = [ - "wit-bindgen-rt", + "wasip2", +] + +[[package]] +name = "wasip2" +version = "1.0.1+wasi-0.2.4" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0562428422c63773dad2c345a1882263bbf4d65cf3f42e90921f787ef5ad58e7" +dependencies = [ + "wit-bindgen", ] [[package]] @@ -5313,35 +5310,36 @@ checksum = "b8dad83b4f25e74f184f64c43b150b91efe7647395b42289f38e50566d82855b" [[package]] name = "wasm-bindgen" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1edc8929d7499fc4e8f0be2262a241556cfc54a0bea223790e71446f2aab1ef5" +checksum = "c1da10c01ae9f1ae40cbfac0bac3b1e724b320abfcf52229f80b547c0d250e2d" dependencies = [ "cfg-if", "once_cell", "rustversion", "wasm-bindgen-macro", + "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-backend" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2f0a0651a5c2bc21487bde11ee802ccaf4c51935d0d3d42a6101f98161700bc6" +checksum = "671c9a5a66f49d8a47345ab942e2cb93c7d1d0339065d4f8139c486121b43b19" dependencies = [ "bumpalo", "log", "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-futures" -version = "0.4.50" +version = "0.4.54" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "555d470ec0bc3bb57890405e5d4322cc9ea83cebb085523ced7be4144dac1e61" +checksum = "7e038d41e478cc73bae0ff9b36c60cff1c98b8f38f8d7e8061e79ee63608ac5c" dependencies = [ "cfg-if", "js-sys", @@ -5352,9 +5350,9 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7fe63fc6d09ed3792bd0897b314f53de8e16568c2b3f7982f468c0bf9bd0b407" +checksum = "7ca60477e4c59f5f2986c50191cd972e3a50d8a95603bc9434501cf156a9a119" dependencies = [ "quote", "wasm-bindgen-macro-support", @@ -5362,22 +5360,22 @@ dependencies = [ [[package]] name = "wasm-bindgen-macro-support" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8ae87ea40c9f689fc23f209965b6fb8a99ad69aeeb0231408be24920604395de" +checksum = "9f07d2f20d4da7b26400c9f4a0511e6e0345b040694e8a75bd41d578fa4421d7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "wasm-bindgen-backend", "wasm-bindgen-shared", ] [[package]] name = "wasm-bindgen-shared" -version = "0.2.100" +version = "0.2.104" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a05d73b933a847d6cccdda8f838a22ff101ad9bf93e33684f39c1f5f0eece3d" +checksum = "bad67dc8b2a1a6e5448428adec4c3e84c43e561d8c9ee8a9e5aabeb193ec41d1" dependencies = [ "unicode-ident", ] @@ -5397,9 +5395,9 @@ dependencies = [ [[package]] name = "web-sys" -version = "0.3.77" +version = "0.3.81" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "33b6dd2ef9186f1f2072e409e99cd22a975331a6b3591b12c764e0e55c60d5d2" +checksum = "9367c417a924a74cae129e6a2ae3b47fabb1f8995595ab474029da749a8be120" dependencies = [ "js-sys", "wasm-bindgen", @@ -5430,25 +5428,13 @@ version = "0.25.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"5f20c57d8d7db6d3b86154206ae5d8fba62dd39573114de97c2cb0578251f8e1" -[[package]] -name = "which" -version = "4.4.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "87ba24419a2078cd2b0f2ede2691b6c66d8e47836da3b6db8265ebad47afbfc7" -dependencies = [ - "either", - "home", - "once_cell", - "rustix 0.38.44", -] - [[package]] name = "whoami" -version = "1.6.0" +version = "1.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6994d13118ab492c3c80c1f81928718159254c53c472bf9ce36f8dae4add02a7" +checksum = "5d4a4db5077702ca3015d3d02d74974948aba2ad9e12ab7df718ee64ccd7e97d" dependencies = [ - "redox_syscall", + "libredox", "wasite", ] @@ -5476,9 +5462,9 @@ checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f" [[package]] name = "windows-core" -version = "0.61.0" +version = "0.62.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "4763c1de310c86d75a878046489e2e5ba02c649d185f21c67d4cf8a56d098980" +checksum = "6844ee5416b285084d3d3fffd743b925a6c9385455f64f6d4fa3031c4c2749a9" dependencies = [ "windows-implement", "windows-interface", @@ -5489,46 +5475,46 @@ dependencies = [ [[package]] name = "windows-implement" -version = "0.60.0" +version = "0.60.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a47fddd13af08290e67f4acabf4b459f647552718f683a7b415d290ac744a836" +checksum = "edb307e42a74fb6de9bf3a02d9712678b22399c87e6fa869d6dfcd8c1b7754e0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "windows-interface" -version = "0.59.1" +version = "0.59.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd9211b69f8dcdfa817bfd14bf1c97c9188afa36f4750130fcdf3f400eca9fa8" +checksum = "c0abd1ddbc6964ac14db11c7213d6532ef34bd9aa042c2e5935f59d7908b46a5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] name = "windows-link" -version = "0.1.1" +version = "0.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38" +checksum = "45e46c0661abb7180e7b9c281db115305d49ca1709ab8242adf09666d2173c65" [[package]] name = "windows-result" -version = "0.3.2" +version = "0.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c64fd11a4fd95df68efcfee5f44a294fe71b8bc6a91993e2791938abcc712252" +checksum = "7084dcc306f89883455a206237404d3eaf961e5bd7e0f312f7c91f57eb44167f" dependencies = [ "windows-link", ] [[package]] name = "windows-strings" -version = "0.4.0" +version = "0.5.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a2ba9642430ee452d5a7aa78d72907ebe8cfda358e8cb7918a2050581322f97" +checksum = "7218c655a553b0bed4426cf54b20d7ba363ef543b52d515b3e48d7fd55318dda" dependencies = [ "windows-link", ] @@ -5562,11 +5548,11 @@ dependencies = [ [[package]] name = "windows-sys" -version = "0.60.2" +version = "0.61.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2f500e4d28234f72040990ec9d39e3a6b950f9f22d3dba18416c35882612bcb" +checksum = "6f109e41dd4a3c848907eb83d5a42ea98b3769495597450cf6d153507b166f0f" dependencies = [ - "windows-targets 0.53.2", + "windows-link", ] [[package]] @@ -5602,10 +5588,11 @@ dependencies = [ [[package]] name = "windows-targets" -version = "0.53.2" +version = "0.53.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = 
"c66f69fcc9ce11da9966ddb31a40968cad001c5bedeb5c2b82ede4253ab48aef" +checksum = "2d42b7b7f66d2a06854650af09cfdf8713e427a439c97ad65a6375318033ac4b" dependencies = [ + "windows-link", "windows_aarch64_gnullvm 0.53.0", "windows_aarch64_msvc 0.53.0", "windows_i686_gnu 0.53.0", @@ -5765,25 +5752,16 @@ dependencies = [ ] [[package]] -name = "wit-bindgen-rt" -version = "0.39.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6f42320e61fe2cfd34354ecb597f86f413484a798ba44a8ca1165c58d42da6c1" -dependencies = [ - "bitflags 2.9.0", -] - -[[package]] -name = "write16" -version = "1.0.0" +name = "wit-bindgen" +version = "0.46.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d1890f4022759daae28ed4fe62859b1236caebfc61ede2f63ed4e695f3f6d936" +checksum = "f17a85883d4e6d00e8a97c586de764dabcc06133f7f1d55dce5cdc070ad7fe59" [[package]] name = "writeable" -version = "0.5.5" +version = "0.6.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e9df38ee2d2c3c5948ea468a8406ff0db0b29ae1ffde1bcf20ef305bcc95c51" +checksum = "ea2f10b9bb0928dfb1b42b65e1f9e36f7f54dbdf08457afefb38afcdec4fa2bb" [[package]] name = "x509-cert" @@ -5809,7 +5787,7 @@ dependencies = [ "nom", "oid-registry", "rusticata-macros", - "thiserror 2.0.12", + "thiserror 2.0.16", "time", ] @@ -5821,9 +5799,9 @@ checksum = "66fee0b777b0f5ac1c69bb06d361268faafa61cd4682ae064a171c16c433e9e4" [[package]] name = "yoke" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "120e6aef9aa629e3d4f52dc8cc43a015c7724194c97dfaf45180d2daf2b77f40" +checksum = "5f41bb01b8226ef4bfd589436a297c53d118f65921786300e427be8d487695cc" dependencies = [ "serde", "stable_deref_trait", @@ -5833,54 +5811,34 @@ dependencies = [ [[package]] name = "yoke-derive" -version = "0.7.5" +version = "0.8.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2380878cad4ac9aac1e2435f3eb4020e8374b5f13c296cb75b4620ff8e229154" +checksum = "38da3c9736e16c5d3c8c597a9aaa5d1fa565d0532ae05e27c24aa62fb32c0ab6" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] [[package]] name = "zerocopy" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1b9b4fd18abc82b8136838da5d50bae7bdea537c574d8dc1a34ed098d6c166f0" +checksum = "0894878a5fa3edfd6da3f88c4805f4c8558e2b996227a3d864f47fe11e38282c" dependencies = [ - "zerocopy-derive 0.7.35", -] - -[[package]] -name = "zerocopy" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2586fea28e186957ef732a5f8b3be2da217d65c5969d4b1e17f973ebbe876879" -dependencies = [ - "zerocopy-derive 0.8.24", + "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.7.35" +version = "0.8.27" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fa4f8080344d4671fb4e831a13ad1e68092748387dfc4f55e356242fae12ce3e" +checksum = "88d2b8d9c68ad2b9e4340d7832716a4d21a22a1154777ad56ea55c51a9cf3831" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", -] - -[[package]] -name = "zerocopy-derive" -version = "0.8.24" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a996a8f63c5c4448cd959ac1bab0aaa3306ccfd060472f85943ee0750f0169be" -dependencies = [ - "proc-macro2", - "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -5900,7 +5858,7 @@ checksum = 
"d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", "synstructure", ] @@ -5910,11 +5868,22 @@ version = "1.8.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ced3678a2879b30306d323f4542626697a464a97c0a07c9aebf7ebca65cd4dde" +[[package]] +name = "zerotrie" +version = "0.2.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "36f0bbd478583f79edad978b407914f61b2972f5af6fa089686016be8f9af595" +dependencies = [ + "displaydoc", + "yoke", + "zerofrom", +] + [[package]] name = "zerovec" -version = "0.10.4" +version = "0.11.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "aa2b893d79df23bfb12d5461018d408ea19dfafe76c2c7ef6d4eba614f8ff079" +checksum = "e7aa2bd55086f1ab526693ecbe444205da57e25f4489879da80635a46d90e73b" dependencies = [ "yoke", "zerofrom", @@ -5923,13 +5892,13 @@ dependencies = [ [[package]] name = "zerovec-derive" -version = "0.10.3" +version = "0.11.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6eafa6dfb17584ea3e2bd6e76e0cc15ad7af12b09abdd1ca55961bed9b1063c6" +checksum = "5b96237efa0c878c64bd89c436f661be4e46b2f3eff1ebb976f7ef2321d2f58f" dependencies = [ "proc-macro2", "quote", - "syn 2.0.100", + "syn 2.0.106", ] [[package]] @@ -5952,9 +5921,9 @@ dependencies = [ [[package]] name = "zstd-sys" -version = "2.0.15+zstd.1.5.7" +version = "2.0.16+zstd.1.5.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "eb81183ddd97d0c74cedf1d50d85c8d08c1b8b68ee863bdee9e706eedba1a237" +checksum = "91e19ebc2adc8f83e43039e79776e3fda8ca919132d68a1fed6a5faca2683748" dependencies = [ "cc", "pkg-config", diff --git a/proto/sync.proto b/proto/sync.proto index 4f53d54..7e8dcec 100755 --- a/proto/sync.proto +++ b/proto/sync.proto @@ -34,6 +34,9 @@ service SyncService { rpc DeleteFile(DeleteFileRequest) returns (DeleteFileResponse); rpc DownloadFile(DownloadFileRequest) returns (DownloadFileResponse); rpc ListFiles(ListFilesRequest) returns (ListFilesResponse); + // Streaming variants for large payloads (hybrid approach) + rpc UploadFileStream(stream UploadFileChunk) returns (UploadFileResponse); + rpc DownloadFileStream(DownloadFileRequest) returns (stream DownloadFileChunk); // 파일 업데이트 알림 스트리밍 rpc SubscribeToFileUpdates(SubscribeRequest) returns (stream FileUpdateNotification); // 파일 검색 기능 @@ -374,6 +377,28 @@ message UploadFileResponse { string return_message = 4; } +// Client-streaming upload chunk (first chunk carries metadata) +message UploadFileChunk { + // File metadata (should be populated in the first chunk) + string account_hash = 1; + string device_hash = 2; + int32 group_id = 3; + int32 watcher_id = 4; + string filename = 5; + string file_path = 6; + string auth_token = 7; + uint64 file_size = 8; // optional: declared total size + string file_hash = 9; // optional: client precomputed hash + bool is_encrypted = 10; + string key_id = 11; + int64 revision = 12; + + // Chunk payload + bytes data = 13; + uint64 seq = 14; // sequential index starting at 0 + bool last = 15; // true for the final chunk +} + message DownloadFileRequest { string account_hash = 1; string device_hash = 2; @@ -397,6 +422,14 @@ message DownloadFileResponse { string key_id = 10; } +// Server-streaming download chunk +message DownloadFileChunk { + bytes data = 1; + uint64 seq = 2; + bool last = 3; + uint64 total_size = 4; // optional: sent on first chunk for convenience +} + message 
ListFilesRequest {
   string account_hash = 1;
   string device_hash = 2;
diff --git a/src/server/service.rs b/src/server/service.rs
index 9ebdcde..8a763a8 100755
--- a/src/server/service.rs
+++ b/src/server/service.rs
@@ -23,6 +23,7 @@ use crate::sync::{
     SyncConfigurationRequest, SyncConfigurationResponse, UpdateDeviceInfoRequest,
     UpdateDeviceInfoResponse, UpdateWatcherGroupRequest, UpdateWatcherGroupResponse,
     UpdateWatcherPresetRequest, UpdateWatcherPresetResponse, UploadFileRequest, UploadFileResponse,
+    UploadFileChunk, DownloadFileChunk,
     ValidateTokenRequest, ValidateTokenResponse, VerifyLoginRequest, VerifyLoginResponse,
     VersionUpdateNotification, WatcherGroupUpdateNotification, WatcherPresetUpdateNotification,
 };
@@ -31,6 +32,7 @@ use futures::Stream;
 use futures::StreamExt;
 use std::pin::Pin;
 use std::sync::Arc;
+use tokio::io::AsyncWriteExt;
 use tokio::sync::mpsc;
 use tokio_stream::wrappers::ReceiverStream;
 use tonic::{Request, Response, Status};
@@ -432,6 +434,59 @@ impl SyncService for SyncServiceImpl {
         self.file_handler.handle_upload_file(request).await
     }
 
+    // Client-streaming upload for large files
+    async fn upload_file_stream(
+        &self,
+        request: Request<tonic::Streaming<UploadFileChunk>>,
+    ) -> Result<Response<UploadFileResponse>, Status> {
+        let mut stream = request.into_inner();
+
+        let mut first_meta: Option<UploadFileChunk> = None;
+        let mut buffer: Vec<u8> = Vec::new();
+        let mut expected_seq: u64 = 0;
+
+        while let Some(chunk) = stream.message().await? {
+            if first_meta.is_none() {
+                first_meta = Some(chunk.clone());
+            }
+
+            if chunk.seq != expected_seq {
+                error!("upload_file_stream: out-of-order chunk: expected={}, got={}", expected_seq, chunk.seq);
+                return Err(Status::aborted("Out-of-order chunk sequence"));
+            }
+            expected_seq += 1;
+
+            buffer.extend_from_slice(&chunk.data);
+            if chunk.last {
+                break;
+            }
+        }
+
+        let meta = first_meta.ok_or_else(|| Status::invalid_argument("No chunks received"))?;
+
+        // Reuse existing handler flow by constructing UploadFileRequest
+        let req = UploadFileRequest {
+            account_hash: meta.account_hash,
+            device_hash: meta.device_hash,
+            filename: meta.filename,
+            file_path: meta.file_path,
+            file_data: buffer,
+            file_hash: meta.file_hash,
+            auth_token: meta.auth_token,
+            group_id: meta.group_id,
+            watcher_id: meta.watcher_id,
+            is_encrypted: meta.is_encrypted,
+            revision: meta.revision,
+            updated_time: None,
+            file_size: meta.file_size,
+            key_id: meta.key_id,
+        };
+
+        self.file_handler
+            .handle_upload_file(Request::new(req))
+            .await
+    }
+
     async fn download_file(
         &self,
         request: Request<DownloadFileRequest>,
@@ -440,6 +495,72 @@
         self.file_handler.handle_download_file(request).await
     }
 
+    // Server-streaming download for large files
+    type DownloadFileStreamStream = Pin<Box<dyn Stream<Item = Result<DownloadFileChunk, Status>> + Send + 'static>>;
+
+    async fn download_file_stream(
+        &self,
+        request: Request<DownloadFileRequest>,
+    ) -> Result<Response<Self::DownloadFileStreamStream>, Status> {
+        let req = request.into_inner();
+
+        // Use existing handler to validate and fetch metadata + full bytes once for now
+        // In the future, switch to storage.get_file_data_stream for real streaming from the backend
+        let file_id = req.file_id;
+        let device_hash = req.device_hash.clone();
+
+        let verified = self
+            .app_state
+            .oauth
+            .verify_token(&req.auth_token)
+            .await
+            .map_err(|_| Status::unauthenticated("Invalid authentication"))?;
+        if !verified.valid {
+            return Err(Status::unauthenticated("Invalid authentication"));
+        }
+
+        let file_info = self
+            .app_state
+            .file
+            .get_file_info(file_id)
+            .await
+            .map_err(|e| Status::internal(format!("Failed to get file info: {}", e)))?
+            .ok_or_else(|| Status::not_found("File not found"))?;
+
+        let total_data = self
+            .app_state
+            .file
+            .get_file_data(file_id)
+            .await
+            .map_err(|e| Status::internal(format!("Failed to load data: {}", e)))?
+            .ok_or_else(|| Status::not_found("File data not found"))?;
+
+        // Chunking in-memory for now (hybrid step 1)
+        let (tx, rx) = mpsc::channel(16);
+        let chunk_size: usize = 1024 * 1024; // 1MB
+
+        tokio::spawn(async move {
+            let mut seq: u64 = 0;
+            let total_size = total_data.len() as u64;
+            for slice in total_data.chunks(chunk_size) {
+                let last = ((seq + 1) * chunk_size as u64) >= total_size;
+                let msg = DownloadFileChunk {
+                    data: slice.to_vec(),
+                    seq,
+                    last,
+                    total_size,
+                };
+                if tx.send(Ok(msg)).await.is_err() {
+                    break;
+                }
+                seq += 1;
+            }
+        });
+
+        let stream = ReceiverStream::new(rx);
+        Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream))
+    }
+
     async fn list_files(
         &self,
         request: Request<ListFilesRequest>,

From 18f0f727091c0425c13f446e68d36da1c43362d6 Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Fri, 26 Sep 2025 13:28:43 -0600
Subject: [PATCH 39/71] Fix http size issue

---
 Cargo.lock | 291 +++++++++++++++++++++++++----------------------------
 Cargo.toml |  18 ++--
 build.rs   |  11 +-
 3 files changed, 152 insertions(+), 168 deletions(-)
 mode change 100755 => 100644 Cargo.lock

diff --git a/Cargo.lock b/Cargo.lock
old mode 100755
new mode 100644
index 1be69a5..febd6c1
--- a/Cargo.lock
+++ b/Cargo.lock
@@ -1,6 +1,6 @@
 # This file is automatically @generated by Cargo.
 # It is not intended for manual editing.
-version = 4
+version = 3
 
 [[package]]
 name = "actix-codec"
@@ -934,7 +934,7 @@ dependencies = [
  "rustls-pki-types",
  "tokio",
  "tokio-rustls 0.26.4",
- "tower 0.5.2",
+ "tower",
  "tracing",
 ]
 
@@ -1058,18 +1058,16 @@ dependencies = [
 
 [[package]]
 name = "axum"
-version = "0.6.20"
+version = "0.8.4"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "3b829e4e32b91e643de6eafe82b1d90675f5874230191a4ffbc1b336dec4d6bf"
+checksum = "021e862c184ae977658b36c4500f7feac3221ca5da43e3f25bd04ab6c79a29b5"
 dependencies = [
- "async-trait",
  "axum-core",
- "bitflags 1.3.2",
  "bytes",
  "futures-util",
- "http 0.2.12",
- "http-body 0.4.6",
- "hyper 0.14.32",
+ "http 1.3.1",
+ "http-body 1.0.1",
+ "http-body-util",
  "itoa",
  "matchit",
  "memchr",
@@ -1078,25 +1076,27 @@ dependencies = [
  "pin-project-lite",
  "rustversion",
  "serde",
- "sync_wrapper",
- "tower 0.4.13",
+ "sync_wrapper 1.0.2",
+ "tower",
  "tower-layer",
  "tower-service",
 ]
 
 [[package]]
 name = "axum-core"
-version = "0.3.4"
+version = "0.5.2"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "759fa577a247914fd3f7f76d62972792636412fbfd634cd452f6a385a74d2d2c"
+checksum = "68464cd0412f486726fb3373129ef5d2993f90c34bc2bc1c1e9943b2f4fc7ca6"
 dependencies = [
- "async-trait",
  "bytes",
- "futures-util",
- "http 0.2.12",
- "http-body 0.4.6",
+ "futures-core",
+ "http 1.3.1",
+ "http-body 1.0.1",
+ "http-body-util",
  "mime",
+ "pin-project-lite",
  "rustversion",
+ "sync_wrapper 1.0.2",
  "tower-layer",
  "tower-service",
 ]
@@ -1501,6 +1501,8 @@ dependencies = [
  "tonic",
  "tonic-build",
  "tonic-health",
+ "tonic-prost",
+ "tonic-prost-build",
  "tonic-reflection",
  "tracing",
  "tracing-actix-web",
@@ -1924,9 +1926,9 @@ checksum = "1ced73b1dacfc750a6db6c0a0c3a3853c8b41997e2e2c563dc90804ae6867959"
 
 [[package]]
 name = "fixedbitset"
-version = "0.4.2"
+version = "0.5.7"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0ce7134b9999ecaf8bcd65542e436736ef32ddca1b3e06094cb6ec5755203b80"
+checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99" [[package]] name = "flagset" @@ -2190,7 +2192,7 @@ dependencies = [ "futures-sink", "futures-util", "http 0.2.12", - "indexmap 2.11.4", + "indexmap", "slab", "tokio", "tokio-util", @@ -2209,19 +2211,13 @@ dependencies = [ "futures-core", "futures-sink", "http 1.3.1", - "indexmap 2.11.4", + "indexmap", "slab", "tokio", "tokio-util", "tracing", ] -[[package]] -name = "hashbrown" -version = "0.12.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8a9ee70c43aaf417c914396645a0fa852624801b24ebb7ae78fe8272889ac888" - [[package]] name = "hashbrown" version = "0.14.5" @@ -2267,12 +2263,6 @@ dependencies = [ "unicode-segmentation", ] -[[package]] -name = "heck" -version = "0.5.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2304e00983f87ffb38b55b444b5e3b60a884b5d30c0fca7d82fe33449bbe55ea" - [[package]] name = "hermit-abi" version = "0.3.9" @@ -2435,6 +2425,7 @@ dependencies = [ "http 1.3.1", "http-body 1.0.1", "httparse", + "httpdate", "itoa", "pin-project-lite", "pin-utils", @@ -2478,14 +2469,15 @@ dependencies = [ [[package]] name = "hyper-timeout" -version = "0.4.1" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbb958482e8c7be4bc3cf272a766a2b0bf1a6755e7a6ae777f017a31d11b13b1" +checksum = "2b90d566bffbce6a75bd8b09a05aa8c2cb1fabb6cb348f8840c9e4c90a0d83b0" dependencies = [ - "hyper 0.14.32", + "hyper 1.7.0", + "hyper-util", "pin-project-lite", "tokio", - "tokio-io-timeout", + "tower-service", ] [[package]] @@ -2649,16 +2641,6 @@ version = "0.1.9" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "e8a5a9a0ff0086c7a148acb942baaabeadf9504d10400b5a05645853729b9cd2" -[[package]] -name = "indexmap" -version = "1.9.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bd070e393353796e801d209ad339e89596eb4c8d430d18ede6a1cced8fafbd99" -dependencies = [ - "autocfg", - "hashbrown 0.12.3", -] - [[package]] name = "indexmap" version = "2.11.4" @@ -2718,18 +2700,18 @@ checksum = "469fb0b9cefa57e3ef31275ee7cacb78f2fdca44e4765491884a2b119d4eb130" [[package]] name = "itertools" -version = "0.12.1" +version = "0.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569" +checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" dependencies = [ "either", ] [[package]] name = "itertools" -version = "0.13.0" +version = "0.14.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "413ee7dfc52ee1a4949ceeb7dbc8a33f2d6c088194d9f922fb8318faf1f01186" +checksum = "2b192c782037fadd9cfa75548310488aabdbf3d2da73885b31bd0abd03351285" dependencies = [ "either", ] @@ -2933,9 +2915,9 @@ dependencies = [ [[package]] name = "matchit" -version = "0.7.3" +version = "0.8.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94" +checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "md-5" @@ -3272,12 +3254,12 @@ checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" [[package]] name = "petgraph" -version = "0.6.5" +version = "0.7.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b4c5cc86750666a3ed20bdaf5ca2a0344f9c67674cae0515bec2da16fbaa47db" +checksum = 
"3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" dependencies = [ "fixedbitset", - "indexmap 2.11.4", + "indexmap", ] [[package]] @@ -3489,9 +3471,9 @@ dependencies = [ [[package]] name = "prost" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "deb1435c188b76130da55f17a466d252ff7b1418b2ad3e037d127b94e3411f29" +checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" dependencies = [ "bytes", "prost-derive", @@ -3499,13 +3481,12 @@ dependencies = [ [[package]] name = "prost-build" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "22505a5c94da8e3b7c2996394d1c933236c4d743e81a410bcca4e6989fc066a4" +checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" dependencies = [ - "bytes", - "heck 0.5.0", - "itertools 0.12.1", + "heck", + "itertools 0.14.0", "log", "multimap", "once_cell", @@ -3513,6 +3494,8 @@ dependencies = [ "prettyplease", "prost", "prost-types", + "pulldown-cmark", + "pulldown-cmark-to-cmark", "regex", "syn 2.0.106", "tempfile", @@ -3520,12 +3503,12 @@ dependencies = [ [[package]] name = "prost-derive" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "81bddcdb20abf9501610992b6759a4c888aef7d1a7247ef75e2404275ac24af1" +checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" dependencies = [ "anyhow", - "itertools 0.12.1", + "itertools 0.14.0", "proc-macro2", "quote", "syn 2.0.106", @@ -3533,13 +3516,33 @@ dependencies = [ [[package]] name = "prost-types" -version = "0.12.6" +version = "0.14.1" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9091c90b0a32608e984ff2fa4091273cbdd755d54935c51d520887f4a1dbd5b0" +checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" dependencies = [ "prost", ] +[[package]] +name = "pulldown-cmark" +version = "0.13.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "1e8bbe1a966bd2f362681a44f6edce3c2310ac21e4d5067a6e7ec396297a6ea0" +dependencies = [ + "bitflags 2.9.4", + "memchr", + "unicase", +] + +[[package]] +name = "pulldown-cmark-to-cmark" +version = "21.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "e5b6a0769a491a08b31ea5c62494a8f144ee0987d86d670a8af4df1e1b7cde75" +dependencies = [ + "pulldown-cmark", +] + [[package]] name = "quote" version = "1.0.40" @@ -3730,7 +3733,7 @@ dependencies = [ "serde", "serde_json", "serde_urlencoded", - "sync_wrapper", + "sync_wrapper 0.1.2", "system-configuration", "tokio", "tokio-rustls 0.24.1", @@ -3886,20 +3889,6 @@ dependencies = [ "sct", ] -[[package]] -name = "rustls" -version = "0.22.4" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bf4ef73721ac7bcd79b2b315da7779d8fc09718c6b3d2d1b2d94850eb8c18432" -dependencies = [ - "log", - "ring 0.17.14", - "rustls-pki-types", - "rustls-webpki 0.102.8", - "subtle", - "zeroize", -] - [[package]] name = "rustls" version = "0.23.32" @@ -4002,17 +3991,6 @@ dependencies = [ "untrusted 0.9.0", ] -[[package]] -name = "rustls-webpki" -version = "0.102.8" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "64ca1bc8749bd4cf37b5ce386cc146580777b4e8572c7b97baf22c83f444bee9" -dependencies = [ - "ring 0.17.14", - "rustls-pki-types", - "untrusted 0.9.0", -] - [[package]] name = "rustls-webpki" version = "0.103.6" @@ -4410,7 +4388,7 
@@ dependencies = [ "futures-util", "hashlink", "hex", - "indexmap 2.11.4", + "indexmap", "log", "memchr", "once_cell", @@ -4453,7 +4431,7 @@ checksum = "5833ef53aaa16d860e92123292f1f6a3d53c34ba8b1969f152ef1a7bb803f3c8" dependencies = [ "dotenvy", "either", - "heck 0.4.1", + "heck", "hex", "once_cell", "proc-macro2", @@ -4631,6 +4609,12 @@ version = "0.1.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "2047c6ded9c721764247e62cd3b03c09ffc529b2ba5b10ec482ae507a4a70160" +[[package]] +name = "sync_wrapper" +version = "1.0.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "0bf256ce5efdfa370213c1dabab5935a12e49f2c58d15e9eac2870d3b4f27263" + [[package]] name = "synstructure" version = "0.13.2" @@ -4813,16 +4797,6 @@ dependencies = [ "windows-sys 0.59.0", ] -[[package]] -name = "tokio-io-timeout" -version = "1.2.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0bd86198d9ee903fedd2f9a2e72014287c0d9167e4ae43b5853007205dda1b76" -dependencies = [ - "pin-project-lite", - "tokio", -] - [[package]] name = "tokio-macros" version = "2.5.0" @@ -4866,17 +4840,6 @@ dependencies = [ "tokio", ] -[[package]] -name = "tokio-rustls" -version = "0.25.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "775e0c0f0adb3a2f22a00c4745d728b479985fc15ee7ca6a2608388c5569860f" -dependencies = [ - "rustls 0.22.4", - "rustls-pki-types", - "tokio", -] - [[package]] name = "tokio-rustls" version = "0.26.4" @@ -4896,6 +4859,7 @@ dependencies = [ "futures-core", "pin-project-lite", "tokio", + "tokio-util", ] [[package]] @@ -4926,30 +4890,29 @@ dependencies = [ [[package]] name = "tonic" -version = "0.11.0" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "76c4eb7a4e9ef9d4763600161f12f5070b92a578e1b634db88a6887844c91a13" +checksum = "eb7613188ce9f7df5bfe185db26c5814347d110db17920415cf2fbcad85e7203" dependencies = [ - "async-stream", "async-trait", "axum", - "base64 0.21.7", + "base64 0.22.1", "bytes", "flate2", - "h2 0.3.27", - "http 0.2.12", - "http-body 0.4.6", - "hyper 0.14.32", + "h2 0.4.12", + "http 1.3.1", + "http-body 1.0.1", + "http-body-util", + "hyper 1.7.0", "hyper-timeout", + "hyper-util", "percent-encoding", "pin-project", - "prost", - "rustls-pemfile 2.2.0", - "rustls-pki-types", + "socket2 0.6.0", + "sync_wrapper 1.0.2", "tokio", - "tokio-rustls 0.25.0", "tokio-stream", - "tower 0.4.13", + "tower", "tower-layer", "tower-service", "tracing", @@ -4957,56 +4920,82 @@ dependencies = [ [[package]] name = "tonic-build" -version = "0.10.2" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9d021fc044c18582b9a2408cd0dd05b1596e3ecdb5c4df822bb0183545683889" +checksum = "4c40aaccc9f9eccf2cd82ebc111adc13030d23e887244bc9cfa5d1d636049de3" dependencies = [ "prettyplease", "proc-macro2", - "prost-build", "quote", "syn 2.0.106", ] [[package]] name = "tonic-health" -version = "0.11.0" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2cef6e24bc96871001a7e48e820ab240b3de2201e59b517cf52835df2f1d2350" +checksum = "2a82868bf299e0a1d2e8dce0dc33a46c02d6f045b2c1f1d6cc8dc3d0bf1812ef" dependencies = [ - "async-stream", "prost", "tokio", "tokio-stream", "tonic", + "tonic-prost", +] + +[[package]] +name = "tonic-prost" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" 
+dependencies = [ + "bytes", + "prost", + "tonic", +] + +[[package]] +name = "tonic-prost-build" +version = "0.14.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "b4a16cba4043dc3ff43fcb3f96b4c5c154c64cbd18ca8dce2ab2c6a451d058a2" +dependencies = [ + "prettyplease", + "proc-macro2", + "prost-build", + "prost-types", + "quote", + "syn 2.0.106", + "tempfile", + "tonic-build", +] [[package]] name = "tonic-reflection" -version = "0.11.0" +version = "0.14.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "548c227bd5c0fae5925812c4ec6c66ffcfced23ea370cb823f4d18f0fc1cb6a7" +checksum = "34da53e8387581d66db16ff01f98a70b426b091fdf76856e289d5c1bd386ed7b" dependencies = [ "prost", "prost-types", "tokio", "tokio-stream", "tonic", + "tonic-prost", ] [[package]] name = "tower" -version = "0.4.13" +version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c" +checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 1.9.3", - "pin-project", "pin-project-lite", - "rand 0.8.5", "slab", + "sync_wrapper 1.0.2", "tokio", "tokio-util", "tower-layer", @@ -5014,16 +5003,6 @@ dependencies = [ "tracing", ] -[[package]] -name = "tower" -version = "0.5.2" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" -dependencies = [ - "tower-layer", - "tower-service", -] - [[package]] name = "tower-layer" version = "0.3.3" @@ -5136,6 +5115,12 @@ version = "1.18.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "1dccffe3ce07af9386bfd29e80c0ab1a8205a2fc34e4bcd40364df902cfa8f3f" +[[package]] +name = "unicase" +version = "2.8.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" + [[package]] name = "unicode-bidi" version = "0.3.18" diff --git a/Cargo.toml b/Cargo.toml index a9920fe..cf18c14 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -35,12 +35,13 @@ tokio-util = { version = "0.7", features = ["codec"] } futures = "0.3.29" futures-util = "0.3.29" -# gRPC and Protocol Buffers with performance features -tonic = { version = "0.11", features = ["tls", "gzip"] } -tonic-reflection = { version = "0.11", optional = true } -tonic-health = "0.11" -prost = "0.12" -prost-types = "0.12" +# gRPC and Protocol Buffers with performance features (updated to 0.14.2) +tonic = { version = "0.14.2", features = ["transport", "gzip"] } +tonic-prost = "0.14.2" +tonic-reflection = { version = "0.14", optional = true } +tonic-health = "0.14" +prost = "0.14" +prost-types = "0.14" bytes = "1.5" # HTTP server with performance optimizations @@ -131,11 +132,12 @@ tokio-test = "0.4" # Build dependencies [build-dependencies] -tonic-build = { version = "0.10", features = ["prost"] } +tonic-build = "0.14.2" +tonic-prost-build = "0.14.2" # Feature flags for conditional compilation [features] -default = ["metrics", "compression", "reflection"] +default = ["metrics", "compression"] # Storage backends s3-storage = [] diff --git a/build.rs b/build.rs index 9956723..e6456c1 100755 --- a/build.rs +++ b/build.rs @@ -1,14 +1,11 @@ +// Use tonic-prost-build (tonic 0.14+) + fn main() -> Result<(), Box<dyn std::error::Error>> { println!("cargo:rerun-if-changed=proto/sync.proto"); println!("cargo:rerun-if-changed=proto/health.proto"); - 
let out_dir = std::env::var("OUT_DIR")?; - let descriptor_path = std::path::PathBuf::from(&out_dir).join("sync_descriptor.bin"); - - tonic_build::configure() - .file_descriptor_set_path(&descriptor_path) - .extern_path(".google.protobuf.Timestamp", "::prost_types::Timestamp") - .compile(&["proto/sync.proto", "proto/health.proto"], &["proto"])?; + tonic_prost_build::compile_protos("proto/sync.proto")?; + tonic_prost_build::compile_protos("proto/health.proto")?; Ok(()) } From 2b6c5503cba652612440510988ec3ccd85b8dd23 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 13:30:53 -0600 Subject: [PATCH 40/71] Fix http size issue --- src/server/service.rs | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/src/server/service.rs b/src/server/service.rs index 8a763a8..ba23717 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -9,7 +9,7 @@ use crate::sync::{ BroadcastFileRestoreRequest, BroadcastFileRestoreResponse, CheckAuthStatusRequest, CheckAuthStatusResponse, CheckFileExistsRequest, CheckFileExistsResponse, DeleteDeviceRequest, DeleteDeviceResponse, DeleteFileRequest, DeleteFileResponse, DeleteWatcherGroupRequest, - DeleteWatcherGroupResponse, DeviceUpdateNotification, DownloadFileRequest, + DeleteWatcherGroupResponse, DeviceUpdateNotification, DownloadFileChunk, DownloadFileRequest, DownloadFileResponse, EncryptionKeyUpdateNotification, FileUpdateNotification, FindFileRequest, FindFileResponse, GetAccountInfoRequest, GetAccountInfoResponse, GetFileHistoryRequest, GetFileHistoryResponse, GetWatcherGroupRequest, GetWatcherGroupResponse, @@ -22,10 +22,10 @@ use crate::sync::{ RestoreFileVersionRequest, RestoreFileVersionResponse, SubscribeRequest, SyncConfigurationRequest, SyncConfigurationResponse, UpdateDeviceInfoRequest, UpdateDeviceInfoResponse, UpdateWatcherGroupRequest, UpdateWatcherGroupResponse, - UpdateWatcherPresetRequest, UpdateWatcherPresetResponse, UploadFileRequest, UploadFileResponse, - UploadFileChunk, DownloadFileChunk, - ValidateTokenRequest, ValidateTokenResponse, VerifyLoginRequest, VerifyLoginResponse, - VersionUpdateNotification, WatcherGroupUpdateNotification, WatcherPresetUpdateNotification, + UpdateWatcherPresetRequest, UpdateWatcherPresetResponse, UploadFileChunk, UploadFileRequest, + UploadFileResponse, ValidateTokenRequest, ValidateTokenResponse, VerifyLoginRequest, + VerifyLoginResponse, VersionUpdateNotification, WatcherGroupUpdateNotification, + WatcherPresetUpdateNotification, }; use base64::Engine as _; use futures::Stream; @@ -451,7 +451,10 @@ impl SyncService for SyncServiceImpl { } if chunk.seq != expected_seq { - error!("upload_file_stream: out-of-order chunk: expected={}, got={}", expected_seq, chunk.seq); + error!( + "upload_file_stream: out-of-order chunk: expected={}, got={}", + expected_seq, chunk.seq + ); return Err(Status::aborted("Out-of-order chunk sequence")); } expected_seq += 1; @@ -496,7 +499,8 @@ impl SyncService for SyncServiceImpl { } // Server-streaming download for large files - type DownloadFileStreamStream = Pin<Box<dyn Stream<Item = Result<DownloadFileChunk, Status>> + Send + 'static>>; + type DownloadFileStreamStream = + Pin<Box<dyn Stream<Item = Result<DownloadFileChunk, Status>> + Send + 'static>>; async fn download_file_stream( &self, request: Request<DownloadFileRequest>, @@ -558,7 +562,9 @@ impl SyncService for SyncServiceImpl { }); let stream = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream)) + Ok(Response::new( + Box::pin(stream) as Self::DownloadFileStreamStream + )) } async fn list_files( From ad42771cf6470b54ff72970391848b32145bd6f1 Mon Sep 17 00:00:00 2001 From: 
Yongjin Chong Date: Fri, 26 Sep 2025 13:44:31 -0600 Subject: [PATCH 41/71] Fix http size issue --- Dockerfile | 1 + 1 file changed, 1 insertion(+) diff --git a/Dockerfile b/Dockerfile index 6131157..a395005 100644 --- a/Dockerfile +++ b/Dockerfile @@ -12,6 +12,7 @@ RUN apt-get update && apt-get install -y \ pkg-config \ protobuf-compiler \ musl-tools \ + linux-headers-amd64 \ && rm -rf /var/lib/apt/lists/* # Enable target From 65c99f067b602ecf733f388d9b216281254c9c99 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 15:00:25 -0600 Subject: [PATCH 42/71] Fix http size issue --- Dockerfile | 30 +++++++++++++++--------------- 1 file changed, 15 insertions(+), 15 deletions(-) diff --git a/Dockerfile b/Dockerfile index a395005..8d2f35f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -5,19 +5,16 @@ FROM rust:slim AS builder ARG VCS_REF ARG BUILD_DATE ARG VERSION -ARG RUST_TARGET=x86_64-unknown-linux-musl +ARG RUST_TARGET=x86_64-unknown-linux-gnu -# Install build dependencies including protobuf compiler and musl toolchain +# Install build dependencies RUN apt-get update && apt-get install -y \ pkg-config \ protobuf-compiler \ - musl-tools \ - linux-headers-amd64 \ + build-essential \ + cmake \ && rm -rf /var/lib/apt/lists/* -# Enable target -RUN rustup target add ${RUST_TARGET} - # Create app directory WORKDIR /app @@ -27,27 +24,30 @@ COPY Cargo.toml Cargo.lock build.rs ./ # Copy proto files for gRPC compilation COPY proto ./proto +# Create dummy source files for dependency caching RUN mkdir -p src && echo "fn main() {}" > src/main.rs && echo "pub fn dummy() {}" > src/lib.rs + +# Build dependencies RUN cargo build --release --features redis-cache --target ${RUST_TARGET} +# Remove dummy files and copy real source RUN rm -f src/main.rs src/lib.rs COPY src ./src -RUN cargo clean +# Clean and rebuild with actual source +RUN cargo clean RUN cargo build --release --bin cosmic-sync-server --features redis-cache --target ${RUST_TARGET} -# Runtime stage -FROM gcr.io/distroless/static:nonroot +# Runtime stage - use base with glibc instead of static +FROM gcr.io/distroless/cc:nonroot WORKDIR /app -# Copy the binary from builder stage (musl static) -ARG RUST_TARGET=x86_64-unknown-linux-musl +# Copy the binary from builder stage +ARG RUST_TARGET=x86_64-unknown-linux-gnu COPY --from=builder /app/target/${RUST_TARGET}/release/cosmic-sync-server /app/cosmic-sync-server COPY config ./config USER nonroot:nonroot - EXPOSE 50051 8080 -# Distroless lacks curl; rely on container orchestrator health checks -ENTRYPOINT ["/app/cosmic-sync-server"] +ENTRYPOINT ["/app/cosmic-sync-server"] \ No newline at end of file From 266d29726a7c2a39de8e8cf7e70e46b278b4db78 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 16:46:15 -0600 Subject: [PATCH 43/71] Fix http size issue --- src/server/service.rs | 59 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 49 insertions(+), 10 deletions(-) diff --git a/src/server/service.rs b/src/server/service.rs index ba23717..aca7ad8 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -32,6 +32,7 @@ use futures::Stream; use futures::StreamExt; use std::pin::Pin; use std::sync::Arc; +use std::time::Duration; use tokio::io::AsyncWriteExt; use tokio::sync::mpsc; use tokio_stream::wrappers::ReceiverStream; @@ -783,12 +784,21 @@ impl SyncService for SyncServiceImpl { ) -> Result, Status> { debug!("Auth updates subscription request received"); - let (_tx, rx) = mpsc::channel(128); + let (tx, rx) = mpsc::channel(128); let stream = 
ReceiverStream::new(rx); - // TODO: 실제 사용자 인증 상태 모니터링 및 업데이트 로직 구현 + // Keep connection alive with periodic heartbeat tokio::spawn(async move { - // 향후 실제 이벤트 전송 로직 구현 + let mut interval = tokio::time::interval(Duration::from_secs(30)); + loop { + interval.tick().await; + // Send heartbeat or check if client is still connected + if tx.is_closed() { + debug!("Auth updates subscription closed by client"); + break; + } + // TODO: Send actual auth update events when implemented + } }); Ok(Response::new( @@ -802,12 +812,20 @@ impl SyncService for SyncServiceImpl { ) -> Result, Status> { debug!("Device updates subscription request received"); - let (_tx, rx) = mpsc::channel(128); + let (tx, rx) = mpsc::channel(128); let stream = ReceiverStream::new(rx); - // TODO: 실제 장치 업데이트 모니터링 구현 + // Keep connection alive with periodic heartbeat tokio::spawn(async move { - // 향후 실제 이벤트 전송 로직 구현 + let mut interval = tokio::time::interval(Duration::from_secs(30)); + loop { + interval.tick().await; + if tx.is_closed() { + debug!("Device updates subscription closed by client"); + break; + } + // TODO: Send actual device update events when implemented + } }); Ok(Response::new( @@ -821,12 +839,20 @@ impl SyncService for SyncServiceImpl { ) -> Result, Status> { debug!("Encryption key updates subscription request received"); - let (_tx, rx) = mpsc::channel(128); + let (tx, rx) = mpsc::channel(128); let stream = ReceiverStream::new(rx); - // TODO: 실제 암호화 키 업데이트 모니터링 구현 + // Keep connection alive with periodic heartbeat tokio::spawn(async move { - // 향후 실제 이벤트 전송 로직 구현 + let mut interval = tokio::time::interval(Duration::from_secs(30)); + loop { + interval.tick().await; + if tx.is_closed() { + debug!("Encryption key updates subscription closed by client"); + break; + } + // TODO: Send actual encryption key update events when implemented + } }); Ok(Response::new( @@ -1291,7 +1317,20 @@ impl SyncService for SyncServiceImpl { ) -> Result, Status> { debug!("Version updates subscription requested"); - let (_tx, rx) = mpsc::channel(128); + let (tx, rx) = mpsc::channel(128); + + // Keep connection alive with periodic heartbeat + tokio::spawn(async move { + let mut interval = tokio::time::interval(Duration::from_secs(30)); + loop { + interval.tick().await; + if tx.is_closed() { + debug!("Version updates subscription closed by client"); + break; + } + // TODO: Send actual version update events when implemented + } + }); // Create a stream from the receiver let stream = From 34e78d65801bec81e4f34ac63ba6d4b8de6ff74f Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 17:20:34 -0600 Subject: [PATCH 44/71] Fix http size issue --- .github/workflows/deploy-production.yml | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/.github/workflows/deploy-production.yml b/.github/workflows/deploy-production.yml index 6de52f9..0750d21 100644 --- a/.github/workflows/deploy-production.yml +++ b/.github/workflows/deploy-production.yml @@ -69,40 +69,40 @@ jobs: with: task-definition: ${{ steps.task-def.outputs.task-definition }} service: production-pop-os-cosmic-sync - cluster: pop-os-us-west-2 + cluster: genesis76-us-east-2 wait-for-service-stability: true - name: Check ECS Service Status run: | echo "Checking ECS service status..." 
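Aside on PATCH 43 above: the original code bound the channel as (_tx, rx), so the sender was dropped immediately and each subscription stream ended the moment it was returned to the client. Renaming to tx and parking the sender in a spawned heartbeat task is what actually keeps these long-lived streams open until the client disconnects. Since the same loop now appears four times (auth, device, encryption key, and version updates), it could be factored into one helper; a sketch under that assumption (spawn_subscription_keepalive is hypothetical, not in the patch):

    use std::time::Duration;
    use tokio::sync::mpsc;

    // Holds the Sender so the paired ReceiverStream stays open, and exits
    // once the client drops the stream (tx.is_closed() becomes true).
    fn spawn_subscription_keepalive<T: Send + 'static>(tx: mpsc::Sender<T>, name: &'static str) {
        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(30));
            loop {
                interval.tick().await;
                if tx.is_closed() {
                    tracing::debug!("{} subscription closed by client", name);
                    break;
                }
                // TODO: forward real update events here once implemented
            }
        });
    }

Each handler would then call spawn_subscription_keepalive(tx, "auth updates") and return the ReceiverStream as before. The deploy-production workflow diff resumes below.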
aws ecs describe-services \ - --cluster pop-os-us-west-2 \ + --cluster genesis76-us-east-2 \ --services production-pop-os-cosmic-sync \ --query 'services[0].{Status:status,RunningCount:runningCount,PendingCount:pendingCount,DesiredCount:desiredCount}' echo "Getting recent ECS events..." aws ecs describe-services \ - --cluster pop-os-us-west-2 \ + --cluster genesis76-us-east-2 \ --services production-pop-os-cosmic-sync \ --query 'services[0].events[:10]' echo "Getting task details..." if aws ecs list-tasks --cluster pop-os-us-west-2 --service-name production-pop-os-cosmic-sync --query 'taskArns[0]' --output text 2>/dev/null; then TASK_ARN=$(aws ecs list-tasks \ - --cluster pop-os-us-west-2 \ + --cluster genesis76-us-east-2 \ --service-name production-pop-os-cosmic-sync \ --query 'taskArns[0]' --output text) if [ "$TASK_ARN" != "None" ] && [ "$TASK_ARN" != "" ]; then echo "Task ARN: $TASK_ARN" aws ecs describe-tasks \ - --cluster pop-os-us-west-2 \ + --cluster genesis76-us-east-2 \ --tasks $TASK_ARN \ --query 'tasks[0].{LastStatus:lastStatus,HealthStatus:healthStatus,CreatedAt:createdAt,StoppedReason:stoppedReason}' 2>/dev/null || echo "Could not get task details" echo "Getting container details..." aws ecs describe-tasks \ - --cluster pop-os-us-west-2 \ + --cluster genesis76-us-east-2 \ --tasks $TASK_ARN \ --query 'tasks[0].containers[?name==`app`].{Name:name,LastStatus:lastStatus,ExitCode:exitCode,Reason:reason}' 2>/dev/null || echo "Could not get container details" else From 7b12ca479436d1b4c12763b364b39acf41a0cbc3 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 20:55:09 -0600 Subject: [PATCH 45/71] Fix http size issue --- .github/workflows/deploy-production.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/deploy-production.yml b/.github/workflows/deploy-production.yml index 0750d21..75993cb 100644 --- a/.github/workflows/deploy-production.yml +++ b/.github/workflows/deploy-production.yml @@ -70,7 +70,7 @@ jobs: task-definition: ${{ steps.task-def.outputs.task-definition }} service: production-pop-os-cosmic-sync cluster: genesis76-us-east-2 - wait-for-service-stability: true + wait-for-service-stability: false - name: Check ECS Service Status run: | @@ -87,7 +87,7 @@ jobs: --query 'services[0].events[:10]' echo "Getting task details..." 
- if aws ecs list-tasks --cluster pop-os-us-west-2 --service-name production-pop-os-cosmic-sync --query 'taskArns[0]' --output text 2>/dev/null; then + if aws ecs list-tasks --cluster genesis76-us-east-2 --service-name production-pop-os-cosmic-sync --query 'taskArns[0]' --output text 2>/dev/null; then TASK_ARN=$(aws ecs list-tasks \ --cluster genesis76-us-east-2 \ --service-name production-pop-os-cosmic-sync \ @@ -110,5 +110,5 @@ jobs: fi else echo "Permission denied for ListTasks - checking CloudWatch Logs instead" - echo "Check logs at: https://console.aws.amazon.com/cloudwatch/home?region=us-west-2#logsV2:log-groups/log-group/%2Fecs%2Fproduction-pop-os-cosmic-sync" + echo "Check logs at: https://console.aws.amazon.com/cloudwatch/home?region=us-east-2#logsV2:log-groups/log-group/%2Fecs%2Fproduction-pop-os-cosmic-sync" fi \ No newline at end of file From 9c2c2993a51d80358710aee1569b930c966cd427 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 21:27:38 -0600 Subject: [PATCH 46/71] Fix http size issue --- .github/workflows/deploy-production.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/deploy-production.yml b/.github/workflows/deploy-production.yml index 810e707..75993cb 100644 --- a/.github/workflows/deploy-production.yml +++ b/.github/workflows/deploy-production.yml @@ -70,7 +70,7 @@ jobs: task-definition: ${{ steps.task-def.outputs.task-definition }} service: production-pop-os-cosmic-sync cluster: genesis76-us-east-2 - wait-for-service-stability: true + wait-for-service-stability: false - name: Check ECS Service Status run: | From fbe4cc704df4bceecd3b216bcf4171362a44f12c Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 22:20:26 -0600 Subject: [PATCH 47/71] Fix http size issue --- .github/workflows/deploy-staging.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 87c0b66..0cb1ea7 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -54,6 +54,7 @@ jobs: - name: Download Task Definition run: | + # Use explicit task definition family name instead of secret aws ecs describe-task-definition \ --task-definition ${{ secrets.STAGING_AWS_TASK_DEFINITION }} \ --query taskDefinition > /tmp/task.json From 47ebffb5922838821842d826325ed826209a1e67 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Fri, 26 Sep 2025 22:46:25 -0600 Subject: [PATCH 48/71] Fix http size issue --- .github/workflows/deploy-staging.yml | 13 +++++++++---- 1 file changed, 9 insertions(+), 4 deletions(-) diff --git a/.github/workflows/deploy-staging.yml b/.github/workflows/deploy-staging.yml index 0cb1ea7..0ad22f1 100644 --- a/.github/workflows/deploy-staging.yml +++ b/.github/workflows/deploy-staging.yml @@ -77,15 +77,17 @@ jobs: - name: App health check (no ECS read permissions required) env: - HEALTHCHECK_URL: https://sync.genesis76.com/health + HEALTHCHECK_URL: https://sync.genesis76.com/health/live run: | echo "Waiting for app health endpoint..." set +e sleep 60 - URL="${HEALTHCHECK_URL:-https://sync.genesis76.com/health}" + URL="${HEALTHCHECK_URL:-https://sync.genesis76.com/health/live}" for i in $(seq 1 60); do + echo "Health check attempt $i/60..." 
+ # Try HTTP/2 first if curl -fsS --http2 --connect-timeout 5 --max-time 8 -A 'GitHubActionsHealthCheck/1.0' -H 'Accept: application/json' -H 'Cache-Control: no-cache, no-store' -H 'Pragma: no-cache' "$URL"; then - echo "App is healthy" + echo "App is healthy (HTTP/2)" exit 0 fi # Fallback to HTTP/1.1 in case ALB enforces HTTP/1.1 only @@ -93,7 +95,10 @@ jobs: echo "App is healthy (HTTP/1.1)" exit 0 fi + # Show debug info on failure + echo "Health check failed, checking /health/details..." + curl -v "https://sync.genesis76.com/health/details" || echo "Details endpoint also failed" sleep 10 done - echo "App health check failed" + echo "App health check failed after 60 attempts" exit 1 From ca1655252a2bed056b8005a266a462a636de7d4f Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Sat, 27 Sep 2025 00:17:53 -0600 Subject: [PATCH 49/71] Check point for 2024 --- Cargo.toml | 5 +++-- build.rs | 4 ++-- src/server/startup.rs | 4 ++-- src/storage/mod.rs | 4 ++-- 4 files changed, 9 insertions(+), 8 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index cf18c14..fd0f36f 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,7 +1,7 @@ [package] name = "cosmic-sync-server" version = "2.0.0" -edition = "2021" +edition = "2024" authors = ["System76 "] description = "High-performance synchronization server for System76's COSMIC Desktop Environment" repository = "https://github.com/pop-os/cosmic-sync-server" @@ -9,7 +9,7 @@ license = "GPL-3.0" keywords = ["sync", "cosmic", "desktop", "system76", "grpc"] categories = ["network-programming", "web-programming", "filesystem"] readme = "README.md" -rust-version = "1.75" +rust-version = "1.86" [lib] name = "cosmic_sync_server" @@ -133,6 +133,7 @@ tokio-test = "0.4" # Build dependencies [build-dependencies] tonic-build = "0.14.2" +prost-build = "0.14" tonic-prost-build = "0.14.2" # Feature flags for conditional compilation diff --git a/build.rs b/build.rs index e6456c1..2bad92d 100755 --- a/build.rs +++ b/build.rs @@ -1,9 +1,9 @@ -// Use tonic-prost-build (tonic 0.14+) - fn main() -> Result<(), Box> { println!("cargo:rerun-if-changed=proto/sync.proto"); println!("cargo:rerun-if-changed=proto/health.proto"); + // Use tonic-prost-build (tonic 0.14.x) simple API + // Note: compile_protos takes a single path; call per file tonic_prost_build::compile_protos("proto/sync.proto")?; tonic_prost_build::compile_protos("proto/health.proto")?; diff --git a/src/server/startup.rs b/src/server/startup.rs index f83ad8b..df87b01 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -316,7 +316,7 @@ async fn init_storage_from_config(config: &ServerConfig) -> Result) -> Result<()> { pub async fn init_storage_legacy(db_url: Option) -> Arc { match db_url { Some(url) if url.starts_with("mysql://") => match parse_mysql_url(&url) { - Ok(config) => match init_storage(&config).await { + Ok(config) => match init_storage(config).await { Ok(storage) => return storage, Err(e) => { error!("Failed to initialize MySQL storage: {}", e); diff --git a/src/storage/mod.rs b/src/storage/mod.rs index c4cbf79..792198b 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -514,7 +514,7 @@ pub struct StorageFactory; impl StorageFactory { /// Create MySQL storage #[instrument(skip(config))] - pub async fn create_mysql_storage(config: &DatabaseConfig) -> AppResult { + pub async fn create_mysql_storage(config: DatabaseConfig) -> AppResult { info!("Creating optimized MySQL storage"); let host = config.host.clone(); @@ -571,7 +571,7 @@ impl StorageFactory { /// Optimized storage initialization 
#[instrument(skip(config))] -pub async fn init_storage(config: &DatabaseConfig) -> AppResult> { +pub async fn init_storage(config: DatabaseConfig) -> AppResult> { info!("Initializing optimized storage layer"); let storage = StorageFactory::create_mysql_storage(config).await?; From b5068f16e7841e4b796423b01bbf106d6322c0c5 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Sat, 27 Sep 2025 00:31:06 -0600 Subject: [PATCH 50/71] Check point for 2024 --- Cargo.lock | 3 ++- src/auth/token.rs | 2 +- src/container/builder.rs | 2 +- src/main.rs | 2 +- src/server/app_state.rs | 6 +++++- src/utils/crypto.rs | 2 +- 6 files changed, 11 insertions(+), 6 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index febd6c1..5ba603f 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1,6 +1,6 @@ # This file is automatically @generated by Cargo. # It is not intended for manual editing. -version = 3 +version = 4 [[package]] name = "actix-codec" @@ -1483,6 +1483,7 @@ dependencies = [ "oauth2", "once_cell", "prost", + "prost-build", "prost-types", "rand 0.8.5", "redis", diff --git a/src/auth/token.rs b/src/auth/token.rs index 82b5a75..ea3bc43 100644 --- a/src/auth/token.rs +++ b/src/auth/token.rs @@ -35,7 +35,7 @@ pub fn generate_session_token() -> String { .duration_since(UNIX_EPOCH) .map(|d| d.as_secs()) .unwrap_or(0); - let random = thread_rng().gen::(); + let random = thread_rng().r#gen::(); format!("token_{}_{}", now, random) } diff --git a/src/container/builder.rs b/src/container/builder.rs index 1d3873b..f311fdb 100644 --- a/src/container/builder.rs +++ b/src/container/builder.rs @@ -57,7 +57,7 @@ impl ContainerBuilder { Arc::new(crate::storage::memory::MemoryStorage::new()) as Arc } else { info!("📊 Initializing storage from configuration"); - init_storage(&config.database).await? + init_storage(config.database.clone()).await? } }; diff --git a/src/main.rs b/src/main.rs index 6780ef7..dcc208c 100644 --- a/src/main.rs +++ b/src/main.rs @@ -79,7 +79,7 @@ async fn start_legacy() -> Result<()> { let config = build_config().await?; // Initialize storage layer with connection pooling - let storage = init_storage(&config.database).await?; + let storage = init_storage(config.database.clone()).await?; info!( "🚀 Starting COSMIC Sync Server v{}", diff --git a/src/server/app_state.rs b/src/server/app_state.rs index 646e545..172153b 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -133,7 +133,11 @@ impl AppState { ] { if std::env::var(k).is_err() { if let Some(v) = loader.get_config_value(k, None).await { - std::env::set_var(k, &v); + // SAFETY: Setting environment variables during initialization before + // worker threads start to avoid races with concurrent env access. 
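(The unsafe block that this SAFETY comment introduces continues just below.) Background for this hunk: PATCH 49 moved the crate to the Rust 2024 edition, and in that edition std::env::set_var and std::env::remove_var are unsafe fn, because mutating the environment while another thread reads it is undefined behavior on POSIX. The edition change therefore forces every caller to justify why no concurrent access can happen. A minimal illustration of the pattern under its intended precondition (seed_env is a hypothetical helper, not repo code; it assumes it runs in main() before any runtime or worker threads exist):

    fn seed_env(pairs: &[(&str, String)]) {
        for (k, v) in pairs {
            if std::env::var(k).is_err() {
                // SAFETY: no other threads have been spawned yet, so nothing
                // can read the process environment concurrently.
                unsafe { std::env::set_var(k, v) };
            }
        }
    }

One design note: in the patch this runs inside AppState initialization, which may already be on a multi-threaded tokio runtime, so hoisting the seeding ahead of runtime startup would make the SAFETY argument airtight.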
+ unsafe { + std::env::set_var(k, &v); + } } } } diff --git a/src/utils/crypto.rs b/src/utils/crypto.rs index d00e0d0..e48a15a 100644 --- a/src/utils/crypto.rs +++ b/src/utils/crypto.rs @@ -154,7 +154,7 @@ pub fn generate_file_id(user_id: &str, filename: &str, file_hash: &str) -> u64 { let timestamp_nanos = now.as_nanos(); // 랜덤 요소 추가 (16비트) - let random_part: u16 = rand::thread_rng().gen(); + let random_part: u16 = rand::thread_rng().r#gen(); // 원래 입력에 타임스탬프와 랜덤 값 추가 let input = format!( From 875870be217318da7eb8d32fe2a4f9f642433784 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Sat, 27 Sep 2025 00:34:23 -0600 Subject: [PATCH 51/71] Check point for 2024 - fmt --- src/auth/oauth.rs | 20 +++++++++----- src/auth/token.rs | 4 +-- src/bin/rabbit_consumer.rs | 4 +-- src/container/builder.rs | 2 +- src/domain/events.rs | 2 +- src/handlers/api.rs | 2 +- src/handlers/auth_handler.rs | 22 ++++++++++----- src/handlers/device_handler.rs | 22 ++++++++++----- src/handlers/file/delete.rs | 2 +- src/handlers/file/download.rs | 2 +- src/handlers/file/list.rs | 11 ++++++-- src/handlers/file/upload.rs | 2 +- src/handlers/file_handler.rs | 12 ++++++--- src/handlers/health.rs | 2 +- src/handlers/metrics.rs | 2 +- src/handlers/oauth.rs | 12 ++++++--- src/handlers/sync_handler.rs | 2 +- src/handlers/usage_handler.rs | 2 +- src/handlers/watcher_handler.rs | 37 ++++++++++++++++++++----- src/lib.rs | 6 ++--- src/monitoring.rs | 2 +- src/server/app_state.rs | 15 +++++++---- src/server/connection_cleanup.rs | 2 +- src/server/event_bus.rs | 4 +-- src/server/http.rs | 2 +- src/server/notification_manager.rs | 2 +- src/server/service.rs | 11 +++++--- src/server/startup.rs | 6 ++--- src/services/auth_service.rs | 2 +- src/services/encryption_service.rs | 2 +- src/services/file_service.rs | 43 +++++++++++++++++++++++------- src/services/usage_service.rs | 2 +- src/storage/file_storage.rs | 4 +-- src/storage/memory.rs | 8 +++--- src/storage/mod.rs | 6 ++--- src/storage/mysql.rs | 8 ++---- src/storage/mysql_file.rs | 14 ++++++---- src/storage/mysql_watcher.rs | 32 ++++++++++++++++------ src/utils/crypto.rs | 2 +- src/utils/validator.rs | 5 +++- 40 files changed, 231 insertions(+), 111 deletions(-) diff --git a/src/auth/oauth.rs b/src/auth/oauth.rs index 8e5fe33..0705b59 100644 --- a/src/auth/oauth.rs +++ b/src/auth/oauth.rs @@ -11,7 +11,7 @@ use crate::{ }; use chrono::{DateTime, Utc}; use hex; -use rand::{rngs::OsRng, RngCore}; +use rand::{RngCore, rngs::OsRng}; use reqwest::Client; use serde::Deserialize; use sha2::{Digest, Sha256}; @@ -206,7 +206,7 @@ impl OAuthService { return Err(AuthError::ExternalServiceError(format!( "Failed to connect to auth server: {}", e - ))) + ))); } }; @@ -245,7 +245,7 @@ impl OAuthService { None => { return Err(AuthError::UserNotFound( "User not found or not available".to_string(), - )) + )); } }; @@ -367,7 +367,10 @@ impl OAuthService { } Ok(None) => { // Account doesn't exist in local DB, try to fetch from external auth server - info!("🔄 Account not found in local DB, attempting to fetch from external auth server: account_hash={}", account_hash); + info!( + "🔄 Account not found in local DB, attempting to fetch from external auth server: account_hash={}", + account_hash + ); // Try to get user info from external auth server using the token match self.get_user_info_from_external_server(token).await { @@ -403,7 +406,10 @@ impl OAuthService { } } Err(e) => { - warn!("⚠️ Could not fetch user info from external server: {}. 
Proceeding with token validation anyway.", e); + warn!( + "⚠️ Could not fetch user info from external server: {}. Proceeding with token validation anyway.", + e + ); // 계속 진행 - 계정은 외부 서버에 있을 수 있음 } } @@ -769,7 +775,9 @@ pub async fn process_oauth_code( info!("✅ Account creation verified in database"); } Ok(None) => { - error!("⚠️ Account not found after creation - may be a database sync issue"); + error!( + "⚠️ Account not found after creation - may be a database sync issue" + ); } Err(e) => { error!("⚠️ Error verifying account creation: {}", e); diff --git a/src/auth/token.rs b/src/auth/token.rs index ea3bc43..8d66353 100644 --- a/src/auth/token.rs +++ b/src/auth/token.rs @@ -1,8 +1,8 @@ use chrono::Utc; use hex; -use rand::thread_rng; use rand::Rng; -use rand::{random, rngs::OsRng, RngCore}; +use rand::thread_rng; +use rand::{RngCore, random, rngs::OsRng}; use sha2::{Digest, Sha256}; use std::time::{SystemTime, UNIX_EPOCH}; diff --git a/src/bin/rabbit_consumer.rs b/src/bin/rabbit_consumer.rs index e069ff7..b6d56f9 100644 --- a/src/bin/rabbit_consumer.rs +++ b/src/bin/rabbit_consumer.rs @@ -6,9 +6,9 @@ use cosmic_sync_server::config::settings::MessageBrokerConfig; use cosmic_sync_server::server::event_bus::RabbitMqEventBus; use lapin::{ + BasicProperties, ExchangeKind, options::*, types::{AMQPValue, FieldTable}, - BasicProperties, ExchangeKind, }; #[cfg(not(feature = "redis-cache"))] use once_cell::sync::Lazy; @@ -18,7 +18,7 @@ use std::collections::HashSet; use std::sync::Mutex; #[cfg(feature = "redis-cache")] -use redis::{aio::ConnectionManager, Client as RedisClient}; +use redis::{Client as RedisClient, aio::ConnectionManager}; #[cfg(feature = "redis-cache")] use tokio::sync::OnceCell; diff --git a/src/container/builder.rs b/src/container/builder.rs index f311fdb..81f22fe 100644 --- a/src/container/builder.rs +++ b/src/container/builder.rs @@ -3,7 +3,7 @@ use crate::{ container::AppContainer, error::Result, services::{AuthService, DeviceService, EncryptionService, FileService}, - storage::{init_storage, Storage}, + storage::{Storage, init_storage}, }; use std::sync::Arc; use tracing::{info, instrument}; diff --git a/src/domain/events.rs b/src/domain/events.rs index a9d9730..1ce0d7e 100644 --- a/src/domain/events.rs +++ b/src/domain/events.rs @@ -252,7 +252,7 @@ pub trait EventStore: Send + Sync { /// Get events for a specific aggregate async fn get_events_for_aggregate(&self, aggregate_id: &str) - -> Result>; + -> Result>; /// Get events by type async fn get_events_by_type(&self, event_type: &str) -> Result>; diff --git a/src/handlers/api.rs b/src/handlers/api.rs index 4d58682..9222671 100644 --- a/src/handlers/api.rs +++ b/src/handlers/api.rs @@ -1,6 +1,6 @@ //! 
API handlers for system information -use actix_web::{web, HttpResponse, Result}; +use actix_web::{HttpResponse, Result, web}; use serde_json::json; /// Get API information diff --git a/src/handlers/auth_handler.rs b/src/handlers/auth_handler.rs index e3855d9..db8eae2 100644 --- a/src/handlers/auth_handler.rs +++ b/src/handlers/auth_handler.rs @@ -183,7 +183,8 @@ impl AuthHandler { debug!("Found existing session for device_hash: {}", device_hash); // Log detailed session information - info!("Session details for device_hash {}: client_id={}, auth_token_present={}, account_hash_present={}, encryption_key_present={}", + info!( + "Session details for device_hash {}: client_id={}, auth_token_present={}, account_hash_present={}, encryption_key_present={}", device_hash, session.client_id, session.auth_token.is_some(), @@ -215,8 +216,13 @@ impl AuthHandler { let account_hash = session.account_hash.unwrap_or_default(); let encryption_key = session.encryption_key.unwrap_or_default(); - info!("Returning complete auth status for device_hash {}: token_length={}, account_hash={}, key_length={}", - device_hash, auth_token.len(), account_hash, encryption_key.len()); + info!( + "Returning complete auth status for device_hash {}: token_length={}, account_hash={}, key_length={}", + device_hash, + auth_token.len(), + account_hash, + encryption_key.len() + ); // Authentication is complete - return full information let resp = CheckAuthStatusResponse { @@ -773,7 +779,7 @@ impl crate::services::Handler for AuthHandler { } // HTTP handler functions -use actix_web::{web, HttpRequest, HttpResponse, Result as ActixResult}; +use actix_web::{HttpRequest, HttpResponse, Result as ActixResult, web}; use serde_json::json; /// HTTP handler for checking auth status @@ -816,8 +822,12 @@ pub async fn handle_check_auth_status( session_id: Some(resp.session_id.clone()), } } else { - debug!("Authentication not complete for device_hash: {}, is_complete={}, auth_token_empty={}", - device_hash, resp.is_complete, resp.auth_token.is_empty()); + debug!( + "Authentication not complete for device_hash: {}, is_complete={}, auth_token_empty={}", + device_hash, + resp.is_complete, + resp.auth_token.is_empty() + ); crate::handlers::oauth::AuthStatusResponse { authenticated: false, token: None, diff --git a/src/handlers/device_handler.rs b/src/handlers/device_handler.rs index 9520815..84d002c 100644 --- a/src/handlers/device_handler.rs +++ b/src/handlers/device_handler.rs @@ -43,8 +43,10 @@ impl DeviceHandler { request: Request, ) -> Result, Status> { let mut req = request.into_inner(); - info!("device registration request: account_hash={}, device_hash={}, os_version={}, app_version={}", - req.account_hash, req.device_hash, req.os_version, req.app_version); + info!( + "device registration request: account_hash={}, device_hash={}, os_version={}, app_version={}", + req.account_hash, req.device_hash, req.os_version, req.app_version + ); // Input validation if req.account_hash.is_empty() { @@ -106,7 +108,10 @@ impl DeviceHandler { match self.app_state.device.register_device(&device).await { Ok(_) => { - info!("✅ device registration/update successful: account_hash={}, device_hash={}", server_account_hash, req.device_hash); + info!( + "✅ device registration/update successful: account_hash={}, device_hash={}", + server_account_hash, req.device_hash + ); // Publish device registered/updated event let routing_key = format!("device.registered.{}", server_account_hash); @@ -138,7 +143,10 @@ impl DeviceHandler { Ok(Response::new(response)) } Err(e) => { - 
error!("device registration/update failed: account_hash={}, device_hash={}, error={}", server_account_hash, req.device_hash, e); + error!( + "device registration/update failed: account_hash={}, device_hash={}, error={}", + server_account_hash, req.device_hash, e + ); let response = RegisterDeviceResponse { success: false, device_hash: String::new(), @@ -204,8 +212,10 @@ impl DeviceHandler { // log changed info if os_changed || app_changed || active_changed { - info!("device info changed: os_version_changed={}, app_version_changed={}, active_changed={}", - os_changed, app_changed, active_changed); + info!( + "device info changed: os_version_changed={}, app_version_changed={}, active_changed={}", + os_changed, app_changed, active_changed + ); } // save updated device info diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 4a17b78..14149e5 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -22,7 +22,7 @@ pub async fn handle_delete_file( _ => { return Ok(Response::new(response::file_delete_error( "Authentication failed", - ))) + ))); } }; let server_account_hash = verified.account_hash; diff --git a/src/handlers/file/download.rs b/src/handlers/file/download.rs index 177d9dc..6c3a3ef 100644 --- a/src/handlers/file/download.rs +++ b/src/handlers/file/download.rs @@ -38,7 +38,7 @@ pub async fn handle_download_file( _ => { return Ok(Response::new(response::file_download_error( "Authentication failed", - ))) + ))); } }; let server_account_hash = verified.account_hash; diff --git a/src/handlers/file/list.rs b/src/handlers/file/list.rs index 22a1b73..0663a46 100644 --- a/src/handlers/file/list.rs +++ b/src/handlers/file/list.rs @@ -49,7 +49,9 @@ pub async fn handle_list_files( .downcast_ref::() .is_some() { - warn!("ListFiles using in-memory storage backend - data may appear empty if previous uploads were to MySQL"); + warn!( + "ListFiles using in-memory storage backend - data may appear empty if previous uploads were to MySQL" + ); } // Convert client group_id to server group_id via FileService @@ -201,7 +203,12 @@ pub async fn handle_list_files( } if let Some(_) = time_filter { - info!("📊 Recovery sync results: {} files processed, {} filtered out, {} files returned", files_processed, files_filtered, sync_files.len()); + info!( + "📊 Recovery sync results: {} files processed, {} filtered out, {} files returned", + files_processed, + files_filtered, + sync_files.len() + ); } else { debug!("📋 File list: {} files returned", sync_files.len()); } diff --git a/src/handlers/file/upload.rs b/src/handlers/file/upload.rs index eb957f5..a4fcf85 100644 --- a/src/handlers/file/upload.rs +++ b/src/handlers/file/upload.rs @@ -32,7 +32,7 @@ pub async fn handle_upload_file( _ => { return Ok(Response::new(response::file_upload_error( "Authentication failed", - ))) + ))); } }; let server_account_hash = verified.account_hash; diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs index 44cc217..26e30d5 100644 --- a/src/handlers/file_handler.rs +++ b/src/handlers/file_handler.rs @@ -137,8 +137,10 @@ impl FileHandler { req: &crate::sync::UploadFileRequest, normalized_file_path: &str, ) -> Result<(), String> { - debug!("Starting single-attempt file path validation: group_id={}, watcher_id={}, file_path={}", - req.group_id, req.watcher_id, normalized_file_path); + debug!( + "Starting single-attempt file path validation: group_id={}, watcher_id={}, file_path={}", + req.group_id, req.watcher_id, normalized_file_path + ); // get watcher info once (no retry) 
let watcher = match self @@ -172,8 +174,10 @@ impl FileHandler { }; if !file_is_in_watcher_folder { - error!("File path validation failed: file '{}' is not within watcher folder '{}' (recursive: {})", - normalized_file_path, normalized_watcher_folder, watcher.recursive_path); + error!( + "File path validation failed: file '{}' is not within watcher folder '{}' (recursive: {})", + normalized_file_path, normalized_watcher_folder, watcher.recursive_path + ); return Err(format!( "File path '{}' is not within watcher folder '{}'", normalized_file_path, normalized_watcher_folder diff --git a/src/handlers/health.rs b/src/handlers/health.rs index 86b05b9..abc3136 100644 --- a/src/handlers/health.rs +++ b/src/handlers/health.rs @@ -1,7 +1,7 @@ use crate::handlers::HealthHandler; use crate::server::service::SyncServiceImpl; use crate::sync::{HealthCheckRequest, HealthCheckResponse}; -use actix_web::{web, HttpResponse, Result as ActixResult}; +use actix_web::{HttpResponse, Result as ActixResult, web}; use serde_json::json; use std::sync::Arc; use tonic::{Request, Response, Status}; diff --git a/src/handlers/metrics.rs b/src/handlers/metrics.rs index c1596fb..fa43521 100644 --- a/src/handlers/metrics.rs +++ b/src/handlers/metrics.rs @@ -1,6 +1,6 @@ //! Metrics handlers for monitoring -use actix_web::{web, HttpResponse, Result}; +use actix_web::{HttpResponse, Result, web}; use serde_json::json; /// Get Prometheus-formatted metrics diff --git a/src/handlers/oauth.rs b/src/handlers/oauth.rs index a9e5798..fca3ec5 100644 --- a/src/handlers/oauth.rs +++ b/src/handlers/oauth.rs @@ -62,7 +62,7 @@ impl OAuthHandler for SyncServiceImpl { use crate::auth::oauth::process_oauth_code; use crate::handlers::auth_handler::AuthHandler; use crate::server::app_state::{AppState, AuthSession}; -use actix_web::{get, web, HttpRequest, HttpResponse, Result as ActixResult}; +use actix_web::{HttpRequest, HttpResponse, Result as ActixResult, get, web}; use serde::{Deserialize, Serialize}; use serde_json::json; use std::sync::Arc; @@ -205,7 +205,9 @@ pub async fn handle_oauth_callback( ); v } else { - warn!("OAuth callback without state token or device identifiers - generating temporary device_hash"); + warn!( + "OAuth callback without state token or device identifiers - generating temporary device_hash" + ); let temp_device_hash = format!("temp_{}", chrono::Utc::now().timestamp()); info!("Generated temporary device_hash: {}", temp_device_hash); temp_device_hash @@ -258,7 +260,8 @@ pub async fn handle_oauth_callback( ); if exists { if let Some(session) = sessions.get(&device_hash) { - info!("Existing session details: client_id={}, auth_token_present={}, account_hash_present={}", + info!( + "Existing session details: client_id={}, auth_token_present={}, account_hash_present={}", session.client_id, session.auth_token.is_some(), session.account_hash.is_some() @@ -312,7 +315,8 @@ pub async fn handle_oauth_callback( // Verify session was updated correctly if let Ok(sessions) = state.auth_sessions.lock() { if let Some(updated_session) = sessions.get(&device_hash) { - info!("Verification - Updated session has auth_token: {}, account_hash: {}", + info!( + "Verification - Updated session has auth_token: {}, account_hash: {}", updated_session.auth_token.is_some(), updated_session.account_hash.is_some() ); diff --git a/src/handlers/sync_handler.rs b/src/handlers/sync_handler.rs index 82d37ef..b499408 100644 --- a/src/handlers/sync_handler.rs +++ b/src/handlers/sync_handler.rs @@ -1,6 +1,6 @@ use crate::server::app_state::AppState; -use 
crate::services::version_service::VersionService; use crate::services::Handler; +use crate::services::version_service::VersionService; use crate::storage::Storage; use crate::sync::{ AuthUpdateNotification, BroadcastFileRestoreRequest, BroadcastFileRestoreResponse, diff --git a/src/handlers/usage_handler.rs b/src/handlers/usage_handler.rs index 5c8a609..fa50102 100644 --- a/src/handlers/usage_handler.rs +++ b/src/handlers/usage_handler.rs @@ -1,4 +1,4 @@ -use actix_web::{web, HttpRequest, HttpResponse, Responder}; +use actix_web::{HttpRequest, HttpResponse, Responder, web}; use chrono::{Datelike, NaiveDate, Utc}; use serde::{Deserialize, Serialize}; use std::sync::Arc; diff --git a/src/handlers/watcher_handler.rs b/src/handlers/watcher_handler.rs index e0e3679..5d07959 100644 --- a/src/handlers/watcher_handler.rs +++ b/src/handlers/watcher_handler.rs @@ -393,7 +393,10 @@ impl WatcherHandler { .await { Ok(id) => { - info!("Watcher group registered successfully: account_hash={}, group_id={}, server_db_id={}", account_hash, req.group_id, id); + info!( + "Watcher group registered successfully: account_hash={}, group_id={}, server_db_id={}", + account_hash, req.group_id, id + ); id } Err(e) => { @@ -466,7 +469,10 @@ impl WatcherHandler { return_message: String::new(), }; - debug!("Returning successful response for register watcher group: client_group_id={}, server_db_id={}", client_group_id, registered_group_id); + debug!( + "Returning successful response for register watcher group: client_group_id={}, server_db_id={}", + client_group_id, registered_group_id + ); Ok(Response::new(response)) } @@ -583,7 +589,10 @@ impl WatcherHandler { return Err(Status::internal("Failed to save watcher conditions")); } } else { - debug!("Skip saving empty conditions to preserve existing watcher conditions: watcher_id={}", watcher_id); + debug!( + "Skip saving empty conditions to preserve existing watcher conditions: watcher_id={}", + watcher_id + ); } } @@ -743,8 +752,10 @@ impl WatcherHandler { let sync_start = std::time::Instant::now(); - debug!("Processing integrated configuration sync for user: {}, device: {}, incremental: {}, force: {}", - account_hash, device_hash, incremental, force_update); + debug!( + "Processing integrated configuration sync for user: {}, device: {}, incremental: {}, force: {}", + account_hash, device_hash, incremental, force_update + ); // Validate device if required auth::validate_device_if_required( @@ -1019,8 +1030,20 @@ impl WatcherHandler { conflict_details, }; - info!("Integrated configuration sync completed for user: {}, operations: {}, duration: {:.2}ms", - account_hash, response.stats.as_ref().map(|s| s.total_operations).unwrap_or_default(), response.stats.as_ref().map(|s| s.sync_duration_ms).unwrap_or_default()); + info!( + "Integrated configuration sync completed for user: {}, operations: {}, duration: {:.2}ms", + account_hash, + response + .stats + .as_ref() + .map(|s| s.total_operations) + .unwrap_or_default(), + response + .stats + .as_ref() + .map(|s| s.sync_duration_ms) + .unwrap_or_default() + ); Ok(Response::new(response)) } diff --git a/src/lib.rs b/src/lib.rs index 20dad6a..d3904ab 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -43,8 +43,8 @@ pub use config::settings::{Config, DatabaseConfig, ServerConfig}; // Storage abstractions with performance traits pub use storage::{ - init_storage, memory::MemoryStorage, mysql::MySqlStorage, Result as StorageResult, Storage, - StorageError, + Result as StorageResult, Storage, StorageError, init_storage, memory::MemoryStorage, 
+ mysql::MySqlStorage, }; // Event bus exports for consumers @@ -84,7 +84,7 @@ pub mod features { pub mod prelude { pub use crate::{ Account, AppContainer, AppResult, AuthToken, ContainerBuilder, DatabaseConfig, Device, - FileInfo, Result, ServerConfig, Storage, SyncError, NAME, VERSION, + FileInfo, NAME, Result, ServerConfig, Storage, SyncError, VERSION, }; pub use async_trait::async_trait; diff --git a/src/monitoring.rs b/src/monitoring.rs index 9306a1b..8bd8c26 100644 --- a/src/monitoring.rs +++ b/src/monitoring.rs @@ -1,7 +1,7 @@ use serde::{Deserialize, Serialize}; use std::sync::{ - atomic::{AtomicU64, AtomicUsize, Ordering}, Arc, + atomic::{AtomicU64, AtomicUsize, Ordering}, }; use std::time::{Duration, Instant}; use tokio::sync::RwLock; diff --git a/src/server/app_state.rs b/src/server/app_state.rs index 172153b..7d0436f 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -11,7 +11,7 @@ use crate::services::usage_service::{UsageChecker, UsageConfig, UsageService}; use crate::services::version_service::{VersionService, VersionServiceImpl}; use crate::storage::mysql::MySqlStorage; use crate::storage::mysql_watcher::MySqlWatcherExt; -use crate::storage::{memory::MemoryStorage, FileStorage, Storage}; +use crate::storage::{FileStorage, Storage, memory::MemoryStorage}; use chrono::{DateTime, Utc}; use std::collections::HashMap; use std::sync::Arc; @@ -393,7 +393,9 @@ impl AppState { // check if Storage trait object is MySqlStorage // already Arc so can't use directly // DatabaseFileStorage will create its own MySQL connection - info!("Database storage type selected, DatabaseFileStorage will create its own MySQL connection"); + info!( + "Database storage type selected, DatabaseFileStorage will create its own MySQL connection" + ); } // create default file storage @@ -684,7 +686,10 @@ impl AppState { return Err(e); } } else { - debug!("Incoming conditions are empty; preserving existing watcher conditions: watcher_id={}", watcher_id); + debug!( + "Incoming conditions are empty; preserving existing watcher conditions: watcher_id={}", + watcher_id + ); } // return existing watcher ID return Ok(watcher_id); @@ -731,7 +736,7 @@ impl AppState { update_type: crate::sync::watcher_group_update_notification::UpdateType, ) -> Result<(), crate::storage::StorageError> { use crate::sync::{ - watcher_group_update_notification::UpdateType, WatcherGroupUpdateNotification, + WatcherGroupUpdateNotification, watcher_group_update_notification::UpdateType, }; // get group data @@ -784,7 +789,7 @@ impl AppState { update_type: crate::sync::watcher_preset_update_notification::UpdateType, ) -> Result<(), crate::storage::StorageError> { use crate::sync::{ - watcher_preset_update_notification::UpdateType, WatcherPresetUpdateNotification, + WatcherPresetUpdateNotification, watcher_preset_update_notification::UpdateType, }; // create notification to broadcast diff --git a/src/server/connection_cleanup.rs b/src/server/connection_cleanup.rs index 8793119..2c23c93 100644 --- a/src/server/connection_cleanup.rs +++ b/src/server/connection_cleanup.rs @@ -2,7 +2,7 @@ use crate::server::connection_tracker::ConnectionTracker; use std::sync::Arc; -use tokio::time::{interval, Duration}; +use tokio::time::{Duration, interval}; use tracing::{debug, error, info}; /// Connection cleanup scheduler for removing old inactive connections diff --git a/src/server/event_bus.rs b/src/server/event_bus.rs index a0b2988..2b90278 100644 --- a/src/server/event_bus.rs +++ b/src/server/event_bus.rs @@ -26,8 +26,8 @@ impl EventBus 
for NoopEventBus { // RabbitMQ implementation use lapin::{ - options::*, types::FieldTable, BasicProperties, Channel, Connection, ConnectionProperties, - ExchangeKind, + BasicProperties, Channel, Connection, ConnectionProperties, ExchangeKind, options::*, + types::FieldTable, }; use std::sync::Arc; use tokio_stream::StreamExt; diff --git a/src/server/http.rs b/src/server/http.rs index 950d3e6..026d7a6 100644 --- a/src/server/http.rs +++ b/src/server/http.rs @@ -1,4 +1,4 @@ -use actix_web::{get, HttpResponse, Responder}; +use actix_web::{HttpResponse, Responder, get}; use serde::Serialize; use tracing::debug; diff --git a/src/server/notification_manager.rs b/src/server/notification_manager.rs index ef03de8..3562ff3 100644 --- a/src/server/notification_manager.rs +++ b/src/server/notification_manager.rs @@ -5,7 +5,7 @@ use base64::Engine as _; use std::collections::HashMap; use std::sync::Arc; use thiserror::Error; -use tokio::sync::{mpsc, Mutex}; +use tokio::sync::{Mutex, mpsc}; use tonic::Status; use tracing::{debug, error, info, warn}; diff --git a/src/server/service.rs b/src/server/service.rs index aca7ad8..1ff5d4b 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -118,8 +118,10 @@ impl SyncServiceImpl { debug!("Auth validation successful for account: {}", account_hash); Ok(()) } else { - error!("Auth validation failed: token valid={}, expected_account={}, actual_account={}", - auth_result.valid, account_hash, auth_result.account_hash); + error!( + "Auth validation failed: token valid={}, expected_account={}, actual_account={}", + auth_result.valid, account_hash, auth_result.account_hash + ); Err(Status::unauthenticated("Invalid authentication")) } } @@ -349,7 +351,10 @@ impl SyncServiceImpl { } } - info!("Initial file sync completed: {} files synced, {} skipped, total processed: {} for {}:{}", sync_count, skip_count, total_files, account_hash, device_hash); + info!( + "Initial file sync completed: {} files synced, {} skipped, total processed: {} for {}:{}", + sync_count, skip_count, total_files, account_hash, device_hash + ); Ok(()) } } diff --git a/src/server/startup.rs b/src/server/startup.rs index df87b01..574d66c 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -15,15 +15,15 @@ use crate::{ app_state::AppState, service::{SyncClientServiceImpl, SyncServiceImpl}, }, - storage::{init_storage, Storage}, + storage::{Storage, init_storage}, sync::{ sync_client_service_server::SyncClientServiceServer, sync_service_server::SyncServiceServer, }, }; use actix_cors::Cors; -use actix_web::{middleware, web, App, HttpServer}; -use tonic_health::{server::health_reporter, ServingStatus}; +use actix_web::{App, HttpServer, middleware, web}; +use tonic_health::{ServingStatus, server::health_reporter}; /// Optimized server startup with performance monitoring #[instrument(skip(config))] diff --git a/src/services/auth_service.rs b/src/services/auth_service.rs index 850aae9..2e3e0c3 100644 --- a/src/services/auth_service.rs +++ b/src/services/auth_service.rs @@ -5,7 +5,7 @@ use crate::sync::{ AuthNotificationResponse, AuthSuccessNotification, LoginResponse, OAuthExchangeResponse, VerifyLoginResponse, }; -use base64::{engine::general_purpose, Engine as _}; +use base64::{Engine as _, engine::general_purpose}; use chrono::{Duration, Utc}; use rand::Rng; use sha2::{Digest, Sha256}; diff --git a/src/services/encryption_service.rs b/src/services/encryption_service.rs index 90f0fd1..64248a4 100644 --- a/src/services/encryption_service.rs +++ b/src/services/encryption_service.rs 
@@ -1,6 +1,6 @@ use crate::storage::{Storage, StorageError}; use rand::distributions::Alphanumeric; -use rand::{thread_rng, Rng}; +use rand::{Rng, thread_rng}; use std::sync::Arc; use tracing::{debug, error, info, warn}; diff --git a/src/services/file_service.rs b/src/services/file_service.rs index 01bd72d..0a06fec 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -222,8 +222,13 @@ impl FileService { data: &Vec, update_type: sync::file_update_notification::UpdateType, ) -> Result<(), StorageError> { - debug!("🔄 FileService::store_file_with_update_type started: file_id={}, filename={}, size={} bytes, update_type={:?}", - file_info.file_id, file_info.filename, data.len(), update_type); + debug!( + "🔄 FileService::store_file_with_update_type started: file_id={}, filename={}, size={} bytes, update_type={:?}", + file_info.file_id, + file_info.filename, + data.len(), + update_type + ); // Store file metadata debug!("📄 Storing file metadata..."); @@ -365,7 +370,9 @@ impl FileService { file_info.revision ); info!(" → Account: {}", file_info.account_hash); - info!(" → 💡 File will be synchronized when clients reconnect and subscribe"); + info!( + " → 💡 File will be synchronized when clients reconnect and subscribe" + ); } } Err(e) => warn!("❌ Failed to broadcast file update to clients: {}", e), @@ -543,8 +550,14 @@ impl FileService { match file_info_result { Some((file_info, is_deleted)) => { - debug!("파일 정보 조회됨: file_id={}, account_hash={}, file_path={}, filename={}, is_deleted={}", - file_id, file_info.account_hash, file_info.file_path, file_info.filename, is_deleted); + debug!( + "파일 정보 조회됨: file_id={}, account_hash={}, file_path={}, filename={}, is_deleted={}", + file_id, + file_info.account_hash, + file_info.file_path, + file_info.filename, + is_deleted + ); if is_deleted { info!("파일이 이미 삭제되어 있음: file_id={}", file_id); @@ -588,8 +601,13 @@ impl FileService { } } - info!("파일 삭제 완료: file_id={}, account_hash={}, file_path={}, filename={}", - file_id, file_info.account_hash, file_info.file_path, file_info.filename); + info!( + "파일 삭제 완료: file_id={}, account_hash={}, file_path={}, filename={}", + file_id, + file_info.account_hash, + file_info.file_path, + file_info.filename + ); Ok(()) } Err(e) => { @@ -767,8 +785,15 @@ impl FileService { .await { Ok(Some(file_info)) => { - info!("파일을 찾았습니다: ID={}, 경로={}, 이름={}, 리비전={}, group_id={}, watcher_id={}", - file_info.file_id, search_path, search_name, file_info.revision, group_id, watcher_id); + info!( + "파일을 찾았습니다: ID={}, 경로={}, 이름={}, 리비전={}, group_id={}, watcher_id={}", + file_info.file_id, + search_path, + search_name, + file_info.revision, + group_id, + watcher_id + ); Ok(Some(file_info.into())) } Ok(None) => { diff --git a/src/services/usage_service.rs b/src/services/usage_service.rs index 04e03a3..7ca5f26 100644 --- a/src/services/usage_service.rs +++ b/src/services/usage_service.rs @@ -205,7 +205,7 @@ impl UsageService { "Storage limit exceeded. 
Current: {} bytes, Limit: {} bytes, Requested: {} bytes", usage.bytes_used, usage.bytes_limit, additional_bytes )), - warnings + warnings, )); } else { warnings.push(format!( diff --git a/src/storage/file_storage.rs b/src/storage/file_storage.rs index 626eef4..9be3019 100644 --- a/src/storage/file_storage.rs +++ b/src/storage/file_storage.rs @@ -5,14 +5,14 @@ use tracing::{debug, error, info, warn}; // remove unused fs helpers here (file storage is DB/S3 backed) // AWS SDK imports -use aws_config::{meta::region::RegionProviderChain, BehaviorVersion}; +use aws_config::{BehaviorVersion, meta::region::RegionProviderChain}; use aws_sdk_s3::operation::create_bucket::CreateBucketError; use aws_sdk_s3::operation::get_object::GetObjectError; use aws_sdk_s3::operation::head_bucket::HeadBucketError; use aws_sdk_s3::operation::head_object::HeadObjectError; use aws_sdk_s3::primitives::ByteStream; use aws_sdk_s3::types::{BucketLocationConstraint, CreateBucketConfiguration}; -use aws_sdk_s3::{config::Credentials, Client as S3Client}; +use aws_sdk_s3::{Client as S3Client, config::Credentials}; use aws_types::region::Region; use tokio::sync::OnceCell; diff --git a/src/storage/memory.rs b/src/storage/memory.rs index f16f57c..f885fc3 100644 --- a/src/storage/memory.rs +++ b/src/storage/memory.rs @@ -4,13 +4,13 @@ use std::sync::Arc; use tokio::sync::Mutex as TokioMutex; use tracing::debug; +use crate::models::FileEntry; use crate::models::account::Account; use crate::models::auth::AuthToken; use crate::models::device::{Device, DeviceInfo as ModelDeviceInfo}; use crate::models::file::FileInfo as ModelFileInfo; use crate::models::file::{FileInfo, FileNotice}; use crate::models::watcher::{WatcherGroup, WatcherPreset}; -use crate::models::FileEntry; use crate::storage::{Result, Storage, StorageError, StorageMetrics}; use crate::sync::{DeviceInfo, WatcherData, WatcherGroupData}; @@ -454,7 +454,7 @@ impl Storage for MemoryStorage { // 워처 그룹 복제 및 서버 ID 설정 let mut group = watcher_group.clone(); group.id = server_id; // 서버에서 생성한 ID로 변경 - // local_id는 클라이언트에서 온 값 그대로 유지 + // local_id는 클라이언트에서 온 값 그대로 유지 // watcher_ids를 복사 group.watcher_ids = watcher_group.watcher_ids.clone(); @@ -543,12 +543,12 @@ impl Storage for MemoryStorage { Some(_) => { return Err(StorageError::PermissionDenied( "Not the owner of the watcher group".to_string(), - )) + )); } None => { return Err(StorageError::NotFound( "Watcher group not found".to_string(), - )) + )); } }; diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 792198b..1c0eb6e 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -30,7 +30,7 @@ use crate::{ config::settings::{DatabaseConfig, StorageConfig, StorageType}, error::{Result as AppResult, SyncError}, models::{ - file::FileNotice, Account, AuthToken, Device, FileInfo, WatcherCondition, WatcherGroup, + Account, AuthToken, Device, FileInfo, WatcherCondition, WatcherGroup, file::FileNotice, }, sync::{WatcherData, WatcherGroupData}, }; @@ -269,7 +269,7 @@ pub trait Storage: Sync + Send { async fn store_file_info(&self, file: FileInfo) -> Result; async fn get_file_info(&self, file_id: u64) -> Result>; async fn get_file_info_include_deleted(&self, file_id: u64) - -> Result>; + -> Result>; async fn get_file_info_by_path( &self, account_hash: &str, @@ -317,7 +317,7 @@ pub trait Storage: Sync + Send { // Batch file operations async fn batch_store_files(&self, files: Vec) -> Result>; async fn batch_delete_files(&self, account_hash: &str, file_ids: Vec) - -> Result>; + -> Result>; // FileData related methods (optimized 
for large files) async fn store_file_data(&self, file_id: u64, data_bytes: Vec) -> Result<()>; diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 85741cd..97ae6f5 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1,8 +1,8 @@ use async_trait::async_trait; use chrono::prelude::*; // mysql_async removed; using only sqlx -use sqlx::mysql::MySqlPoolOptions as SqlxMySqlPoolOptions; use sqlx::MySqlPool as SqlxMySqlPool; +use sqlx::mysql::MySqlPoolOptions as SqlxMySqlPoolOptions; use tracing::{debug, error, info, warn}; use crate::models::account::Account; @@ -127,11 +127,7 @@ impl MySqlStorage { .map_err(|e| { StorageError::Database(format!("Failed to reselect watcher_groups: {}", e)) })?; - if let Some(id) = re { - id - } else { - 0 - } + if let Some(id) = re { id } else { 0 } } else { server_group_id }; diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index afa3d5b..e11885d 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -18,7 +18,7 @@ pub trait MySqlFileExt { /// 파일 정보 조회 (삭제된 파일 포함) async fn get_file_info_include_deleted(&self, file_id: u64) - -> Result>; + -> Result>; /// 경로로 파일 정보 조회 async fn get_file_info_by_path( @@ -750,8 +750,10 @@ impl MySqlFileExt for MySqlStorage { }; let new_revision = current_revision + 1; - debug!("파일 삭제 처리: file_id={}, file_path={}, filename={}, current_revision={}, new_revision={}", - file_id, file_path, filename, current_revision, new_revision); + debug!( + "파일 삭제 처리: file_id={}, file_path={}, filename={}, current_revision={}, new_revision={}", + file_id, file_path, filename, current_revision, new_revision + ); let now = Utc::now().timestamp(); @@ -1189,8 +1191,10 @@ impl MySqlFileExt for MySqlStorage { let size: u64 = row.try_get("size").unwrap_or(0); let key_id_opt: Option = row.try_get("key_id").ok(); - info!("✅ find_file_by_criteria 결과: file_id={}, filename={}, watcher_id={}, revision={}", - file_id, filename, watcher_id, revision); + info!( + "✅ find_file_by_criteria 결과: file_id={}, filename={}, watcher_id={}, revision={}", + file_id, filename, watcher_id, revision + ); // datetime을 Unix timestamp로 변환 let timestamp = prost_types::Timestamp { diff --git a/src/storage/mysql_watcher.rs b/src/storage/mysql_watcher.rs index e8612c6..eced4d7 100644 --- a/src/storage/mysql_watcher.rs +++ b/src/storage/mysql_watcher.rs @@ -791,8 +791,10 @@ impl MySqlWatcherExt for MySqlStorage { ) -> Result> { // Normalize folder path to preserve tilde (~) prefix for home directory let normalized_folder = helpers::normalize_path_preserve_tilde(folder); - debug!("Finding watcher by folder: account={}, group_id={}, original_folder={}, normalized_folder={}", - account_hash, group_id, folder, normalized_folder); + debug!( + "Finding watcher by folder: account={}, group_id={}, original_folder={}, normalized_folder={}", + account_hash, group_id, folder, normalized_folder + ); let row_opt = sqlx::query( r#"SELECT id FROM watchers WHERE account_hash = ? AND group_id = ? 
AND folder = ?"#, @@ -837,8 +839,14 @@ impl MySqlWatcherExt for MySqlStorage { // Normalize folder path to preserve tilde (~) prefix for home directory let normalized_folder = crate::utils::helpers::normalize_path_preserve_tilde(&watcher_data.folder); - debug!("Creating new watcher with conditions: account={}, group_id={}, original_folder={}, normalized_folder={}, is_recursive={}", - account_hash, group_id, &watcher_data.folder, normalized_folder, watcher_data.recursive_path); + debug!( + "Creating new watcher with conditions: account={}, group_id={}, original_folder={}, normalized_folder={}, is_recursive={}", + account_hash, + group_id, + &watcher_data.folder, + normalized_folder, + watcher_data.recursive_path + ); // use sqlx::Acquire; // not needed @@ -899,7 +907,10 @@ impl MySqlWatcherExt for MySqlStorage { id } None => { - error!("Watcher group not found for client group_id: {} after 15 attempts. Groups must be created via register_watcher_group first.", group_id); + error!( + "Watcher group not found for client group_id: {} after 15 attempts. Groups must be created via register_watcher_group first.", + group_id + ); return Err(StorageError::Database(format!( "Watcher group with client group_id {} not found after waiting", group_id @@ -919,7 +930,10 @@ impl MySqlWatcherExt for MySqlStorage { ); // 기존 watcher가 있는지 확인하고 타임스탬프 비교 (local_group_id 포함) - debug!("Checking for existing watcher with watcher_id: {}, account_hash: {}, local_group_id: {}", watcher_data.watcher_id, account_hash, group_id); + debug!( + "Checking for existing watcher with watcher_id: {}, account_hash: {}, local_group_id: {}", + watcher_data.watcher_id, account_hash, group_id + ); let existing_watcher: Option = sqlx::query_scalar( r#"SELECT updated_at FROM watchers WHERE watcher_id = ? AND account_hash = ? AND local_group_id = ?"# ) @@ -968,8 +982,10 @@ impl MySqlWatcherExt for MySqlStorage { return Ok(existing_id); } } else { - info!("Client watcher is newer (server: {}, client: {}), proceeding with watcher update", - existing_datetime, client_datetime); + info!( + "Client watcher is newer (server: {}, client: {}), proceeding with watcher update", + existing_datetime, client_datetime + ); // 기존 파일/워처를 삭제하지 않는다. 대신 이후 UPSERT로 워처 레코드를 갱신해 ID를 보존한다. // 조건은 필요 시 별도 경로에서 정리한다. 
diff --git a/src/utils/crypto.rs b/src/utils/crypto.rs index e48a15a..359e39d 100644 --- a/src/utils/crypto.rs +++ b/src/utils/crypto.rs @@ -1,5 +1,5 @@ use hex; -use rand::{rngs::OsRng, Rng, RngCore}; +use rand::{Rng, RngCore, rngs::OsRng}; use sha2::{Digest, Sha256}; use tracing::info; diff --git a/src/utils/validator.rs b/src/utils/validator.rs index 0e8ccec..03abf22 100644 --- a/src/utils/validator.rs +++ b/src/utils/validator.rs @@ -51,7 +51,10 @@ pub fn validate_watcher_folder(folder: &str) -> Result<(), String> { let allowed = whitelist.contains(seg) || regex_opt.as_ref().map_or(false, |re| re.is_match(seg)); if !allowed { - return Err(format!("Watcher folder contains numeric-only segment '{}' which is not allowed (set {}=1 or whitelist/regex)", seg, ENV_ALLOW_NUMERIC)); + return Err(format!( + "Watcher folder contains numeric-only segment '{}' which is not allowed (set {}=1 or whitelist/regex)", + seg, ENV_ALLOW_NUMERIC + )); } } } From 32fe89dc4ba6b68997fa885b2e88ec9cfa6953fc Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Sat, 27 Sep 2025 00:43:10 -0600 Subject: [PATCH 52/71] Check point for 2024 - dockerfile --- Dockerfile | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 8d2f35f..77aef29 100644 --- a/Dockerfile +++ b/Dockerfile @@ -38,15 +38,25 @@ COPY src ./src RUN cargo clean RUN cargo build --release --bin cosmic-sync-server --features redis-cache --target ${RUST_TARGET} -# Runtime stage - use base with glibc instead of static -FROM gcr.io/distroless/cc:nonroot +# Runtime stage - use Ubuntu 24.04 for newer glibc compatibility +FROM ubuntu:24.04 WORKDIR /app +# Install minimal runtime dependencies +RUN apt-get update && apt-get install -y --no-install-recommends \ + ca-certificates \ + && rm -rf /var/lib/apt/lists/* \ + && groupadd --gid 65532 nonroot \ + && useradd --uid 65532 --gid 65532 --no-create-home --shell /bin/false nonroot + # Copy the binary from builder stage ARG RUST_TARGET=x86_64-unknown-linux-gnu COPY --from=builder /app/target/${RUST_TARGET}/release/cosmic-sync-server /app/cosmic-sync-server COPY config ./config +# Change ownership to nonroot user +RUN chown -R nonroot:nonroot /app + USER nonroot:nonroot EXPOSE 50051 8080 From c30060c854010f36d3325329de062c49de1f2fc3 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 30 Sep 2025 16:34:53 -0600 Subject: [PATCH 53/71] Check point for streaming/key_id patch --- .github/workflows/deploy-production.yml | 22 +++++++++++----------- proto/sync.proto | 2 +- src/handlers/file/download.rs | 16 +++++++++++++++- src/handlers/file/find.rs | 1 + src/handlers/file/list.rs | 1 + src/models/file.rs | 3 ++- src/server/service.rs | 1 + src/services/version_service.rs | 1 + 8 files changed, 33 insertions(+), 14 deletions(-) diff --git a/.github/workflows/deploy-production.yml b/.github/workflows/deploy-production.yml index 75993cb..cd02207 100644 --- a/.github/workflows/deploy-production.yml +++ b/.github/workflows/deploy-production.yml @@ -68,41 +68,41 @@ jobs: uses: aws-actions/amazon-ecs-deploy-task-definition@v2 with: task-definition: ${{ steps.task-def.outputs.task-definition }} - service: production-pop-os-cosmic-sync - cluster: genesis76-us-east-2 + service: production-system76-cosmic-sync + cluster: system76-us-east-2 wait-for-service-stability: false - name: Check ECS Service Status run: | echo "Checking ECS service status..." 
aws ecs describe-services \ - --cluster genesis76-us-east-2 \ - --services production-pop-os-cosmic-sync \ + --cluster system76-us-east-2 \ + --services production-system76-cosmic-sync \ --query 'services[0].{Status:status,RunningCount:runningCount,PendingCount:pendingCount,DesiredCount:desiredCount}' echo "Getting recent ECS events..." aws ecs describe-services \ - --cluster genesis76-us-east-2 \ - --services production-pop-os-cosmic-sync \ + --cluster system76-us-east-2 \ + --services production-system76-cosmic-sync \ --query 'services[0].events[:10]' echo "Getting task details..." - if aws ecs list-tasks --cluster genesis76-us-east-2 --service-name production-pop-os-cosmic-sync --query 'taskArns[0]' --output text 2>/dev/null; then + if aws ecs list-tasks --cluster system76-us-east-2 --service-name production-pop-os-cosmic-sync --query 'taskArns[0]' --output text 2>/dev/null; then TASK_ARN=$(aws ecs list-tasks \ - --cluster genesis76-us-east-2 \ - --service-name production-pop-os-cosmic-sync \ + --cluster system76-us-east-2 \ + --service-name production-system76-cosmic-sync \ --query 'taskArns[0]' --output text) if [ "$TASK_ARN" != "None" ] && [ "$TASK_ARN" != "" ]; then echo "Task ARN: $TASK_ARN" aws ecs describe-tasks \ - --cluster genesis76-us-east-2 \ + --cluster system76-us-east-2 \ --tasks $TASK_ARN \ --query 'tasks[0].{LastStatus:lastStatus,HealthStatus:healthStatus,CreatedAt:createdAt,StoppedReason:stoppedReason}' 2>/dev/null || echo "Could not get task details" echo "Getting container details..." aws ecs describe-tasks \ - --cluster genesis76-us-east-2 \ + --cluster system76-us-east-2 \ --tasks $TASK_ARN \ --query 'tasks[0].containers[?name==`app`].{Name:name,LastStatus:lastStatus,ExitCode:exitCode,Reason:reason}' 2>/dev/null || echo "Could not get container details" else diff --git a/proto/sync.proto b/proto/sync.proto index 7e8dcec..359319d 100755 --- a/proto/sync.proto +++ b/proto/sync.proto @@ -366,7 +366,6 @@ message UploadFileRequest { int64 revision = 11; google.protobuf.Timestamp updated_time = 12; uint64 file_size = 13; - string key_id = 14; } @@ -456,6 +455,7 @@ message FileInfo { string file_path = 9; int64 revision = 10; uint64 file_size = 11; + string key_id = 12; } message DeleteFileRequest { diff --git a/src/handlers/file/download.rs b/src/handlers/file/download.rs index 6c3a3ef..c9f26f0 100644 --- a/src/handlers/file/download.rs +++ b/src/handlers/file/download.rs @@ -125,7 +125,21 @@ pub async fn handle_download_file( } } - // Get file data + // Size-based switching: if file is large, advise using streaming API + // Threshold: 2 MiB + const STREAM_THRESHOLD_BYTES: u64 = 1 * 1024 * 1024; + + if file_info.size > STREAM_THRESHOLD_BYTES { + info!( + "File is large ({} bytes), advising streaming API for file_id={}", + file_info.size, file_id + ); + return Ok(Response::new(response::file_download_error( + "File too large for unary download; use DownloadFileStream", + ))); + } + + // Get file data (fits in unary response) let download_result = handler.app_state.file.get_file_data(file_id).await; // Record download usage after attempt diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index d1f2789..23c25b4 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -92,6 +92,7 @@ pub async fn handle_find_file_by_criteria( }), revision: file_info.revision, file_size: file_info.size, + key_id: file_info.key_id.clone().unwrap_or_default(), }; Ok(Response::new(FindFileResponse { success: true, diff --git a/src/handlers/file/list.rs 
b/src/handlers/file/list.rs index 0663a46..0876f68 100644 --- a/src/handlers/file/list.rs +++ b/src/handlers/file/list.rs @@ -198,6 +198,7 @@ pub async fn handle_list_files( }), revision: file.revision, file_size: file.size, + key_id: file.key_id.clone().unwrap_or_default(), }; sync_files.push(file_info); } diff --git a/src/models/file.rs b/src/models/file.rs index a6980f3..2c04b98 100644 --- a/src/models/file.rs +++ b/src/models/file.rs @@ -169,6 +169,7 @@ impl From<&FileInfo> for sync::FileInfo { updated_time: Some(file_info.updated_time.clone()), revision: file_info.revision, file_size: file_info.size, + key_id: file_info.key_id.clone().unwrap_or_default(), } } } @@ -192,7 +193,7 @@ impl From for FileInfo { revision: proto.revision, account_hash: String::new(), size: proto.file_size, - key_id: None, + key_id: if proto.key_id.is_empty() { None } else { Some(proto.key_id) }, } } } diff --git a/src/server/service.rs b/src/server/service.rs index 1ff5d4b..294c4ac 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -245,6 +245,7 @@ impl SyncServiceImpl { seconds: file.updated_time.seconds, nanos: file.updated_time.nanos, }), + key_id: file.key_id.clone().unwrap_or_default(), }; // Encrypt metadata for transport if account key is available diff --git a/src/services/version_service.rs b/src/services/version_service.rs index fea40b7..f7fdfd2 100644 --- a/src/services/version_service.rs +++ b/src/services/version_service.rs @@ -105,6 +105,7 @@ impl VersionServiceImpl { file_path: file.file_path.clone(), revision: file.revision, file_size: file.file_size as u64, + key_id: String::new(), // SyncFile does not have key_id field } } From cbafc383e8fc8105b135e6f91375249e29e1d201 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 1 Oct 2025 16:16:44 -0600 Subject: [PATCH 54/71] Fix starting process --- .gitignore | 2 + Cargo.toml | 2 +- backup.sql | 0 backup/mysql-backup.tar.gz | Bin 0 -> 87 bytes backup_20251001.sql | 0 docker-compose.yml | 6 +- docs/STREAMING_DOWNLOAD_GUIDE.md | 364 +++++++++++++++++++++++++++++++ full_backup.sql | 0 src/server/app_state.rs | 6 + src/server/service.rs | 239 +++++++++++++++++--- src/server/startup.rs | 2 + 11 files changed, 585 insertions(+), 36 deletions(-) create mode 100644 backup.sql create mode 100644 backup/mysql-backup.tar.gz create mode 100644 backup_20251001.sql create mode 100644 docs/STREAMING_DOWNLOAD_GUIDE.md create mode 100644 full_backup.sql diff --git a/.gitignore b/.gitignore index fafefd6..75700c0 100755 --- a/.gitignore +++ b/.gitignore @@ -51,3 +51,5 @@ Desktop.ini # Backup files *~ *.bak + +mysql_data/ \ No newline at end of file diff --git a/Cargo.toml b/Cargo.toml index fd0f36f..980542c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,7 +2,7 @@ name = "cosmic-sync-server" version = "2.0.0" edition = "2024" -authors = ["System76 "] +authors = ["System76 "] description = "High-performance synchronization server for System76's COSMIC Desktop Environment" repository = "https://github.com/pop-os/cosmic-sync-server" license = "GPL-3.0" diff --git a/backup.sql b/backup.sql new file mode 100644 index 0000000..e69de29 diff --git a/backup/mysql-backup.tar.gz b/backup/mysql-backup.tar.gz new file mode 100644 index 0000000000000000000000000000000000000000..a409a4d9d0a5a7cc5d1ef12d13633392c305b2d2 GIT binary patch literal 87 zcmb2|=3oE==F6AU7g!oFFck*}8}PEcFqx5?k(H2^my?l{kijNmZf 1MB + +## Client Implementation Guide + +### 1. Download Method Selection + +클라이언트는 파일 메타데이터의 `file_size` 필드를 확인하여 다운로드 방법을 결정합니다. 
+ +```rust +// Example implementation (Rust) +use crate::sync::{DownloadFileRequest, DownloadFileChunk}; + +const STREAMING_THRESHOLD: u64 = 1 * 1024 * 1024; // 1MB + +async fn download_file( + client: &mut SyncServiceClient, + file_info: &FileInfo, + request: DownloadFileRequest, +) -> Result, Box> { + if file_info.file_size > STREAMING_THRESHOLD { + // Use streaming download for large files + download_file_stream(client, request).await + } else { + // Use unary download for small files + download_file_unary(client, request).await + } +} +``` + +### 2. Unary Download (Small Files ≤ 1MB) + +```rust +async fn download_file_unary( + client: &mut SyncServiceClient, + request: DownloadFileRequest, +) -> Result, Box> { + let response = client.download_file(request).await?; + let download_response = response.into_inner(); + + if !download_response.success { + return Err(download_response.return_message.into()); + } + + Ok(download_response.file_data) +} +``` + +### 3. Streaming Download (Large Files > 1MB) + +```rust +async fn download_file_stream( + client: &mut SyncServiceClient, + request: DownloadFileRequest, +) -> Result, Box> { + let mut stream = client.download_file_stream(request).await?.into_inner(); + + let mut file_data = Vec::new(); + let mut expected_seq = 0u64; + + while let Some(chunk_result) = stream.message().await? { + let chunk = chunk_result; + + // Verify sequence order + if chunk.seq != expected_seq { + return Err(format!( + "Chunk sequence mismatch: expected {}, got {}", + expected_seq, chunk.seq + ).into()); + } + + // Append chunk data + file_data.extend_from_slice(&chunk.data); + expected_seq += 1; + + // Check if this is the last chunk + if chunk.last { + info!("Download completed: received {} chunks, total {} bytes", + expected_seq, file_data.len()); + break; + } + } + + Ok(file_data) +} +``` + +### 4. Error Handling + +#### Server-Side Errors + +1. **파일이 너무 큰 경우 (Unary 사용 시)**: +``` +Error: "File too large for unary download; use DownloadFileStream" +``` +→ 자동으로 스트리밍 다운로드로 재시도 + +2. **Bandwidth Quota 초과**: +``` +Error: "Bandwidth quota exceeded: Monthly bandwidth limit reached" +Status: RESOURCE_EXHAUSTED +``` +→ 사용자에게 알림 후 나중에 재시도 + +3. **파일 미존재**: +``` +Error: "File not found" 또는 "File data not found" +Status: NOT_FOUND +``` +→ 파일이 삭제되었거나 존재하지 않음 + +#### Client-Side Retry Logic + +```rust +async fn download_file_with_retry( + client: &mut SyncServiceClient, + file_info: &FileInfo, + request: DownloadFileRequest, + max_retries: u32, +) -> Result, Box> { + let mut attempt = 0; + + loop { + attempt += 1; + + match download_file(client, file_info, request.clone()).await { + Ok(data) => return Ok(data), + Err(e) => { + let error_msg = e.to_string(); + + // Check if error suggests using streaming + if error_msg.contains("too large") && file_info.file_size <= STREAMING_THRESHOLD { + warn!("Server suggests streaming, updating threshold"); + // Force streaming for this file + return download_file_stream(client, request).await; + } + + // Check if error is retryable + if !is_retryable_error(&error_msg) || attempt >= max_retries { + return Err(e); + } + + // Exponential backoff + let delay = std::time::Duration::from_secs(2u64.pow(attempt - 1)); + warn!("Download failed (attempt {}/{}), retrying in {:?}: {}", + attempt, max_retries, delay, error_msg); + tokio::time::sleep(delay).await; + } + } + } +} + +fn is_retryable_error(error_msg: &str) -> bool { + error_msg.contains("network") || + error_msg.contains("timeout") || + error_msg.contains("unavailable") +} +``` + +### 5. 
Progress Tracking (Streaming) + +```rust +async fn download_file_stream_with_progress( + client: &mut SyncServiceClient, + request: DownloadFileRequest, + total_size: u64, + mut progress_callback: F, +) -> Result, Box> +where + F: FnMut(u64, u64), // (downloaded_bytes, total_bytes) +{ + let mut stream = client.download_file_stream(request).await?.into_inner(); + + let mut file_data = Vec::with_capacity(total_size as usize); + let mut expected_seq = 0u64; + let mut downloaded_bytes = 0u64; + + while let Some(chunk_result) = stream.message().await? { + let chunk = chunk_result; + + if chunk.seq != expected_seq { + return Err(format!( + "Chunk sequence mismatch: expected {}, got {}", + expected_seq, chunk.seq + ).into()); + } + + let chunk_size = chunk.data.len() as u64; + file_data.extend_from_slice(&chunk.data); + downloaded_bytes += chunk_size; + expected_seq += 1; + + // Report progress + progress_callback(downloaded_bytes, total_size); + + if chunk.last { + break; + } + } + + Ok(file_data) +} +``` + +## Best Practices + +### 1. File Metadata 캐싱 +- `ListFiles` 또는 `FindFileByCriteria`로 미리 파일 메타데이터를 가져옵니다 +- `file_size`를 확인하여 다운로드 방법을 사전에 결정합니다 + +### 2. Connection Reuse +- gRPC 클라이언트 연결을 재사용하여 오버헤드를 줄입니다 +- 여러 파일을 다운로드할 때 동일한 채널을 사용합니다 + +### 3. Concurrent Downloads +- 여러 파일을 동시에 다운로드할 때 너무 많은 동시 스트림을 열지 않습니다 +- 권장: 최대 3-5개의 동시 스트림 + +```rust +use tokio::sync::Semaphore; + +const MAX_CONCURRENT_DOWNLOADS: usize = 3; + +async fn download_files_concurrently( + client: Arc>>, + files: Vec<(FileInfo, DownloadFileRequest)>, +) -> Vec, Box>> { + let semaphore = Arc::new(Semaphore::new(MAX_CONCURRENT_DOWNLOADS)); + + let tasks: Vec<_> = files.into_iter().map(|(file_info, request)| { + let client = client.clone(); + let semaphore = semaphore.clone(); + + tokio::spawn(async move { + let _permit = semaphore.acquire().await.unwrap(); + let mut client = client.lock().await; + download_file(&mut client, &file_info, request).await + }) + }).collect(); + + futures::future::join_all(tasks).await + .into_iter() + .map(|r| r.unwrap()) + .collect() +} +``` + +### 4. Memory Management +- 스트리밍 다운로드 시 메모리 버퍼 크기를 제한합니다 +- 대용량 파일은 디스크에 직접 쓰는 것을 고려합니다 + +```rust +use tokio::io::AsyncWriteExt; + +async fn download_file_stream_to_disk( + client: &mut SyncServiceClient, + request: DownloadFileRequest, + output_path: &Path, +) -> Result<(), Box> { + let mut stream = client.download_file_stream(request).await?.into_inner(); + let mut file = tokio::fs::File::create(output_path).await?; + + let mut expected_seq = 0u64; + + while let Some(chunk_result) = stream.message().await? { + let chunk = chunk_result; + + if chunk.seq != expected_seq { + return Err(format!( + "Chunk sequence mismatch: expected {}, got {}", + expected_seq, chunk.seq + ).into()); + } + + // Write chunk directly to disk + file.write_all(&chunk.data).await?; + expected_seq += 1; + + if chunk.last { + file.flush().await?; + break; + } + } + + Ok(()) +} +``` + +### 5. 
Encryption Key Handling +- 파일이 암호화된 경우 (`is_encrypted = true`), `key_id`를 사용하여 복호화 키를 가져옵니다 +- 다운로드 전에 `RequestEncryptionKey`를 호출하거나 로컬 `key.toml`에서 키를 조회합니다 + +```rust +async fn download_and_decrypt( + client: &mut SyncServiceClient, + file_info: &FileInfo, + request: DownloadFileRequest, + key_store: &KeyStore, +) -> Result, Box> { + // Download file + let encrypted_data = download_file(client, file_info, request).await?; + + // Decrypt if necessary + if file_info.is_encrypted && !file_info.key_id.is_empty() { + let key = key_store.get_key(&file_info.key_id)?; + decrypt_file_data(&encrypted_data, &key) + } else { + Ok(encrypted_data) + } +} +``` + +## Testing + +### Unit Test Example +```rust +#[tokio::test] +async fn test_download_file_selection() { + let small_file = FileInfo { + file_size: 512 * 1024, // 512KB + ..Default::default() + }; + + let large_file = FileInfo { + file_size: 5 * 1024 * 1024, // 5MB + ..Default::default() + }; + + assert!(small_file.file_size <= STREAMING_THRESHOLD); + assert!(large_file.file_size > STREAMING_THRESHOLD); +} +``` + +## Troubleshooting + +### 문제: "File too large" 에러가 발생합니다 +**해결**: 클라이언트가 1MB 이상 파일에 대해 `DownloadFile` 대신 `DownloadFileStream`을 호출하도록 수정하세요. + +### 문제: 스트리밍 다운로드가 중간에 끊깁니다 +**해결**: +1. 네트워크 연결 상태 확인 +2. 서버 로그에서 스트림 오류 확인 +3. gRPC keepalive 설정 확인 +4. 재시도 로직 구현 + +### 문제: Bandwidth quota exceeded 에러 +**해결**: +1. 사용량 제한이 초과되었는지 확인 +2. 관리자에게 문의하여 quota 증가 요청 +3. 다음 월초까지 대기 + +## Related Documentation +- [Server Configuration](../config/README.md) +- [File Upload Guide](./UPLOAD_GUIDE.md) +- [Authentication Guide](./AUTHENTICATION.md) + + + diff --git a/full_backup.sql b/full_backup.sql new file mode 100644 index 0000000..e69de29 diff --git a/src/server/app_state.rs b/src/server/app_state.rs index 7d0436f..b76a3aa 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -352,6 +352,12 @@ impl AppState { ) -> crate::storage::Result>> { Ok(None) } + async fn get_file_data_stream( + &self, + _file_id: u64, + ) -> crate::storage::Result> + Send + Unpin>>> { + Ok(None) + } async fn delete_file_data(&self, _file_id: u64) -> crate::storage::Result<()> { Ok(()) } diff --git a/src/server/service.rs b/src/server/service.rs index 294c4ac..c694303 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -514,12 +514,9 @@ impl SyncService for SyncServiceImpl { request: Request, ) -> Result, Status> { let req = request.into_inner(); - - // Use existing handler to validate and fetch metadata + full bytes once for now - // In future, switch storage.get_file_data_stream for real streaming from backend let file_id = req.file_id; - let device_hash = req.device_hash.clone(); + // Authenticate let verified = self .app_state .oauth @@ -530,6 +527,7 @@ impl SyncService for SyncServiceImpl { return Err(Status::unauthenticated("Invalid authentication")); } + // Get file metadata let file_info = self .app_state .file @@ -538,40 +536,217 @@ impl SyncService for SyncServiceImpl { .map_err(|e| Status::internal(format!("Failed to get file info: {}", e)))? 
.ok_or_else(|| Status::not_found("File not found"))?; - let total_data = self + let file_size = file_info.size; + let account_hash = file_info.account_hash.clone(); + let revision = file_info.revision; + let device_hash = req.device_hash.clone(); + + info!( + "Streaming download: file_id={}, size={} bytes", + file_id, file_size + ); + + // Check bandwidth quota before streaming download + let event_id = nanoid::nanoid!(16); + let usage_check = self .app_state - .file - .get_file_data(file_id) - .await - .map_err(|e| Status::internal(format!("Failed to load data: {}", e)))? - .ok_or_else(|| Status::not_found("File data not found"))?; + .usage_checker + .check_before_operation( + &account_hash, + crate::services::usage_service::UsageOperation::Download { + bytes: file_size, + file_id, + revision, + device_hash: device_hash.clone(), + event_id: event_id.clone(), + }, + ) + .await; - // Chunking in-memory for now (hybrid step 1) - let (tx, rx) = mpsc::channel(16); - let chunk_size: usize = 1024 * 1024; // 1MB + match usage_check { + Ok(check_result) => { + if !check_result.allowed { + error!( + "Streaming download blocked due to bandwidth quota: {}", + check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + ); + return Err(Status::resource_exhausted(format!( + "Bandwidth quota exceeded: {}", + check_result.reason.unwrap_or_else(|| "Monthly bandwidth limit reached".to_string()) + ))); + } - tokio::spawn(async move { - let mut seq: u64 = 0; - let total_size = total_data.len() as u64; - for slice in total_data.chunks(chunk_size) { - let last = ((seq + 1) * chunk_size as u64) >= total_size; - let msg = DownloadFileChunk { - data: slice.to_vec(), - seq, - last, - total_size, - }; - if tx.send(Ok(msg)).await.is_err() { - break; + // Log warnings + for warning in &check_result.warnings { + warn!("Bandwidth warning for {}: {}", account_hash, warning); } - seq += 1; } - }); + Err(e) => { + // Fail-open: allow download but log error + error!("Usage check failed, allowing streaming download: {}", e); + } + } - let stream = ReceiverStream::new(rx); - Ok(Response::new( - Box::pin(stream) as Self::DownloadFileStreamStream - )) + // Try to use real streaming if available (S3/storage stream) + match self.app_state.storage.get_file_data_stream(file_id).await { + Ok(Some(backend_stream)) => { + debug!("Using native storage streaming for file_id={}", file_id); + + // Convert storage stream to gRPC chunks + let (tx, rx) = mpsc::channel(8); // Small buffer for backpressure + let chunk_size: usize = 1024 * 1024; // 1MB chunks + let total_size = file_info.size; + + let usage_checker = self.app_state.usage_checker.clone(); + let account_hash_clone = account_hash.clone(); + let device_hash_clone = device_hash.clone(); + + tokio::spawn(async move { + use futures::StreamExt; + let mut seq: u64 = 0; + let mut buffer = Vec::with_capacity(chunk_size); + let mut pinned_stream = Box::pin(backend_stream); + let mut stream_success = true; + + while let Some(result) = pinned_stream.next().await { + match result { + Ok(bytes) => { + buffer.extend_from_slice(&bytes); + + // Send full chunks + while buffer.len() >= chunk_size { + let chunk_data = buffer.drain(..chunk_size).collect::>(); + let msg = DownloadFileChunk { + data: chunk_data, + seq, + last: false, + total_size, + }; + if tx.send(Ok(msg)).await.is_err() { + debug!("Client disconnected, stopping stream"); + stream_success = false; + break; + } + seq += 1; + } + } + Err(e) => { + error!("Stream read error: {}", e); + let _ = 
tx.send(Err(Status::internal(format!("Stream error: {}", e)))).await; + stream_success = false; + break; + } + } + } + + // Send remaining data as final chunk + if stream_success && !buffer.is_empty() { + let msg = DownloadFileChunk { + data: buffer, + seq, + last: true, + total_size, + }; + if tx.send(Ok(msg)).await.is_err() { + stream_success = false; + } + } + + // Record usage after stream completion + let operation_result = if stream_success { + crate::services::usage_service::OperationResult::Success + } else { + crate::services::usage_service::OperationResult::Failed + }; + + if let Err(e) = usage_checker.record_after_operation( + &account_hash_clone, + crate::services::usage_service::UsageOperation::Download { + bytes: total_size, + file_id, + revision, + device_hash: device_hash_clone, + event_id, + }, + operation_result, + ).await { + error!("Failed to record streaming download usage: {}", e); + } + }); + + let stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream)) + } + Ok(None) => { + warn!("Storage stream returned None for file_id={}", file_id); + Err(Status::not_found("File data not found")) + } + Err(_) => { + // Fallback: NotImplemented or other storage errors -> use in-memory chunking + debug!("Storage streaming not available, falling back to in-memory chunking"); + + let total_data = self + .app_state + .file + .get_file_data(file_id) + .await + .map_err(|e| Status::internal(format!("Failed to load data: {}", e)))? + .ok_or_else(|| Status::not_found("File data not found"))?; + + let (tx, rx) = mpsc::channel(16); + let chunk_size: usize = 1024 * 1024; // 1MB + + let usage_checker = self.app_state.usage_checker.clone(); + let account_hash_clone = account_hash.clone(); + let device_hash_clone = device_hash.clone(); + + tokio::spawn(async move { + let mut seq: u64 = 0; + let total_size = total_data.len() as u64; + let mut stream_success = true; + + for slice in total_data.chunks(chunk_size) { + let last = ((seq + 1) * chunk_size as u64) >= total_size; + let msg = DownloadFileChunk { + data: slice.to_vec(), + seq, + last, + total_size, + }; + if tx.send(Ok(msg)).await.is_err() { + debug!("Client disconnected during fallback chunking"); + stream_success = false; + break; + } + seq += 1; + } + + // Record usage after fallback stream completion + let operation_result = if stream_success { + crate::services::usage_service::OperationResult::Success + } else { + crate::services::usage_service::OperationResult::Failed + }; + + if let Err(e) = usage_checker.record_after_operation( + &account_hash_clone, + crate::services::usage_service::UsageOperation::Download { + bytes: total_size, + file_id, + revision, + device_hash: device_hash_clone, + event_id, + }, + operation_result, + ).await { + error!("Failed to record fallback download usage: {}", e); + } + }); + + let stream = ReceiverStream::new(rx); + Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream)) + } + } } async fn list_files( diff --git a/src/server/startup.rs b/src/server/startup.rs index 574d66c..9fe66f0 100644 --- a/src/server/startup.rs +++ b/src/server/startup.rs @@ -137,6 +137,8 @@ async fn start_grpc_server(config: &ServerConfig, app_state: Arc) -> R let sync_client_service = SyncClientServiceImpl::new(app_state.clone()); // Wrap services with compression and message size limits + // Note: 64MB limit is sufficient for 1MB streaming chunks + metadata + // Unary downloads > 1MB will be rejected and advised to use streaming let sync_service = 
SyncServiceServer::new(sync_service) .accept_compressed(CompressionEncoding::Gzip) .send_compressed(CompressionEncoding::Gzip) From 948f0904a0f2e80a065c989cfaa4f1ac82c60c95 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 1 Oct 2025 16:56:16 -0600 Subject: [PATCH 55/71] Fix mysql auth_token mismatch --- src/storage/file_storage.rs | 86 ++++++++++++++++++++++++++++++++++++ src/storage/memory.rs | 16 +++++-- src/storage/mod.rs | 6 +++ src/storage/mysql.rs | 50 ++++++++++++++++----- src/storage/mysql_auth.rs | 26 ++++++----- src/storage/mysql_storage.rs | 34 ++++++++------ 6 files changed, 178 insertions(+), 40 deletions(-) diff --git a/src/storage/file_storage.rs b/src/storage/file_storage.rs index 9be3019..8ce118f 100644 --- a/src/storage/file_storage.rs +++ b/src/storage/file_storage.rs @@ -93,6 +93,17 @@ impl FileStorage for DatabaseFileStorage { self.mysql_storage.get_file_data(file_id).await } + async fn get_file_data_stream( + &self, + file_id: u64, + ) -> Result> + Send + Unpin>>> { + use crate::storage::Storage; + debug!("Streaming file data from database: file_id={}", file_id); + + // Delegate to MySQL storage stream implementation (via Storage trait) + Storage::get_file_data_stream(&*self.mysql_storage, file_id).await + } + async fn delete_file_data(&self, file_id: u64) -> Result<()> { debug!("Deleting file data from database: file_id={}", file_id); @@ -499,6 +510,81 @@ impl FileStorage for S3FileStorage { } } + /// Get file data as stream (real S3 streaming) + async fn get_file_data_stream( + &self, + file_id: u64, + ) -> Result> + Send + Unpin>>> { + let s3_key = self.generate_s3_key(file_id); + debug!( + "Streaming file data from S3: bucket={}, key={}", + self.config.bucket, s3_key + ); + + // Get object from S3 + let client = self.get_client().await?; + match client + .get_object() + .bucket(&self.config.bucket) + .key(&s3_key) + .send() + .await + { + Ok(response) => { + // AWS ByteStream to channel-based stream + // ByteStream doesn't implement futures::Stream directly, so we need to + // collect it or convert it via a channel + let (tx, rx) = tokio::sync::mpsc::channel(8); + let mut byte_stream = response.body; + + tokio::spawn(async move { + use bytes::Buf; + use tokio::io::AsyncReadExt; + + // Convert ByteStream to AsyncRead and read chunks + let mut reader = byte_stream.into_async_read(); + let mut buffer = vec![0u8; 1024 * 1024]; // 1MB buffer + + loop { + match reader.read(&mut buffer).await { + Ok(0) => break, // EOF + Ok(n) => { + let chunk = bytes::Bytes::copy_from_slice(&buffer[..n]); + if tx.send(Ok(chunk)).await.is_err() { + break; // Receiver dropped + } + } + Err(e) => { + let _ = tx.send(Err(crate::storage::StorageError::S3Error( + format!("Stream read error: {}", e) + ))).await; + break; + } + } + } + }); + + let stream = tokio_stream::wrappers::ReceiverStream::new(rx); + Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + } + Err(e) => { + // Check if it's a 404 error + if let aws_sdk_s3::error::SdkError::ServiceError(service_err) = &e { + if let GetObjectError::NoSuchKey(_) = service_err.err() { + debug!("File not found in S3 stream: {}", s3_key); + return Ok(None); + } + } + + error!("Failed to stream file from S3: {}", e); + Err(StorageError::S3Error(format!( + "Failed to stream download: {}", + e + ))) + } + } + } + async fn delete_file_data(&self, file_id: u64) -> Result<()> { let s3_key = self.generate_s3_key(file_id); debug!( diff --git a/src/storage/memory.rs b/src/storage/memory.rs index f885fc3..3a3bcf9 100644 --- a/src/storage/memory.rs +++ 
b/src/storage/memory.rs @@ -1349,11 +1349,19 @@ impl Storage for MemoryStorage { async fn get_file_data_stream( &self, - _file_id: u64, + file_id: u64, ) -> Result> + Send + Unpin>>> { - Err(StorageError::NotImplemented( - "get_file_data_stream not implemented".to_string(), - )) + // For memory storage, convert Vec to stream + match self.get_file_data(file_id).await? { + Some(data) => { + use futures::stream::StreamExt; + let stream = futures::stream::once(async move { + Ok(bytes::Bytes::from(data)) + }).boxed(); + Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + } + None => Ok(None), + } } async fn update_encryption_key( diff --git a/src/storage/mod.rs b/src/storage/mod.rs index 1c0eb6e..114daff 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -197,6 +197,12 @@ pub trait FileStorage: Send + Sync { /// Retrieve file data with caching async fn get_file_data(&self, file_id: u64) -> Result>>; + /// Retrieve file data as stream (for large files) + async fn get_file_data_stream( + &self, + file_id: u64, + ) -> Result> + Send + Unpin>>>; + /// Delete file data with cleanup async fn delete_file_data(&self, file_id: u64) -> Result<()>; diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 97ae6f5..11a673b 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -263,12 +263,14 @@ impl MySqlStorage { // Create auth_tokens table let create_auth_tokens_table = r" CREATE TABLE IF NOT EXISTS auth_tokens ( - id VARCHAR(36) NOT NULL, - token VARCHAR(255) NOT NULL PRIMARY KEY, + id VARCHAR(36) NOT NULL PRIMARY KEY, account_hash VARCHAR(255) NOT NULL, + access_token VARCHAR(1024) NOT NULL, + refresh_token VARCHAR(1024), + token_type VARCHAR(20) NOT NULL, created_at BIGINT NOT NULL, expires_at BIGINT NOT NULL, - is_active BOOLEAN NOT NULL DEFAULT TRUE, + INDEX (access_token(255)), INDEX (account_hash), FOREIGN KEY (account_hash) REFERENCES accounts(account_hash) ON DELETE CASCADE )"; @@ -2208,11 +2210,24 @@ impl Storage for MySqlStorage { async fn get_file_data_stream( &self, - _file_id: u64, + file_id: u64, ) -> Result> + Send + Unpin>>> { - Err(StorageError::NotImplemented( - "get_file_data_stream not implemented".to_string(), - )) + // For MySQL BLOB storage, we load the entire data and convert to stream + // True streaming from MySQL BLOB is not practical with sqlx + debug!("MySQL storage: converting blob to stream for file_id={}", file_id); + + // Disambiguate: use the MySqlFileExt trait method + match MySqlFileExt::get_file_data(self, file_id).await? { + Some(data) => { + // Convert Vec to stream of Bytes + use futures::stream::StreamExt; + let stream = futures::stream::once(async move { + Ok(bytes::Bytes::from(data)) + }).boxed(); + Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + } + None => Ok(None), + } } async fn update_encryption_key( @@ -2314,11 +2329,24 @@ impl MySqlStorage { async fn get_file_data_stream( &self, - _file_id: u64, + file_id: u64, ) -> Result> + Send + Unpin>>> { - Err(StorageError::NotImplemented( - "get_file_data_stream not implemented".to_string(), - )) + // For MySQL BLOB storage, we load the entire data and convert to stream + // True streaming from MySQL BLOB is not practical with sqlx + debug!("MySQL storage: converting blob to stream for file_id={}", file_id); + + // Disambiguate: use the MySqlFileExt trait method + match MySqlFileExt::get_file_data(self, file_id).await? 
{ + Some(data) => { + // Convert Vec to stream of Bytes + use futures::stream::StreamExt; + let stream = futures::stream::once(async move { + Ok(bytes::Bytes::from(data)) + }).boxed(); + Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + } + None => Ok(None), + } } async fn update_encryption_key( diff --git a/src/storage/mysql_auth.rs b/src/storage/mysql_auth.rs index 623b59e..5b9855d 100644 --- a/src/storage/mysql_auth.rs +++ b/src/storage/mysql_auth.rs @@ -27,15 +27,17 @@ pub trait MySqlAuthExt { impl MySqlAuthExt for MySqlStorage { /// 인증 토큰 생성 async fn create_auth_token(&self, auth_token: &AuthToken) -> Result<()> { - // 스키마에 맞게 최소 필드 저장 (token 컬럼 사용) + // 스키마에 맞게 필드 저장 (access_token, refresh_token, token_type 사용) sqlx::query( r#"INSERT INTO auth_tokens ( - id, account_hash, token, created_at, expires_at - ) VALUES (?, ?, ?, ?, ?)"#, + id, account_hash, access_token, refresh_token, token_type, created_at, expires_at + ) VALUES (?, ?, ?, ?, ?, ?, ?)"#, ) .bind(&auth_token.token_id) .bind(&auth_token.account_hash) .bind(&auth_token.access_token) + .bind(&auth_token.refresh_token) + .bind(&auth_token.token_type) .bind(auth_token.created_at.timestamp()) .bind(auth_token.expires_at.timestamp()) .execute(self.get_sqlx_pool()) @@ -48,11 +50,11 @@ impl MySqlAuthExt for MySqlStorage { /// 인증 토큰 조회 async fn get_auth_token(&self, token: &str) -> Result> { debug!("데이터베이스에서 인증 토큰 조회: {}", token); - let token_data: Option<(String, String, String, i64, i64)> = sqlx::query_as( + let token_data: Option<(String, String, String, Option, String, i64, i64)> = sqlx::query_as( r#"SELECT - id, account_hash, token, expires_at, created_at + id, account_hash, access_token, refresh_token, token_type, expires_at, created_at FROM auth_tokens - WHERE token = ?"#, + WHERE access_token = ?"#, ) .bind(token) .fetch_optional(self.get_sqlx_pool()) @@ -60,7 +62,7 @@ impl MySqlAuthExt for MySqlStorage { .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?; match token_data { - Some((token_id, account_hash, access_token, expires_at, created_at)) => { + Some((token_id, account_hash, access_token, refresh_token, token_type, expires_at, created_at)) => { // 타임스탬프를 DateTime으로 변환 let expires_at = match Utc.timestamp_opt(expires_at, 0) { chrono::LocalResult::Single(dt) => dt, @@ -84,13 +86,13 @@ impl MySqlAuthExt for MySqlStorage { } }; - // AuthToken 객체 생성 (스키마에 없는 필드는 기본값 적용) + // AuthToken 객체 생성 let auth_token = AuthToken { token_id, account_hash, access_token, - token_type: "Bearer".to_string(), - refresh_token: None, + token_type, + refresh_token, scope: None, expires_at, created_at, @@ -114,7 +116,7 @@ impl MySqlAuthExt for MySqlStorage { let result: Option = sqlx::query_scalar( r#"SELECT account_hash FROM auth_tokens - WHERE token = ? + WHERE access_token = ? AND account_hash = ? 
AND expires_at > ?"#, ) @@ -137,7 +139,7 @@ impl MySqlAuthExt for MySqlStorage { /// 인증 토큰 삭제 async fn delete_auth_token(&self, token: &str) -> Result<()> { - sqlx::query(r#"DELETE FROM auth_tokens WHERE token = ?"#) + sqlx::query(r#"DELETE FROM auth_tokens WHERE access_token = ?"#) .bind(token) .execute(self.get_sqlx_pool()) .await diff --git a/src/storage/mysql_storage.rs b/src/storage/mysql_storage.rs index 83912be..6a9900d 100644 --- a/src/storage/mysql_storage.rs +++ b/src/storage/mysql_storage.rs @@ -108,15 +108,18 @@ impl Storage for MySqlStorage { /// 인증 토큰 저장 async fn save_auth_token(&self, token: &AuthToken) -> Result<(), Box> { - // 테이블 스키마: token(PK), account_hash, created_at, expires_at + // 테이블 스키마: id, account_hash, access_token, refresh_token, token_type, created_at, expires_at sqlx::query( - "INSERT INTO auth_tokens (token, account_hash, created_at, expires_at) - VALUES (?, ?, ?, ?)" + "INSERT INTO auth_tokens (id, account_hash, access_token, refresh_token, token_type, created_at, expires_at) + VALUES (?, ?, ?, ?, ?, ?, ?)" ) - .bind(&token.access_token) + .bind(&token.token_id) .bind(&token.account_hash) - .bind(token.created_at) - .bind(token.expires_at) + .bind(&token.access_token) + .bind(&token.refresh_token) + .bind(&token.token_type) + .bind(token.created_at.timestamp()) + .bind(token.expires_at.timestamp()) .execute(&self.pool) .await?; @@ -125,12 +128,13 @@ impl Storage for MySqlStorage { /// 토큰으로 계정 해시 조회 async fn get_token_data(&self, token: &str) -> Result, Box> { + let now = Utc::now().timestamp(); let record = sqlx::query!( "SELECT account_hash FROM auth_tokens - WHERE token = ? AND expires_at > ?", + WHERE access_token = ? AND expires_at > ?", token, - Utc::now() + now ) .fetch_optional(&self.pool) .await?; @@ -142,15 +146,19 @@ impl Storage for MySqlStorage { async fn store_token(&self, token: &str, account_hash: &str) -> Result<(), Box> { let now = Utc::now(); let expires_at = now + chrono::Duration::days(30); + let token_id = uuid::Uuid::new_v4().to_string(); sqlx::query( - "INSERT INTO auth_tokens (token, account_hash, created_at, expires_at) - VALUES (?, ?, ?, ?)" + "INSERT INTO auth_tokens (id, account_hash, access_token, refresh_token, token_type, created_at, expires_at) + VALUES (?, ?, ?, ?, ?, ?, ?)" ) - .bind(token) + .bind(token_id) .bind(account_hash) - .bind(now) - .bind(expires_at) + .bind(token) + .bind(Option::::None) + .bind("Bearer") + .bind(now.timestamp()) + .bind(expires_at.timestamp()) .execute(&self.pool) .await?; From ebd3f03890c06a2cd10777538d2cb3095a3608b1 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Wed, 1 Oct 2025 16:59:14 -0600 Subject: [PATCH 56/71] Fix starting process --- Cargo.toml | 4 ---- 1 file changed, 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index a8edb7c..980542c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -2,11 +2,7 @@ name = "cosmic-sync-server" version = "2.0.0" edition = "2024" -<<<<<<< HEAD authors = ["System76 "] -======= -authors = ["System76 "] ->>>>>>> staging description = "High-performance synchronization server for System76's COSMIC Desktop Environment" repository = "https://github.com/pop-os/cosmic-sync-server" license = "GPL-3.0" From 9e049c7f0ef02518fb1349c37abbc57fa411f009 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Thu, 2 Oct 2025 10:29:32 -0600 Subject: [PATCH 57/71] Migrate DB table --- src/models/file.rs | 6 ++- src/server/app_state.rs | 10 ++++- src/server/service.rs | 77 ++++++++++++++++++++++--------------- src/storage/file_storage.rs | 22 +++++++---- src/storage/memory.rs 
| 10 +++-- src/storage/mysql.rs | 34 ++++++++++------ src/storage/mysql_auth.rs | 25 ++++++++---- 7 files changed, 120 insertions(+), 64 deletions(-) diff --git a/src/models/file.rs b/src/models/file.rs index 2c04b98..5f446b5 100644 --- a/src/models/file.rs +++ b/src/models/file.rs @@ -193,7 +193,11 @@ impl From for FileInfo { revision: proto.revision, account_hash: String::new(), size: proto.file_size, - key_id: if proto.key_id.is_empty() { None } else { Some(proto.key_id) }, + key_id: if proto.key_id.is_empty() { + None + } else { + Some(proto.key_id) + }, } } } diff --git a/src/server/app_state.rs b/src/server/app_state.rs index b76a3aa..667c4ec 100644 --- a/src/server/app_state.rs +++ b/src/server/app_state.rs @@ -355,7 +355,15 @@ impl AppState { async fn get_file_data_stream( &self, _file_id: u64, - ) -> crate::storage::Result> + Send + Unpin>>> { + ) -> crate::storage::Result< + Option< + Box< + dyn futures::Stream> + + Send + + Unpin, + >, + >, + > { Ok(None) } async fn delete_file_data(&self, _file_id: u64) -> crate::storage::Result<()> { diff --git a/src/server/service.rs b/src/server/service.rs index c694303..8513916 100755 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -568,11 +568,16 @@ impl SyncService for SyncServiceImpl { if !check_result.allowed { error!( "Streaming download blocked due to bandwidth quota: {}", - check_result.reason.as_ref().unwrap_or(&"Unknown reason".to_string()) + check_result + .reason + .as_ref() + .unwrap_or(&"Unknown reason".to_string()) ); return Err(Status::resource_exhausted(format!( "Bandwidth quota exceeded: {}", - check_result.reason.unwrap_or_else(|| "Monthly bandwidth limit reached".to_string()) + check_result + .reason + .unwrap_or_else(|| "Monthly bandwidth limit reached".to_string()) ))); } @@ -591,7 +596,7 @@ impl SyncService for SyncServiceImpl { match self.app_state.storage.get_file_data_stream(file_id).await { Ok(Some(backend_stream)) => { debug!("Using native storage streaming for file_id={}", file_id); - + // Convert storage stream to gRPC chunks let (tx, rx) = mpsc::channel(8); // Small buffer for backpressure let chunk_size: usize = 1024 * 1024; // 1MB chunks @@ -612,7 +617,7 @@ impl SyncService for SyncServiceImpl { match result { Ok(bytes) => { buffer.extend_from_slice(&bytes); - + // Send full chunks while buffer.len() >= chunk_size { let chunk_data = buffer.drain(..chunk_size).collect::>(); @@ -632,7 +637,9 @@ impl SyncService for SyncServiceImpl { } Err(e) => { error!("Stream read error: {}", e); - let _ = tx.send(Err(Status::internal(format!("Stream error: {}", e)))).await; + let _ = tx + .send(Err(Status::internal(format!("Stream error: {}", e)))) + .await; stream_success = false; break; } @@ -659,23 +666,28 @@ impl SyncService for SyncServiceImpl { crate::services::usage_service::OperationResult::Failed }; - if let Err(e) = usage_checker.record_after_operation( - &account_hash_clone, - crate::services::usage_service::UsageOperation::Download { - bytes: total_size, - file_id, - revision, - device_hash: device_hash_clone, - event_id, - }, - operation_result, - ).await { + if let Err(e) = usage_checker + .record_after_operation( + &account_hash_clone, + crate::services::usage_service::UsageOperation::Download { + bytes: total_size, + file_id, + revision, + device_hash: device_hash_clone, + event_id, + }, + operation_result, + ) + .await + { error!("Failed to record streaming download usage: {}", e); } }); let stream = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream)) + 
Ok(Response::new( + Box::pin(stream) as Self::DownloadFileStreamStream + )) } Ok(None) => { warn!("Storage stream returned None for file_id={}", file_id); @@ -684,7 +696,7 @@ impl SyncService for SyncServiceImpl { Err(_) => { // Fallback: NotImplemented or other storage errors -> use in-memory chunking debug!("Storage streaming not available, falling back to in-memory chunking"); - + let total_data = self .app_state .file @@ -728,23 +740,28 @@ impl SyncService for SyncServiceImpl { crate::services::usage_service::OperationResult::Failed }; - if let Err(e) = usage_checker.record_after_operation( - &account_hash_clone, - crate::services::usage_service::UsageOperation::Download { - bytes: total_size, - file_id, - revision, - device_hash: device_hash_clone, - event_id, - }, - operation_result, - ).await { + if let Err(e) = usage_checker + .record_after_operation( + &account_hash_clone, + crate::services::usage_service::UsageOperation::Download { + bytes: total_size, + file_id, + revision, + device_hash: device_hash_clone, + event_id, + }, + operation_result, + ) + .await + { error!("Failed to record fallback download usage: {}", e); } }); let stream = ReceiverStream::new(rx); - Ok(Response::new(Box::pin(stream) as Self::DownloadFileStreamStream)) + Ok(Response::new( + Box::pin(stream) as Self::DownloadFileStreamStream + )) } } } diff --git a/src/storage/file_storage.rs b/src/storage/file_storage.rs index 8ce118f..e4453da 100644 --- a/src/storage/file_storage.rs +++ b/src/storage/file_storage.rs @@ -536,15 +536,15 @@ impl FileStorage for S3FileStorage { // collect it or convert it via a channel let (tx, rx) = tokio::sync::mpsc::channel(8); let mut byte_stream = response.body; - + tokio::spawn(async move { use bytes::Buf; use tokio::io::AsyncReadExt; - + // Convert ByteStream to AsyncRead and read chunks let mut reader = byte_stream.into_async_read(); let mut buffer = vec![0u8; 1024 * 1024]; // 1MB buffer - + loop { match reader.read(&mut buffer).await { Ok(0) => break, // EOF @@ -555,17 +555,23 @@ impl FileStorage for S3FileStorage { } } Err(e) => { - let _ = tx.send(Err(crate::storage::StorageError::S3Error( - format!("Stream read error: {}", e) - ))).await; + let _ = tx + .send(Err(crate::storage::StorageError::S3Error(format!( + "Stream read error: {}", + e + )))) + .await; break; } } } }); - + let stream = tokio_stream::wrappers::ReceiverStream::new(rx); - Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + Ok(Some(Box::new(stream) + as Box< + dyn futures::Stream> + Send + Unpin, + >)) } Err(e) => { // Check if it's a 404 error diff --git a/src/storage/memory.rs b/src/storage/memory.rs index 3a3bcf9..d9f6ef1 100644 --- a/src/storage/memory.rs +++ b/src/storage/memory.rs @@ -1355,10 +1355,12 @@ impl Storage for MemoryStorage { match self.get_file_data(file_id).await? 
{ Some(data) => { use futures::stream::StreamExt; - let stream = futures::stream::once(async move { - Ok(bytes::Bytes::from(data)) - }).boxed(); - Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + let stream = + futures::stream::once(async move { Ok(bytes::Bytes::from(data)) }).boxed(); + Ok(Some(Box::new(stream) + as Box< + dyn futures::Stream> + Send + Unpin, + >)) } None => Ok(None), } diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 11a673b..2c6bd27 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -2214,17 +2214,22 @@ impl Storage for MySqlStorage { ) -> Result> + Send + Unpin>>> { // For MySQL BLOB storage, we load the entire data and convert to stream // True streaming from MySQL BLOB is not practical with sqlx - debug!("MySQL storage: converting blob to stream for file_id={}", file_id); - + debug!( + "MySQL storage: converting blob to stream for file_id={}", + file_id + ); + // Disambiguate: use the MySqlFileExt trait method match MySqlFileExt::get_file_data(self, file_id).await? { Some(data) => { // Convert Vec to stream of Bytes use futures::stream::StreamExt; - let stream = futures::stream::once(async move { - Ok(bytes::Bytes::from(data)) - }).boxed(); - Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + let stream = + futures::stream::once(async move { Ok(bytes::Bytes::from(data)) }).boxed(); + Ok(Some(Box::new(stream) + as Box< + dyn futures::Stream> + Send + Unpin, + >)) } None => Ok(None), } @@ -2333,17 +2338,22 @@ impl MySqlStorage { ) -> Result> + Send + Unpin>>> { // For MySQL BLOB storage, we load the entire data and convert to stream // True streaming from MySQL BLOB is not practical with sqlx - debug!("MySQL storage: converting blob to stream for file_id={}", file_id); - + debug!( + "MySQL storage: converting blob to stream for file_id={}", + file_id + ); + // Disambiguate: use the MySqlFileExt trait method match MySqlFileExt::get_file_data(self, file_id).await? 
{ Some(data) => { // Convert Vec to stream of Bytes use futures::stream::StreamExt; - let stream = futures::stream::once(async move { - Ok(bytes::Bytes::from(data)) - }).boxed(); - Ok(Some(Box::new(stream) as Box> + Send + Unpin>)) + let stream = + futures::stream::once(async move { Ok(bytes::Bytes::from(data)) }).boxed(); + Ok(Some(Box::new(stream) + as Box< + dyn futures::Stream> + Send + Unpin, + >)) } None => Ok(None), } diff --git a/src/storage/mysql_auth.rs b/src/storage/mysql_auth.rs index 5b9855d..244d64c 100644 --- a/src/storage/mysql_auth.rs +++ b/src/storage/mysql_auth.rs @@ -50,19 +50,28 @@ impl MySqlAuthExt for MySqlStorage { /// 인증 토큰 조회 async fn get_auth_token(&self, token: &str) -> Result> { debug!("데이터베이스에서 인증 토큰 조회: {}", token); - let token_data: Option<(String, String, String, Option, String, i64, i64)> = sqlx::query_as( - r#"SELECT + let token_data: Option<(String, String, String, Option, String, i64, i64)> = + sqlx::query_as( + r#"SELECT id, account_hash, access_token, refresh_token, token_type, expires_at, created_at FROM auth_tokens WHERE access_token = ?"#, - ) - .bind(token) - .fetch_optional(self.get_sqlx_pool()) - .await - .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?; + ) + .bind(token) + .fetch_optional(self.get_sqlx_pool()) + .await + .map_err(|e| StorageError::Database(format!("토큰 조회 쿼리 실패: {}", e)))?; match token_data { - Some((token_id, account_hash, access_token, refresh_token, token_type, expires_at, created_at)) => { + Some(( + token_id, + account_hash, + access_token, + refresh_token, + token_type, + expires_at, + created_at, + )) => { // 타임스탬프를 DateTime으로 변환 let expires_at = match Utc.timestamp_opt(expires_at, 0) { chrono::LocalResult::Single(dt) => dt, From f3f332b1d640bf63a9a08dbfe4bdaf1c3138be76 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 16:20:27 -0700 Subject: [PATCH 58/71] Update delete routine --- src/handlers/file/delete.rs | 40 ++++++++++++++++++++++---- src/handlers/file/rename.rs | 39 ++++++++++++++++++++++++-- src/storage/mysql.rs | 44 +++++++++++++++++++++++++++-- src/storage/mysql_file.rs | 56 ++++++++++++++++++++----------------- 4 files changed, 143 insertions(+), 36 deletions(-) diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 4525817..55424aa 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -101,11 +101,41 @@ pub async fn handle_delete_file( ))) } Err(e) => { - error!("File deletion failed: file_id={}, error={}", file_id, e); - Ok(Response::new(response::file_delete_error(format!( - "File deletion failed: {}", - e - )))) + // Determine error category for logging and client classification + let error_msg = format!("{}", e); + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") + || error_msg.contains("permission") + || error_msg.contains("access denied") + || error_msg.contains("type mismatch") + || error_msg.contains("mismatched types"); + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") + || error_msg.contains("File not found") + || error_msg.contains("already deleted") + || error_msg.contains("File data not found"); + + error!( + "Delete operation failed - file_id: {}, path: {}, error: {}, error_type: {:?}, is_permanent: {}", + file_id, + req.file_path, + error_msg, + if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, + is_permanent_error + ); + + // Format error 
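message for client

The branches above classify failures by substring-matching the storage error's
Display text: schema and permission problems are permanent (a retry cannot
succeed), missing-file variants are safe for the client to treat as already
done, and everything else is temporary. Condensed into a standalone function
-- same marker strings as the handler, with the caveat that the handler gives
not-found precedence when formatting the client-facing message:

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DeleteErrorKind {
    Permanent, // schema / permission problem: retrying cannot help
    NotFound,  // file already gone: client may treat as success
    Temporary, // anything else: client may retry
}

fn classify_delete_error(error_msg: &str) -> DeleteErrorKind {
    const PERMANENT: &[&str] = &[
        "VARBINARY", "VARCHAR", "schema", "permission",
        "access denied", "type mismatch", "mismatched types",
    ];
    const NOT_FOUND: &[&str] = &[
        "not found", "No such file", "File not found",
        "already deleted", "File data not found",
    ];
    if PERMANENT.iter().any(|m| error_msg.contains(m)) {
        DeleteErrorKind::Permanent
    } else if NOT_FOUND.iter().any(|m| error_msg.contains(m)) {
        DeleteErrorKind::NotFound
    } else {
        DeleteErrorKind::Temporary
    }
}

+            // Format error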
message for client + let client_error_msg = if is_not_found { + format!("File not found: already deleted") + } else if is_permanent_error { + format!("DB schema error: column type mismatch - {}", error_msg) + } else { + format!("File deletion failed: {}", error_msg) + }; + + Ok(Response::new(response::file_delete_error(client_error_msg))) } } } diff --git a/src/handlers/file/rename.rs b/src/handlers/file/rename.rs index 160593d..03f9dc7 100644 --- a/src/handlers/file/rename.rs +++ b/src/handlers/file/rename.rs @@ -48,7 +48,7 @@ pub async fn handle_rename_file( Ok(None) => { return Ok(Response::new(RenameFileResponse { success: false, - return_message: "File not found".to_string(), + return_message: "File not found: already deleted".to_string(), new_revision: 0, conflict: Some(ConflictInfo { r#type: ConflictType::FileNotFound as i32, @@ -181,10 +181,43 @@ pub async fn handle_rename_file( })) } Err(e) => { - error!("Failed to rename file: {}", e); + // Determine error category for logging and client classification + let error_msg = format!("{}", e); + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") + || error_msg.contains("permission") + || error_msg.contains("access denied") + || error_msg.contains("type mismatch") + || error_msg.contains("mismatched types"); + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") + || error_msg.contains("File not found") + || error_msg.contains("already deleted"); + + error!( + "Rename operation failed - file_id: {}, old_path: {}, new_path: {}, error: {}, error_type: {:?}, is_permanent: {}", + req.file_id, + req.old_file_path, + req.new_file_path, + error_msg, + if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, + is_permanent_error + ); + + // Format error message for client + let client_error_msg = if is_not_found { + format!("File not found: already deleted") + } else if is_permanent_error { + format!("DB schema error: column type mismatch - {}", error_msg) + } else { + format!("Failed to rename file: {}", error_msg) + }; + Ok(Response::new(RenameFileResponse { success: false, - return_message: format!("Failed to rename file: {}", e), + return_message: client_error_msg, new_revision: file_info.revision, conflict: None, })) diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 02bc245..7036b80 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1577,15 +1577,53 @@ impl Storage for MySqlStorage { device_hash: &str, new_revision: i64, ) -> Result<()> { - let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, updated_at = NOW() WHERE id = ?"; + use sqlx::Row; + + // First, get file information to obtain account_hash, group_id, watcher_id for encryption + let row_opt = sqlx::query( + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? 
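LIMIT 1"#

Before the UPDATE below runs, the row's account_hash/group_id/watcher_id are
fetched so the renamed path can be encrypted for the VARBINARY column with the
same associated data as the original write. A sketch of that binding, reusing
the patch's crate::utils::crypto::aead_encrypt(key, plaintext, aad) -> Vec<u8>
helper (its internals are not shown in this series):

/// Encrypt a path for VARBINARY storage, falling back to plaintext bytes
/// when no valid 32-byte server key is configured -- mirroring the patch.
fn encode_path_for_storage(
    server_encode_key: Option<&Vec<u8>>,
    account_hash: &str,
    group_id: i32,
    watcher_id: i32,
    path: &str,
) -> Vec<u8> {
    match server_encode_key {
        Some(kv) if kv.len() == 32 => {
            let key: &[u8; 32] = kv.as_slice().try_into().expect("len checked");
            // The AAD ties the ciphertext to this account/group/watcher, so a
            // value copied into another row fails authentication on decrypt.
            let aad = format!("{}:{}:{}", account_hash, group_id, watcher_id);
            crate::utils::crypto::aead_encrypt(key, path.as_bytes(), aad.as_bytes())
        }
        _ => path.as_bytes().to_vec(),
    }
}

The same hunk also switches the UPDATE to the actual column names
(updated_time, file_id).

+        r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ?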
LIMIT 1"# + ) + .bind(file_id) + .fetch_optional(&self.sqlx_pool) + .await + .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; + + let (account_hash, group_id, watcher_id) = match row_opt { + Some(row) => { + let account_hash: String = row.try_get("account_hash").unwrap_or_default(); + let group_id: i32 = row.try_get("group_id").unwrap_or(0); + let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); + (account_hash, group_id, watcher_id) + } + None => { + return Err(StorageError::NotFound(format!("File not found: {}", file_id))); + } + }; + + // Encrypt new_file_path for VARBINARY storage + let cfg = crate::server::app_state::AppState::get_config(); + let new_file_path_bytes = if let Some(kv) = cfg.server_encode_key.as_ref() { + if kv.len() == 32 { + let key: &[u8; 32] = kv.as_slice().try_into().expect("len checked"); + let aad = format!("{}:{}:{}", account_hash, group_id, watcher_id); + crate::utils::crypto::aead_encrypt(key, new_file_path.as_bytes(), aad.as_bytes()) + } else { + new_file_path.as_bytes().to_vec() + } + } else { + new_file_path.as_bytes().to_vec() + }; + + // Update file_path with encrypted bytes (VARBINARY) + let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, updated_time = NOW() WHERE file_id = ?"; sqlx::query(query) - .bind(new_file_path) + .bind(&new_file_path_bytes) .bind(device_hash) .bind(new_revision) .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| StorageError::General(format!("Failed to rename file: {}", e)))?; + .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index fa79dd7..93fdc74 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -737,11 +737,12 @@ impl MySqlFileExt for MySqlStorage { account_hash, file_id ); let mut tx = self.get_sqlx_pool().begin().await.map_err(|e| { - StorageError::Database(format!("Transaction start failed(sqlx): {}", e)) + StorageError::Database(format!("Transaction start failed (schema/database error): {}", e)) })?; // Check if file exists and belongs to the user - let file_exists: Option<(u64, i64, String, String, String, i32, i32)> = sqlx::query_as( + // Use Vec for VARBINARY columns (file_path, filename) + let row_opt = sqlx::query( r#"SELECT file_id, revision, file_path, filename, device_hash, group_id, watcher_id FROM files WHERE file_id = ? 
AND account_hash = ?"#, ) @@ -749,9 +750,9 @@ impl MySqlFileExt for MySqlStorage { .bind(account_hash) .fetch_optional(&mut *tx) .await - .map_err(|e| StorageError::Database(format!("File existence check failed(sqlx): {}", e)))?; + .map_err(|e| StorageError::Database(format!("File existence check failed (schema/database error): {}", e)))?; - if file_exists.is_none() { + if row_opt.is_none() { debug!( "File to delete does not exist or does not belong to the user: file_id={}, account_hash={}", file_id, account_hash @@ -762,20 +763,23 @@ impl MySqlFileExt for MySqlStorage { ))); } - let (_, current_revision, file_path, filename, device_hash, group_id, watcher_id) = - if let Some(rec) = file_exists { - rec - } else { - return Err(StorageError::NotFound(format!( - "Cannot find file: {}", - file_id - ))); - }; + use sqlx::Row; + let row = row_opt.unwrap(); + let current_revision: i64 = row.try_get("revision").unwrap_or(0); + let file_path_bytes: Vec = row.try_get("file_path").unwrap_or_default(); + let filename_bytes: Vec = row.try_get("filename").unwrap_or_default(); + let device_hash: String = row.try_get("device_hash").unwrap_or_default(); + let group_id: i32 = row.try_get("group_id").unwrap_or(0); + let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); + let new_revision = current_revision + 1; + // Decrypt for logging purposes only + let file_path_for_log = self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes.clone()); + let filename_for_log = self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes.clone()); debug!( "Processing file deletion: file_id={}, file_path={}, filename={}, current_revision={}, new_revision={}", - file_id, file_path, filename, current_revision, new_revision + file_id, file_path_for_log, filename_for_log, current_revision, new_revision ); let now = Utc::now().timestamp(); @@ -787,25 +791,26 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { StorageError::Database(format!( - "Existing file deletion marking failed(sqlx): {}", + "Existing file deletion marking failed (schema/database error): {}", e )) })?; // 2. Update all previous revisions with the same file path and name to is_deleted=1 + // Use Vec for VARBINARY columns to avoid type mismatch sqlx::query( r#"UPDATE files SET is_deleted = 1 WHERE account_hash = ? AND file_path = ? AND filename = ? AND group_id = ?"#, ) .bind(account_hash) - .bind(&file_path) - .bind(&filename) + .bind(&file_path_bytes) + .bind(&filename_bytes) .bind(group_id) .execute(&mut *tx) .await .map_err(|e| { StorageError::Database(format!( - "Previous version file deletion marking failed(sqlx): {}", + "Previous version file deletion marking failed (schema/VARBINARY type mismatch): {}", e )) })?; @@ -813,13 +818,14 @@ impl MySqlFileExt for MySqlStorage { // 3. 
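Adding deletion history

The shape of this transaction is worth spelling out: it flips is_deleted on
the target row, sweeps every older revision of the same path, then records a
fresh tombstone row so the deletion itself becomes the newest entry for that
path. A skeleton with the encryption, the extra columns, and the revision
bookkeeping elided:

/// Sketch of the delete transaction's shape; the real code also encrypts
/// paths, carries more columns, and updates revision/timestamps.
async fn delete_file_tx(
    pool: &sqlx::MySqlPool,
    account_hash: &str,
    file_id: u64,
    file_path_bytes: &[u8],
    filename_bytes: &[u8],
    group_id: i32,
) -> Result<(), sqlx::Error> {
    let mut tx = pool.begin().await?;

    // 1. Mark the target row deleted.
    sqlx::query("UPDATE files SET is_deleted = 1 WHERE file_id = ? AND account_hash = ?")
        .bind(file_id)
        .bind(account_hash)
        .execute(&mut *tx)
        .await?;

    // 2. Mark every revision of the same path/name deleted. VARBINARY
    //    columns are bound as byte slices, never as &str.
    sqlx::query(
        "UPDATE files SET is_deleted = 1 \
         WHERE account_hash = ? AND file_path = ? AND filename = ? AND group_id = ?",
    )
    .bind(account_hash)
    .bind(file_path_bytes)
    .bind(filename_bytes)
    .bind(group_id)
    .execute(&mut *tx)
    .await?;

    // 3. Insert a tombstone row recording the deletion (columns trimmed).
    sqlx::query(
        "INSERT INTO files (file_id, account_hash, file_path, filename, size) \
         VALUES (?, ?, ?, ?, 0)",
    )
    .bind(rand::random::<u64>()) // fresh random id, as in the patch
    .bind(account_hash)
    .bind(file_path_bytes)
    .bind(filename_bytes)
    .execute(&mut *tx)
    .await?;

    tx.commit().await
}

        // 3.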
Adding deletion history debug!( "Adding deletion history: file_path={}, filename={}", - file_path, filename + file_path_for_log, filename_for_log ); // Generate new file_id (random value) let new_file_id = rand::random::(); // INSERT with explicitly specified file_id field + // Use Vec for VARBINARY columns to avoid type mismatch sqlx::query( r#"INSERT INTO files (file_id, account_hash, device_hash, file_path, filename, file_hash, size, unix_permissions) @@ -828,14 +834,14 @@ impl MySqlFileExt for MySqlStorage { .bind(new_file_id) .bind(account_hash) .bind(&device_hash) - .bind(&file_path) - .bind(&filename) - .bind(&file_path) + .bind(&file_path_bytes) + .bind(&filename_bytes) + .bind(&file_path_bytes) // file_hash uses file_path_bytes .bind(0i64) .bind(None::) .execute(&mut *tx) .await - .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, sqlx): {}", e)))?; + .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, schema/VARBINARY type mismatch): {}", e)))?; // Update remaining fields sqlx::query( @@ -858,14 +864,14 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { StorageError::Database(format!( - "Deletion history addition failed (step 2, sqlx): {}", + "Deletion history addition failed (step 2, schema/database error): {}", e )) })?; // Commit transaction tx.commit().await.map_err(|e| { - StorageError::Database(format!("Transaction commit failed(sqlx): {}", e)) + StorageError::Database(format!("Transaction commit failed (schema/database error): {}", e)) })?; info!( From a8d973d6c7325485438f075caeae11ab7013875b Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 16:21:50 -0700 Subject: [PATCH 59/71] Update delete routine --- src/handlers/file/delete.rs | 18 +++++++++--------- src/handlers/file/rename.rs | 18 +++++++++--------- src/storage/mysql.rs | 23 ++++++++++++++++++----- src/storage/mysql_file.rs | 23 ++++++++++++++++++----- 4 files changed, 54 insertions(+), 28 deletions(-) diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 55424aa..6e4e469 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -103,20 +103,20 @@ pub async fn handle_delete_file( Err(e) => { // Determine error category for logging and client classification let error_msg = format!("{}", e); - let is_permanent_error = error_msg.contains("VARBINARY") - || error_msg.contains("VARCHAR") - || error_msg.contains("schema") + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") || error_msg.contains("permission") || error_msg.contains("access denied") || error_msg.contains("type mismatch") || error_msg.contains("mismatched types"); - - let is_not_found = error_msg.contains("not found") - || error_msg.contains("No such file") + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") || error_msg.contains("File not found") || error_msg.contains("already deleted") || error_msg.contains("File data not found"); - + error!( "Delete operation failed - file_id: {}, path: {}, error: {}, error_type: {:?}, is_permanent: {}", file_id, @@ -125,7 +125,7 @@ pub async fn handle_delete_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -134,7 +134,7 @@ pub async fn handle_delete_file( } else { 
format!("File deletion failed: {}", error_msg) }; - + Ok(Response::new(response::file_delete_error(client_error_msg))) } } diff --git a/src/handlers/file/rename.rs b/src/handlers/file/rename.rs index 03f9dc7..9b0f7e9 100644 --- a/src/handlers/file/rename.rs +++ b/src/handlers/file/rename.rs @@ -183,19 +183,19 @@ pub async fn handle_rename_file( Err(e) => { // Determine error category for logging and client classification let error_msg = format!("{}", e); - let is_permanent_error = error_msg.contains("VARBINARY") - || error_msg.contains("VARCHAR") - || error_msg.contains("schema") + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") || error_msg.contains("permission") || error_msg.contains("access denied") || error_msg.contains("type mismatch") || error_msg.contains("mismatched types"); - - let is_not_found = error_msg.contains("not found") - || error_msg.contains("No such file") + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") || error_msg.contains("File not found") || error_msg.contains("already deleted"); - + error!( "Rename operation failed - file_id: {}, old_path: {}, new_path: {}, error: {}, error_type: {:?}, is_permanent: {}", req.file_id, @@ -205,7 +205,7 @@ pub async fn handle_rename_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -214,7 +214,7 @@ pub async fn handle_rename_file( } else { format!("Failed to rename file: {}", error_msg) }; - + Ok(Response::new(RenameFileResponse { success: false, return_message: client_error_msg, diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 7036b80..8724490 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1578,15 +1578,20 @@ impl Storage for MySqlStorage { new_revision: i64, ) -> Result<()> { use sqlx::Row; - + // First, get file information to obtain account_hash, group_id, watcher_id for encryption let row_opt = sqlx::query( - r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"# + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? 
LIMIT 1"#, ) .bind(file_id) .fetch_optional(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to get file info for rename (schema/database error): {}", + e + )) + })?; let (account_hash, group_id, watcher_id) = match row_opt { Some(row) => { @@ -1596,7 +1601,10 @@ impl Storage for MySqlStorage { (account_hash, group_id, watcher_id) } None => { - return Err(StorageError::NotFound(format!("File not found: {}", file_id))); + return Err(StorageError::NotFound(format!( + "File not found: {}", + file_id + ))); } }; @@ -1623,7 +1631,12 @@ impl Storage for MySqlStorage { .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to rename file (schema/VARBINARY type mismatch): {}", + e + )) + })?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 93fdc74..8fd7e22 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -737,7 +737,10 @@ impl MySqlFileExt for MySqlStorage { account_hash, file_id ); let mut tx = self.get_sqlx_pool().begin().await.map_err(|e| { - StorageError::Database(format!("Transaction start failed (schema/database error): {}", e)) + StorageError::Database(format!( + "Transaction start failed (schema/database error): {}", + e + )) })?; // Check if file exists and belongs to the user @@ -750,7 +753,12 @@ impl MySqlFileExt for MySqlStorage { .bind(account_hash) .fetch_optional(&mut *tx) .await - .map_err(|e| StorageError::Database(format!("File existence check failed (schema/database error): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "File existence check failed (schema/database error): {}", + e + )) + })?; if row_opt.is_none() { debug!( @@ -775,8 +783,10 @@ impl MySqlFileExt for MySqlStorage { let new_revision = current_revision + 1; // Decrypt for logging purposes only - let file_path_for_log = self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes.clone()); - let filename_for_log = self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes.clone()); + let file_path_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes.clone()); + let filename_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes.clone()); debug!( "Processing file deletion: file_id={}, file_path={}, filename={}, current_revision={}, new_revision={}", file_id, file_path_for_log, filename_for_log, current_revision, new_revision @@ -871,7 +881,10 @@ impl MySqlFileExt for MySqlStorage { // Commit transaction tx.commit().await.map_err(|e| { - StorageError::Database(format!("Transaction commit failed (schema/database error): {}", e)) + StorageError::Database(format!( + "Transaction commit failed (schema/database error): {}", + e + )) })?; info!( From 6c297a6e2ec07e06f4a4b8ff0c58ae25095655b9 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 16:46:23 -0700 Subject: [PATCH 60/71] Update delete routine --- src/storage/mysql.rs | 102 +++++++++++++++++++++++++++++++++++-------- 1 file changed, 84 insertions(+), 18 deletions(-) diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 8724490..0c7d6dc 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1047,6 
+1047,85 @@ impl MySqlStorage { } } + // Check and create reset_monthly_quota procedure if it doesn't exist + let procedure_exists: bool = sqlx::query_scalar( + r#"SELECT COUNT(*) > 0 FROM information_schema.routines + WHERE routine_schema = DATABASE() + AND routine_name = 'reset_monthly_quota' + AND routine_type = 'PROCEDURE'"# + ) + .fetch_one(self.get_sqlx_pool()) + .await + .unwrap_or(false); + + if !procedure_exists { + info!("Creating reset_monthly_quota stored procedure"); + + // Create procedure without DELIMITER (sqlx doesn't support it) + // We need to execute the entire procedure definition as a single statement + let procedure_sql = r#" +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + INSERT INTO quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END"#; + + // Use raw query to execute procedure creation + // Note: sqlx may have issues with procedure creation, so we use query_drop + if let Err(e) = sqlx::query(procedure_sql) + .execute(self.get_sqlx_pool()) + .await + { + // If procedure creation fails, try with DROP first + let _ = sqlx::query("DROP PROCEDURE IF EXISTS reset_monthly_quota") + .execute(self.get_sqlx_pool()) + .await; + + if let Err(e2) = sqlx::query(procedure_sql) + .execute(self.get_sqlx_pool()) + .await + { + warn!("Failed to create reset_monthly_quota procedure: {}. Error: {}", e, e2); + // Don't fail the entire migration if procedure creation fails + } else { + info!("✅ reset_monthly_quota procedure created successfully"); + } + } else { + info!("✅ reset_monthly_quota procedure created successfully"); + } + } else { + debug!("reset_monthly_quota procedure already exists"); + } + info!("Database schema migration complete"); Ok(()) } @@ -1578,20 +1657,15 @@ impl Storage for MySqlStorage { new_revision: i64, ) -> Result<()> { use sqlx::Row; - + // First, get file information to obtain account_hash, group_id, watcher_id for encryption let row_opt = sqlx::query( - r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"#, + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? 
LIMIT 1"# ) .bind(file_id) .fetch_optional(&self.sqlx_pool) .await - .map_err(|e| { - StorageError::Database(format!( - "Failed to get file info for rename (schema/database error): {}", - e - )) - })?; + .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; let (account_hash, group_id, watcher_id) = match row_opt { Some(row) => { @@ -1601,10 +1675,7 @@ impl Storage for MySqlStorage { (account_hash, group_id, watcher_id) } None => { - return Err(StorageError::NotFound(format!( - "File not found: {}", - file_id - ))); + return Err(StorageError::NotFound(format!("File not found: {}", file_id))); } }; @@ -1631,12 +1702,7 @@ impl Storage for MySqlStorage { .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| { - StorageError::Database(format!( - "Failed to rename file (schema/VARBINARY type mismatch): {}", - e - )) - })?; + .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", From a154cf60fe96887832e9c82dfe575595d7948a8c Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 16:47:00 -0700 Subject: [PATCH 61/71] Update delete routine --- src/storage/mysql.rs | 34 +++++++++++++++++++++++++--------- 1 file changed, 25 insertions(+), 9 deletions(-) diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 0c7d6dc..232332d 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1052,7 +1052,7 @@ impl MySqlStorage { r#"SELECT COUNT(*) > 0 FROM information_schema.routines WHERE routine_schema = DATABASE() AND routine_name = 'reset_monthly_quota' - AND routine_type = 'PROCEDURE'"# + AND routine_type = 'PROCEDURE'"#, ) .fetch_one(self.get_sqlx_pool()) .await @@ -1060,7 +1060,7 @@ impl MySqlStorage { if !procedure_exists { info!("Creating reset_monthly_quota stored procedure"); - + // Create procedure without DELIMITER (sqlx doesn't support it) // We need to execute the entire procedure definition as a single statement let procedure_sql = r#" @@ -1109,12 +1109,15 @@ END"#; let _ = sqlx::query("DROP PROCEDURE IF EXISTS reset_monthly_quota") .execute(self.get_sqlx_pool()) .await; - + if let Err(e2) = sqlx::query(procedure_sql) .execute(self.get_sqlx_pool()) .await { - warn!("Failed to create reset_monthly_quota procedure: {}. Error: {}", e, e2); + warn!( + "Failed to create reset_monthly_quota procedure: {}. Error: {}", + e, e2 + ); // Don't fail the entire migration if procedure creation fails } else { info!("✅ reset_monthly_quota procedure created successfully"); @@ -1657,15 +1660,20 @@ impl Storage for MySqlStorage { new_revision: i64, ) -> Result<()> { use sqlx::Row; - + // First, get file information to obtain account_hash, group_id, watcher_id for encryption let row_opt = sqlx::query( - r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"# + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? 
LIMIT 1"#, ) .bind(file_id) .fetch_optional(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to get file info for rename (schema/database error): {}", + e + )) + })?; let (account_hash, group_id, watcher_id) = match row_opt { Some(row) => { @@ -1675,7 +1683,10 @@ impl Storage for MySqlStorage { (account_hash, group_id, watcher_id) } None => { - return Err(StorageError::NotFound(format!("File not found: {}", file_id))); + return Err(StorageError::NotFound(format!( + "File not found: {}", + file_id + ))); } }; @@ -1702,7 +1713,12 @@ impl Storage for MySqlStorage { .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to rename file (schema/VARBINARY type mismatch): {}", + e + )) + })?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", From 89e0dd1a45eb29d59ef9a5ac458a10ec984350b4 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 17:48:03 -0700 Subject: [PATCH 62/71] Update delete routine --- src/handlers/file/find.rs | 6 ++++ src/storage/mysql_file.rs | 58 ++++++++++++++++++++++++++++++++++----- 2 files changed, 57 insertions(+), 7 deletions(-) diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index 8c54fc3..eb13123 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -22,6 +22,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } } @@ -41,6 +42,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } Err(e) => { @@ -51,6 +53,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } } @@ -101,6 +104,7 @@ pub async fn handle_find_file_by_criteria( file_id: file_info.file_id, revision: file_info.revision, file_info: Some(proto_file_info), + is_deleted: false, // Always false - only non-deleted files reach here })) } Ok(None) => Ok(Response::new(FindFileResponse { @@ -109,6 +113,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })), Err(e) => Ok(Response::new(FindFileResponse { success: false, @@ -116,6 +121,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })), } } diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 8fd7e22..95025ec 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -648,7 +648,8 @@ impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE AND revision = ? 
ORDER BY revision DESC LIMIT 1"#, @@ -667,7 +668,8 @@ impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE ORDER BY revision DESC LIMIT 1"#, @@ -682,7 +684,27 @@ impl MySqlFileExt for MySqlStorage { }; if let Some(row) = row_opt { + // Extract is_deleted first for validation + let is_deleted: bool = row.try_get("is_deleted").unwrap_or(false); let file_id: u64 = row.try_get("file_id").unwrap_or(0); + let revision: i64 = row.try_get("revision").unwrap_or(0); + + debug!("📊 find_file_by_path_and_name query returned: file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted); + + // Critical validation: Double-check is_deleted + if is_deleted { + error!("❌ CRITICAL BUG: find_file_by_path_and_name returned deleted file!"); + error!( + " file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + error!(" This should NEVER happen - database inconsistency detected"); + return Ok(None); + } + + debug!("✅ Verified file is not deleted: file_id={}", file_id); + let acc_hash: String = row.try_get("account_hash").unwrap_or_default(); let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let file_path_b: Vec = row.try_get("file_path").unwrap_or_default(); @@ -691,7 +713,6 @@ impl MySqlFileExt for MySqlStorage { let updated_ts: Option = row.try_get("updated_ts").unwrap_or(None); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); - let revision: i64 = row.try_get("revision").unwrap_or(0); let size: u64 = row.try_get("size").unwrap_or(0); let key_id_opt: Option = row.try_get("key_id").ok(); @@ -1210,7 +1231,8 @@ impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND server_group_id = ? AND server_watcher_id = ? 
AND is_deleted = FALSE AND ( @@ -1232,9 +1254,32 @@ impl MySqlFileExt for MySqlStorage { .map_err(|e| { error!("❌ File search query execution failed(sqlx): {}", e); StorageError::Database(format!("File search query execution failed: {}", e)) })?; if let Some(row) = row { - debug!("✅ File found!"); - // Extract required fields from Row object + // Extract is_deleted first for validation + let is_deleted: bool = row.try_get("is_deleted").unwrap_or(false); let file_id: u64 = row.try_get("file_id").unwrap_or(0); + let revision: i64 = row.try_get("revision").unwrap_or(0); + + debug!("✅ File found!"); + debug!( + "📊 Query returned: file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + + // Critical validation: Double-check is_deleted + if is_deleted { + error!("❌ CRITICAL BUG: Query returned deleted file!"); + error!( + " file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + error!(" This should NEVER happen - database inconsistency detected"); + error!(" Query had is_deleted = FALSE filter but returned deleted file"); + return Ok(None); + } + + debug!("✅ Verified file is not deleted: file_id={}", file_id); + + // Extract remaining fields from Row object let acc_hash: String = row.try_get("account_hash").unwrap_or_default(); let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let file_path: String = row.try_get("file_path").unwrap_or_default(); @@ -1243,7 +1288,6 @@ impl MySqlFileExt for MySqlStorage { let updated_ts: Option = row.try_get("updated_ts").unwrap_or(None); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); - let revision: i64 = row.try_get("revision").unwrap_or(0); let size: u64 = row.try_get("size").unwrap_or(0); let key_id_opt: Option = row.try_get("key_id").ok(); From 1aa06a35823f6d28ea06370c286d517486c0ff7b Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 17:54:11 -0700 Subject: [PATCH 63/71] Update delete routine --- .../create_reset_monthly_quota_procedure.sql | 52 +++++++++++++++++++ ...0118_add_reset_monthly_quota_procedure.sql | 50 ++++++++++++++++++ ...add_reset_monthly_quota_procedure_down.sql | 4 ++ proto/sync.proto | 1 + 4 files changed, 107 insertions(+) create mode 100644 migrations/create_reset_monthly_quota_procedure.sql create mode 100644 migrations/sql/20250118_add_reset_monthly_quota_procedure.sql create mode 100644 migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql diff --git a/migrations/create_reset_monthly_quota_procedure.sql b/migrations/create_reset_monthly_quota_procedure.sql new file mode 100644 index 0000000..8309d00 --- /dev/null +++ b/migrations/create_reset_monthly_quota_procedure.sql @@ -0,0 +1,52 @@ +-- Create reset_monthly_quota stored procedure +-- Execute this file manually using: mysql -h 127.0.0.1 -P 63306 -u genesis76 -p cosmic_sync < create_reset_monthly_quota_procedure.sql + +USE cosmic_sync; + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + +DELIMITER $$ + +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + -- Reset API call counter + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + -- Initialize next month bandwidth record + INSERT 
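INTO usage_bandwidth_monthly (
+        account_hash, usage_month, upload_bytes, download_bytes,
+        upload_count, download_count
+    )
+    SELECT
+        account_hash,
+        DATE_FORMAT(v_next_reset_date, '%Y-%m'),
+        0, 0, 0, 0
+    FROM usage_storage
+    WHERE account_hash = p_account_hash
+    ON DUPLICATE KEY UPDATE updated_at = NOW();

The companion migration notes that this procedure is called by the quota
maintenance background task; from Rust that is an ordinary CALL statement. A
sketch (the scheduling code around it is not part of this series):

/// Invoke the monthly quota reset for one account.
async fn reset_quota_for(pool: &sqlx::MySqlPool, account_hash: &str) -> Result<(), sqlx::Error> {
    sqlx::query("CALL reset_monthly_quota(?)")
        .bind(account_hash)
        .execute(pool)
        .await?;
    Ok(())
}

+    -- Initialize next month bandwidth record
+    INSERT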
INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + -- Log quota reset event + INSERT INTO quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END$$ + +DELIMITER ; + +-- Verify procedure was created +SHOW PROCEDURE STATUS WHERE Db = 'cosmic_sync' AND Name = 'reset_monthly_quota'; + diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql new file mode 100644 index 0000000..9bb50b8 --- /dev/null +++ b/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql @@ -0,0 +1,50 @@ +-- Add reset_monthly_quota stored procedure for monthly quota reset +-- This procedure is called by the quota maintenance background task + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + +DELIMITER $$ + +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + -- Archive current month data (already in usage_bandwidth_monthly) + -- Just need to reset for next month + + -- Reset API call counter + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + -- Initialize next month bandwidth record + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + -- Log quota reset event + INSERT INTO quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END$$ + +DELIMITER ; + diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql new file mode 100644 index 0000000..0796505 --- /dev/null +++ b/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql @@ -0,0 +1,4 @@ +-- Rollback: Remove reset_monthly_quota stored procedure + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + diff --git a/proto/sync.proto b/proto/sync.proto index 1ed0c46..ce12e0d 100755 --- a/proto/sync.proto +++ b/proto/sync.proto @@ -712,6 +712,7 @@ message FindFileResponse { uint64 file_id = 3; int64 revision = 4; FileInfo file_info = 5; // Includes full file information + bool is_deleted = 6; // File deletion status for client-side validation } // Check if file exists (including deleted files) From c8bb833fcb6658da9ab6e539942efe7ec158a6ce Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 18:19:09 -0700 Subject: [PATCH 64/71] Update delete routine --- ...0118_add_reset_monthly_quota_procedure.sql | 50 ------------------- ...add_reset_monthly_quota_procedure_down.sql | 4 -- 2 files changed, 54 deletions(-) delete mode 100644 
migrations/sql/20250118_add_reset_monthly_quota_procedure.sql delete mode 100644 migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql deleted file mode 100644 index 9bb50b8..0000000 --- a/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql +++ /dev/null @@ -1,50 +0,0 @@ --- Add reset_monthly_quota stored procedure for monthly quota reset --- This procedure is called by the quota maintenance background task - -DROP PROCEDURE IF EXISTS reset_monthly_quota; - -DELIMITER $$ - -CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) -BEGIN - DECLARE v_current_month VARCHAR(7); - DECLARE v_next_reset_date DATE; - - SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); - SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); - - -- Archive current month data (already in usage_bandwidth_monthly) - -- Just need to reset for next month - - -- Reset API call counter - UPDATE usage_storage - SET api_calls_count = 0, - quota_reset_date = v_next_reset_date, - updated_at = NOW() - WHERE account_hash = p_account_hash; - - -- Initialize next month bandwidth record - INSERT INTO usage_bandwidth_monthly ( - account_hash, usage_month, upload_bytes, download_bytes, - upload_count, download_count - ) - SELECT - account_hash, - DATE_FORMAT(v_next_reset_date, '%Y-%m'), - 0, 0, 0, 0 - FROM usage_storage - WHERE account_hash = p_account_hash - ON DUPLICATE KEY UPDATE updated_at = NOW(); - - -- Log quota reset event - INSERT INTO quota_events ( - account_hash, event_type, current_value, limit_value, - severity, message - ) VALUES ( - p_account_hash, 'QUOTA_RESET', 0, 0, - 'INFO', 'Monthly quota has been reset' - ); -END$$ - -DELIMITER ; - diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql deleted file mode 100644 index 0796505..0000000 --- a/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql +++ /dev/null @@ -1,4 +0,0 @@ --- Rollback: Remove reset_monthly_quota stored procedure - -DROP PROCEDURE IF EXISTS reset_monthly_quota; - From 65aa5a18742f97664c90d145d18aebdbda09182c Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 18:54:31 -0700 Subject: [PATCH 65/71] Update delete routine --- src/services/file_service.rs | 75 ++++++++++++++++++++++++++++++++++-- 1 file changed, 72 insertions(+), 3 deletions(-) diff --git a/src/services/file_service.rs b/src/services/file_service.rs index 3b4b98f..b60851e 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -599,9 +599,78 @@ impl FileService { ); if is_deleted { - info!("File is already deleted: file_id={}", file_id); - // Treat as success if already deleted - return Ok(()); + warn!( + "⚠️ Client requested deletion of already-deleted file_id={}, checking for actual active file", + file_id + ); + + // Try to find actual active file by path/name + // This handles the case where client has wrong file_id (from previous deleted file) + // revision=0 means get the latest active file + match self + .storage + .find_file_by_path_and_name( + &file_info.account_hash, + &file_info.file_path, + &file_info.filename, + 0, // revision=0: get latest active file (highest revision, not deleted) + ) + .await + { + Ok(Some(active_file)) => { + warn!( + "🔄 Found actual active file: old_file_id={}, new_file_id={}, path={}", + file_id, 
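active_file.file_id, file_info.file_path
                            );

The net effect is an idempotent delete: a stale file_id that points at a
tombstone is redirected to the live row at the same path, and only a genuinely
absent file short-circuits to success. The ladder compressed into one helper
-- signatures as of this patch (a later patch in this series drops the
revision argument):

/// Compressed sketch of the fallback used above for a delete request
/// whose file_id referenced an already-deleted row.
async fn delete_with_fallback(
    storage: &dyn Storage,
    account_hash: &str,
    file_path: &str,
    filename: &str,
) -> Result<(), StorageError> {
    match storage
        .find_file_by_path_and_name(account_hash, file_path, filename, 0) // 0 = latest active
        .await?
    {
        // A live row still exists under this path: delete that one instead.
        Some(active) => {
            storage.delete_file(account_hash, active.file_id).await?;
            Ok(())
        }
        // Nothing active: the file is truly gone; report success.
        None => Ok(()),
    }
}

                            warn!(
                                "🔄 Found actual active file: old_file_id={}, new_file_id={}, path={}",
                                file_id,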
active_file.file_id, file_info.file_path + ); + // Delete the actual active file instead + match self + .storage + .delete_file(&active_file.account_hash, active_file.file_id) + .await + { + Ok(_) => { + self.files.lock().await.remove(&active_file.file_id); + info!( + "✅ Successfully deleted actual active file: file_id={}, path={}", + active_file.file_id, file_info.file_path + ); + + // Send notification for the actual deleted file + if let Some(nm) = &self.notification_manager { + let notification = FileUpdateNotification { + account_hash: active_file.account_hash.clone(), + device_hash: active_file.device_hash.clone(), + file_info: Some(active_file.to_sync_file()), + update_type: crate::sync::file_update_notification::UpdateType::Deleted as i32, + timestamp: chrono::Utc::now().timestamp(), + rename_info: None, + }; + let _ = nm.broadcast_file_update(notification).await; + } + + return Ok(()); + } + Err(e) => { + error!( + "Failed to delete actual active file: file_id={}, error={}", + active_file.file_id, e + ); + return Err(e); + } + } + } + Ok(None) => { + info!("No active file found with same path, file is truly deleted: file_id={}", file_id); + return Ok(()); + } + Err(e) => { + warn!( + "Error searching for active file: {}, treating as already deleted", + e + ); + return Ok(()); + } + } } // Handle file deletion From d37553f45cf7b2b82eac89d20766c95f2c5702d7 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 19:19:05 -0700 Subject: [PATCH 66/71] Update delete routine --- src/storage/mysql_file.rs | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 95025ec..8c2ed42 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -767,7 +767,7 @@ impl MySqlFileExt for MySqlStorage { // Check if file exists and belongs to the user // Use Vec for VARBINARY columns (file_path, filename) let row_opt = sqlx::query( - r#"SELECT file_id, revision, file_path, filename, device_hash, group_id, watcher_id + r#"SELECT file_id, revision, file_path, filename, device_hash, group_id, watcher_id, server_group_id, server_watcher_id FROM files WHERE file_id = ? 
AND account_hash = ?"#, ) .bind(file_id) @@ -800,6 +800,8 @@ impl MySqlFileExt for MySqlStorage { let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); + let server_group_id: i32 = row.try_get("server_group_id").unwrap_or(0); + let server_watcher_id: i32 = row.try_get("server_watcher_id").unwrap_or(0); let new_revision = current_revision + 1; @@ -859,8 +861,8 @@ impl MySqlFileExt for MySqlStorage { // Use Vec for VARBINARY columns to avoid type mismatch sqlx::query( r#"INSERT INTO files - (file_id, account_hash, device_hash, file_path, filename, file_hash, size, unix_permissions) - VALUES (?, ?, ?, ?, ?, ?, ?, ?)"#, + (file_id, account_hash, device_hash, file_path, filename, file_hash, size, unix_permissions, server_group_id, server_watcher_id) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"#, ) .bind(new_file_id) .bind(account_hash) @@ -870,6 +872,8 @@ impl MySqlFileExt for MySqlStorage { .bind(&file_path_bytes) // file_hash uses file_path_bytes .bind(0i64) .bind(None::) + .bind(server_group_id) + .bind(server_watcher_id) .execute(&mut *tx) .await .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, schema/VARBINARY type mismatch): {}", e)))?; From 0970d7da0f8d05c1e6179958b00dc1147944daf1 Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 20:24:21 -0700 Subject: [PATCH 67/71] Update delete routine --- src/handlers/file_handler.rs | 7 +- src/services/file_service.rs | 12 ++-- src/storage/memory.rs | 28 ++++---- src/storage/mod.rs | 1 - src/storage/mysql.rs | 4 +- src/storage/mysql_file.rs | 121 +++++++++++++++-------------------- 6 files changed, 75 insertions(+), 98 deletions(-) diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs index afb592d..504f31d 100644 --- a/src/handlers/file_handler.rs +++ b/src/handlers/file_handler.rs @@ -307,12 +307,7 @@ impl FileHandler { match self .app_state .file - .find_file_by_local_path( - &req.account_hash, - &normalized_file_path, - &req.filename, - req.revision, - ) + .find_file_by_local_path(&req.account_hash, &normalized_file_path, &req.filename) .await { Ok(Some(info)) => { diff --git a/src/services/file_service.rs b/src/services/file_service.rs index b60851e..87cb4d9 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -606,14 +606,13 @@ impl FileService { // Try to find actual active file by path/name // This handles the case where client has wrong file_id (from previous deleted file) - // revision=0 means get the latest active file + // Now searches by updated_time (latest active file) match self .storage .find_file_by_path_and_name( &file_info.account_hash, &file_info.file_path, &file_info.filename, - 0, // revision=0: get latest active file (highest revision, not deleted) ) .await { @@ -808,11 +807,10 @@ impl FileService { account_hash: &str, file_path: &str, filename: &str, - revision: i64, ) -> Result, StorageError> { debug!( - "Finding file by local path: account={}, path={}, filename={}, revision={}", - account_hash, file_path, filename, revision + "Finding file by local path (by updated_time): account={}, path={}, filename={}", + account_hash, file_path, filename ); // Normalize file path to preserve tilde (~) prefix for home directory @@ -822,9 +820,9 @@ impl FileService { file_path, normalized_file_path ); - // Query file information from storage + // Query file information from storage (searches by 
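updated_time for latest active file)

This comment is the crux of the patch: "current file" is now decided by
recency rather than by the largest revision number, which matters once
revisions restart at 1 after a delete/re-upload cycle. The MySQL side orders
by updated_time DESC, revision DESC, id DESC; the in-memory backend mirrors
it, roughly:

// Pick the most recently updated candidate; revision breaks ties.
// Mirrors the SQL ORDER BY updated_time DESC, revision DESC.
fn latest_active(mut candidates: Vec<FileInfo>) -> Option<FileInfo> {
    candidates.sort_by(|a, b| {
        b.updated_time
            .seconds
            .cmp(&a.updated_time.seconds)
            .then(b.revision.cmp(&a.revision))
    });
    candidates.into_iter().next()
}

+        // Query file information from storage (searches by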
---
 src/handlers/file_handler.rs |   7 +-
 src/services/file_service.rs |  12 ++--
 src/storage/memory.rs        |  28 ++++----
 src/storage/mod.rs           |   1 -
 src/storage/mysql.rs         |   4 +-
 src/storage/mysql_file.rs    | 121 +++++++++++++++--------------------
 6 files changed, 75 insertions(+), 98 deletions(-)

diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs
index afb592d..504f31d 100644
--- a/src/handlers/file_handler.rs
+++ b/src/handlers/file_handler.rs
@@ -307,12 +307,7 @@ impl FileHandler {
         match self
             .app_state
             .file
-            .find_file_by_local_path(
-                &req.account_hash,
-                &normalized_file_path,
-                &req.filename,
-                req.revision,
-            )
+            .find_file_by_local_path(&req.account_hash, &normalized_file_path, &req.filename)
             .await
         {
             Ok(Some(info)) => {
diff --git a/src/services/file_service.rs b/src/services/file_service.rs
index b60851e..87cb4d9 100644
--- a/src/services/file_service.rs
+++ b/src/services/file_service.rs
@@ -606,14 +606,13 @@ impl FileService {
             // Try to find actual active file by path/name
             // This handles the case where client has wrong file_id (from previous deleted file)
-            // revision=0 means get the latest active file
+            // Now searches by updated_time (latest active file)
             match self
                 .storage
                 .find_file_by_path_and_name(
                     &file_info.account_hash,
                     &file_info.file_path,
                     &file_info.filename,
-                    0, // revision=0: get latest active file (highest revision, not deleted)
                 )
                 .await
             {
@@ -808,11 +807,10 @@ impl FileService {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> Result<Option<FileInfo>, StorageError> {
         debug!(
-            "Finding file by local path: account={}, path={}, filename={}, revision={}",
-            account_hash, file_path, filename, revision
+            "Finding file by local path (by updated_time): account={}, path={}, filename={}",
+            account_hash, file_path, filename
         );

         // Normalize file path to preserve tilde (~) prefix for home directory
@@ -822,9 +820,9 @@ impl FileService {
             file_path, normalized_file_path
         );

-        // Query file information from storage
+        // Query file information from storage (searches by updated_time for latest active file)
         self.storage
-            .find_file_by_path_and_name(account_hash, &normalized_file_path, filename, revision)
+            .find_file_by_path_and_name(account_hash, &normalized_file_path, filename)
             .await
     }
diff --git a/src/storage/memory.rs b/src/storage/memory.rs
index a6133e9..cd26478 100644
--- a/src/storage/memory.rs
+++ b/src/storage/memory.rs
@@ -656,22 +656,28 @@ impl Storage for MemoryStorage {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> crate::storage::Result<Option<FileInfo>> {
         let data = self.data.lock().await;

-        for file in data.files.values() {
-            // Check if account, path, filename, and revision match
-            if file.account_hash == account_hash
-                && file.file_path == file_path
-                && file.filename == filename
-                && (revision == 0 || file.revision == revision)
-            {
-                return Ok(Some(file.clone()));
-            }
+        // Find all matching files and return the most recently updated one
+        let mut matching_files: Vec<&FileInfo> = data
+            .files
+            .values()
+            .filter(|file| {
+                file.account_hash == account_hash
+                    && file.file_path == file_path
+                    && file.filename == filename
+            })
+            .collect();
+
+        if matching_files.is_empty() {
+            return Ok(None);
         }

-        Ok(None)
+        // Sort by updated_time descending (most recent first)
+        matching_files.sort_by(|a, b| b.updated_time.seconds.cmp(&a.updated_time.seconds));
+
+        Ok(matching_files.first().map(|&f| f.clone()))
     }

     /// Find file by path, filename, and group ID
diff --git a/src/storage/mod.rs b/src/storage/mod.rs
index fe2f642..be0a14d 100644
--- a/src/storage/mod.rs
+++ b/src/storage/mod.rs
@@ -316,7 +316,6 @@ pub trait Storage: Sync + Send {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> Result<Option<FileInfo>>;
     async fn find_file_by_criteria(
         &self,
diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs
index 232332d..10a8368 100644
--- a/src/storage/mysql.rs
+++ b/src/storage/mysql.rs
@@ -1422,10 +1422,8 @@ impl Storage for MySqlStorage {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> Result<Option<FileInfo>> {
-        MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename, revision)
-            .await
+        MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename).await
     }

     /// Find file by criteria
diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs
index 8c2ed42..fd9896d 100644
--- a/src/storage/mysql_file.rs
+++ b/src/storage/mysql_file.rs
@@ -41,7 +41,6 @@ pub trait MySqlFileExt {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> Result<Option<FileInfo>>;

     /// Search file by path, filename and group ID
@@ -161,10 +160,10 @@ impl MySqlFileExt for MySqlStorage {
             return Ok(file_info.file_id);
         }

-        debug!("🔍 Querying maximum revision...");
-        // Query maximum revision of all files (including deleted files) with the same path and filename
-        let max_revision: Option<i64> = sqlx::query_scalar(
-            r#"SELECT COALESCE(MAX(revision), 0) FROM files WHERE account_hash = ? AND file_path = ? AND filename = ? AND server_group_id = ?"#
+        debug!("🔍 Checking active files...");
+        // Check if there is a non-deleted file with the same file path and name
+        let existing_active_file: Option<(u64, i64)> = sqlx::query_as(
+            r#"SELECT file_id, revision FROM files WHERE account_hash = ? AND file_path = ? AND filename = ? AND server_group_id = ? AND is_deleted = FALSE ORDER BY updated_time DESC, revision DESC LIMIT 1"#
         )
         .bind(&file_info.account_hash)
         .bind(&file_info.file_path)
@@ -172,9 +171,21 @@ impl MySqlFileExt for MySqlStorage {
         .bind(file_info.group_id)
         .fetch_optional(&mut *tx)
         .await
-        .map_err(|e| { error!("❌ Maximum revision query failed(sqlx): {}", e); StorageError::Database(format!("Maximum revision query failed: {}", e)) })?;
+        .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?;
+
+        let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file {
+            // Active file exists → this is an update, increment revision
+            debug!(
+                "📝 Active file found (file_id={}, revision={}), incrementing revision",
+                existing_file_id, existing_revision
+            );
+            existing_revision + 1
+        } else {
+            // No active file → this is a new file (or re-upload after deletion), reset to revision 1
+            debug!("✨ No active file found, starting with revision 1");
+            1
+        };

-        let new_revision = max_revision.unwrap_or(0) + 1;
         debug!(
             "📄 Preparing to store new file information: file_id={}, revision={} (key_id: {:?})",
             file_info.file_id, new_revision, file_info.key_id
@@ -182,20 +193,7 @@ impl MySqlFileExt for MySqlStorage {

         // Path encryption and index computation omitted as it's handled elsewhere

-        debug!("🔍 Checking active files...");
-        // Check if there is a non-deleted file with the same file path and name
-        let existing_active_file: Option<u64> = sqlx::query_scalar(
-            r#"SELECT file_id FROM files WHERE account_hash = ? AND file_path = ? AND filename = ? AND server_group_id = ? AND is_deleted = FALSE LIMIT 1"#
-        )
-        .bind(&file_info.account_hash)
-        .bind(&file_info.file_path)
-        .bind(&file_info.filename)
-        .bind(file_info.group_id)
-        .fetch_optional(&mut *tx)
-        .await
-        .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?;
-
-        if let Some(existing_file_id) = existing_active_file {
+        if let Some((existing_file_id, _)) = existing_active_file {
             // If there is an existing active file, mark it as deleted
             debug!(
                 "🗑️ Marking existing active file as deleted: existing_file_id={}",
@@ -621,12 +619,11 @@ impl MySqlFileExt for MySqlStorage {
         account_hash: &str,
         file_path: &str,
         filename: &str,
-        revision: i64,
     ) -> Result<Option<FileInfo>> {
         use sqlx::Row;
         debug!(
-            "Searching file by path and filename: account_hash={}, file_path={}, filename={}, revision={}",
-            account_hash, file_path, filename, revision
+            "🔍 Searching file by path and filename (by updated_time): account_hash={}, file_path={}, filename={}",
+            account_hash, file_path, filename
         );

         // equality by eq_index
@@ -642,46 +639,26 @@ impl MySqlFileExt for MySqlStorage {
         } else {
             crate::utils::crypto::make_eq_index(account_hash.as_bytes(), file_path)
         };
-        let row_opt = if revision > 0 {
-            sqlx::query(
-                r#"SELECT
-                    file_id, account_hash, device_hash, file_path, filename, file_hash,
-                    UNIX_TIMESTAMP(created_time) as created_ts,
-                    UNIX_TIMESTAMP(updated_time) as updated_ts,
-                    group_id, watcher_id, revision, size, key_id, unix_permissions,
-                    is_deleted
-                FROM files
-                WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE AND revision = ?
-                ORDER BY revision DESC LIMIT 1"#,
-            )
-            .bind(account_hash)
-            .bind(&eq_index)
-            .bind(revision)
-            .fetch_optional(self.get_sqlx_pool())
-            .await
-            .map_err(|e| {
-                StorageError::Database(format!("File search failed(exact search, sqlx): {}", e))
-            })?
-        } else {
-            sqlx::query(
-                r#"SELECT
-                    file_id, account_hash, device_hash, file_path, filename, file_hash,
-                    UNIX_TIMESTAMP(created_time) as created_ts,
-                    UNIX_TIMESTAMP(updated_time) as updated_ts,
-                    group_id, watcher_id, revision, size, key_id, unix_permissions,
-                    is_deleted
-                FROM files
-                WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE
-                ORDER BY revision DESC LIMIT 1"#,
-            )
-            .bind(account_hash)
-            .bind(&eq_index)
-            .fetch_optional(self.get_sqlx_pool())
-            .await
-            .map_err(|e| {
-                StorageError::Database(format!("File search failed(exact search, sqlx): {}", e))
-            })?
-        };
+
+        let row_opt = sqlx::query(
+            r#"SELECT
+                file_id, account_hash, device_hash, file_path, filename, file_hash,
+                UNIX_TIMESTAMP(created_time) as created_ts,
+                UNIX_TIMESTAMP(updated_time) as updated_ts,
+                group_id, watcher_id, revision, size, key_id, unix_permissions,
+                is_deleted
+            FROM files
+            WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE
+            ORDER BY updated_time DESC, revision DESC, id DESC
+            LIMIT 1"#,
+        )
+        .bind(account_hash)
+        .bind(&eq_index)
+        .fetch_optional(self.get_sqlx_pool())
+        .await
+        .map_err(|e| {
+            StorageError::Database(format!("File search failed(by updated_time, sqlx): {}", e))
+        })?;

         if let Some(row) = row_opt {
             // Extract is_deleted first for validation
@@ -689,8 +666,11 @@ impl MySqlFileExt for MySqlStorage {
             let file_id: u64 = row.try_get("file_id").unwrap_or(0);
             let revision: i64 = row.try_get("revision").unwrap_or(0);

-            debug!("📊 find_file_by_path_and_name query returned: file_id={}, revision={}, is_deleted={}",
-                file_id, revision, is_deleted);
+            let updated_ts_value: Option<i64> = row.try_get("updated_ts").ok().flatten();
+            debug!(
+                "📊 find_file_by_path_and_name query returned: file_id={}, revision={}, updated_time={:?}, is_deleted={}",
+                file_id, revision, updated_ts_value, is_deleted
+            );

             // Critical validation: Double-check is_deleted
             if is_deleted {
@@ -1225,7 +1205,7 @@ impl MySqlFileExt for MySqlStorage {
         };

         debug!(
-            "🔍 Final search criteria: path='{}', filename='{}'",
+            "🔍 Final search criteria (by updated_time): path='{}', filename='{}'",
             search_path, search_filename
         );
@@ -1243,7 +1223,7 @@ impl MySqlFileExt for MySqlStorage {
                     (file_path = ? AND filename = ?)
                     OR (file_path = ? AND filename = ?)
                 )
-            ORDER BY revision DESC
+            ORDER BY updated_time DESC, revision DESC, id DESC
             LIMIT 1"#
         )
         .bind(account_hash)
@@ -1263,10 +1243,11 @@ impl MySqlFileExt for MySqlStorage {
             let file_id: u64 = row.try_get("file_id").unwrap_or(0);
             let revision: i64 = row.try_get("revision").unwrap_or(0);

+            let updated_ts_val: Option<i64> = row.try_get("updated_ts").ok().flatten();
             debug!("✅ File found!");
             debug!(
-                "📊 Query returned: file_id={}, revision={}, is_deleted={}",
-                file_id, revision, is_deleted
+                "📊 Query returned: file_id={}, revision={}, updated_time={:?}, is_deleted={}",
+                file_id, revision, updated_ts_val, is_deleted
             );

             // Critical validation: Double-check is_deleted

From 5f13cf2a26ba96a47dec2ee799e144e0e83fc12a Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Tue, 18 Nov 2025 20:26:06 -0700
Subject: [PATCH 68/71] Update delete routine
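
Formatting follow-up to the previous commit: wrap the long
`let new_revision = if let Some(...)` binder in mysql_file.rs and normalize
a blank line; no behavioral change. For reference, the revision rule the
reflowed expression implements, as a plain-function sketch (names are
illustrative, not the actual API):

    /// An active row means an update: bump its revision. No active row
    /// means a fresh upload (or re-upload after deletion): restart at 1.
    fn next_revision(active_revision: Option<i64>) -> i64 {
        match active_revision {
            Some(rev) => rev + 1,
            None => 1,
        }
    }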
---
 src/storage/mysql_file.rs | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs
index fd9896d..20251d9 100644
--- a/src/storage/mysql_file.rs
+++ b/src/storage/mysql_file.rs
@@ -173,7 +173,8 @@ impl MySqlFileExt for MySqlStorage {
         .await
         .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?;

-        let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file {
+        let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file
+        {
             // Active file exists → this is an update, increment revision
             debug!(
                 "📝 Active file found (file_id={}, revision={}), incrementing revision",
@@ -639,7 +640,7 @@ impl MySqlFileExt for MySqlStorage {
         } else {
             crate::utils::crypto::make_eq_index(account_hash.as_bytes(), file_path)
         };
-
+
         let row_opt = sqlx::query(
             r#"SELECT
                 file_id, account_hash, device_hash, file_path, filename, file_hash,

From 94213d393cddafd6cfb2fa4acbaa7f42cb32489f Mon Sep 17 00:00:00 2001
From: Yongjin Chong
Date: Tue, 18 Nov 2025 20:35:05 -0700
Subject: [PATCH 69/71] Update delete routine
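
Formatting-only pass over the delete/rename error paths and storage
helpers: trailing whitespace and the wrapping of warn!/error!/map_err
calls change, but no logic does. The substring-based error triage these
hunks touch works like the following sketch (illustrative; the matched
phrases come from StorageError messages):

    /// Classify a storage error message for logging and client responses.
    fn classify(msg: &str) -> &'static str {
        let permanent = ["VARBINARY", "VARCHAR", "schema", "permission",
                         "access denied", "type mismatch", "mismatched types"];
        let not_found = ["not found", "No such file", "already deleted"];
        if permanent.iter().any(|p| msg.contains(p)) {
            "permanent"
        } else if not_found.iter().any(|p| msg.contains(p)) {
            "not_found"
        } else {
            "temporary"
        }
    }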
data not found"); - + error!( "Delete operation failed - file_id: {}, path: {}, error: {}, error_type: {:?}, is_permanent: {}", file_id, @@ -125,7 +125,7 @@ pub async fn handle_delete_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -134,7 +134,7 @@ pub async fn handle_delete_file( } else { format!("File deletion failed: {}", error_msg) }; - + Ok(Response::new(response::file_delete_error(client_error_msg))) } } diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index eb13123..af43eca 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -104,7 +104,7 @@ pub async fn handle_find_file_by_criteria( file_id: file_info.file_id, revision: file_info.revision, file_info: Some(proto_file_info), - is_deleted: false, // Always false - only non-deleted files reach here + is_deleted: false, // Always false - only non-deleted files reach here })) } Ok(None) => Ok(Response::new(FindFileResponse { diff --git a/src/handlers/file/rename.rs b/src/handlers/file/rename.rs index 9b0f7e9..03f9dc7 100644 --- a/src/handlers/file/rename.rs +++ b/src/handlers/file/rename.rs @@ -183,19 +183,19 @@ pub async fn handle_rename_file( Err(e) => { // Determine error category for logging and client classification let error_msg = format!("{}", e); - let is_permanent_error = error_msg.contains("VARBINARY") - || error_msg.contains("VARCHAR") - || error_msg.contains("schema") + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") || error_msg.contains("permission") || error_msg.contains("access denied") || error_msg.contains("type mismatch") || error_msg.contains("mismatched types"); - - let is_not_found = error_msg.contains("not found") - || error_msg.contains("No such file") + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") || error_msg.contains("File not found") || error_msg.contains("already deleted"); - + error!( "Rename operation failed - file_id: {}, old_path: {}, new_path: {}, error: {}, error_type: {:?}, is_permanent: {}", req.file_id, @@ -205,7 +205,7 @@ pub async fn handle_rename_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -214,7 +214,7 @@ pub async fn handle_rename_file( } else { format!("Failed to rename file: {}", error_msg) }; - + Ok(Response::new(RenameFileResponse { success: false, return_message: client_error_msg, diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs index 504f31d..6b6b6ff 100644 --- a/src/handlers/file_handler.rs +++ b/src/handlers/file_handler.rs @@ -307,7 +307,11 @@ impl FileHandler { match self .app_state .file - .find_file_by_local_path(&req.account_hash, &normalized_file_path, &req.filename) + .find_file_by_local_path( + &req.account_hash, + &normalized_file_path, + &req.filename, + ) .await { Ok(Some(info)) => { diff --git a/src/services/file_service.rs b/src/services/file_service.rs index 87cb4d9..d72ef39 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -603,37 +603,29 @@ impl FileService { "⚠️ Client requested deletion of already-deleted file_id={}, checking for actual active file", 
file_id ); - + // Try to find actual active file by path/name // This handles the case where client has wrong file_id (from previous deleted file) // Now searches by updated_time (latest active file) - match self - .storage - .find_file_by_path_and_name( - &file_info.account_hash, - &file_info.file_path, - &file_info.filename, - ) - .await - { + match self.storage.find_file_by_path_and_name( + &file_info.account_hash, + &file_info.file_path, + &file_info.filename, + ).await { Ok(Some(active_file)) => { warn!( "🔄 Found actual active file: old_file_id={}, new_file_id={}, path={}", file_id, active_file.file_id, file_info.file_path ); // Delete the actual active file instead - match self - .storage - .delete_file(&active_file.account_hash, active_file.file_id) - .await - { + match self.storage.delete_file(&active_file.account_hash, active_file.file_id).await { Ok(_) => { self.files.lock().await.remove(&active_file.file_id); info!( "✅ Successfully deleted actual active file: file_id={}, path={}", active_file.file_id, file_info.file_path ); - + // Send notification for the actual deleted file if let Some(nm) = &self.notification_manager { let notification = FileUpdateNotification { @@ -646,14 +638,11 @@ impl FileService { }; let _ = nm.broadcast_file_update(notification).await; } - + return Ok(()); } Err(e) => { - error!( - "Failed to delete actual active file: file_id={}, error={}", - active_file.file_id, e - ); + error!("Failed to delete actual active file: file_id={}, error={}", active_file.file_id, e); return Err(e); } } @@ -663,10 +652,7 @@ impl FileService { return Ok(()); } Err(e) => { - warn!( - "Error searching for active file: {}, treating as already deleted", - e - ); + warn!("Error searching for active file: {}, treating as already deleted", e); return Ok(()); } } diff --git a/src/storage/memory.rs b/src/storage/memory.rs index cd26478..1b21678 100644 --- a/src/storage/memory.rs +++ b/src/storage/memory.rs @@ -675,7 +675,9 @@ impl Storage for MemoryStorage { } // Sort by updated_time descending (most recent first) - matching_files.sort_by(|a, b| b.updated_time.seconds.cmp(&a.updated_time.seconds)); + matching_files.sort_by(|a, b| { + b.updated_time.seconds.cmp(&a.updated_time.seconds) + }); Ok(matching_files.first().map(|&f| f.clone())) } diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 10a8368..684f02b 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1052,7 +1052,7 @@ impl MySqlStorage { r#"SELECT COUNT(*) > 0 FROM information_schema.routines WHERE routine_schema = DATABASE() AND routine_name = 'reset_monthly_quota' - AND routine_type = 'PROCEDURE'"#, + AND routine_type = 'PROCEDURE'"# ) .fetch_one(self.get_sqlx_pool()) .await @@ -1060,7 +1060,7 @@ impl MySqlStorage { if !procedure_exists { info!("Creating reset_monthly_quota stored procedure"); - + // Create procedure without DELIMITER (sqlx doesn't support it) // We need to execute the entire procedure definition as a single statement let procedure_sql = r#" @@ -1109,15 +1109,12 @@ END"#; let _ = sqlx::query("DROP PROCEDURE IF EXISTS reset_monthly_quota") .execute(self.get_sqlx_pool()) .await; - + if let Err(e2) = sqlx::query(procedure_sql) .execute(self.get_sqlx_pool()) .await { - warn!( - "Failed to create reset_monthly_quota procedure: {}. Error: {}", - e, e2 - ); + warn!("Failed to create reset_monthly_quota procedure: {}. 
Error: {}", e, e2); // Don't fail the entire migration if procedure creation fails } else { info!("✅ reset_monthly_quota procedure created successfully"); @@ -1423,7 +1420,8 @@ impl Storage for MySqlStorage { file_path: &str, filename: &str, ) -> Result> { - MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename).await + MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename) + .await } /// Find file by criteria @@ -1658,20 +1656,15 @@ impl Storage for MySqlStorage { new_revision: i64, ) -> Result<()> { use sqlx::Row; - + // First, get file information to obtain account_hash, group_id, watcher_id for encryption let row_opt = sqlx::query( - r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"#, + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"# ) .bind(file_id) .fetch_optional(&self.sqlx_pool) .await - .map_err(|e| { - StorageError::Database(format!( - "Failed to get file info for rename (schema/database error): {}", - e - )) - })?; + .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; let (account_hash, group_id, watcher_id) = match row_opt { Some(row) => { @@ -1681,10 +1674,7 @@ impl Storage for MySqlStorage { (account_hash, group_id, watcher_id) } None => { - return Err(StorageError::NotFound(format!( - "File not found: {}", - file_id - ))); + return Err(StorageError::NotFound(format!("File not found: {}", file_id))); } }; @@ -1711,12 +1701,7 @@ impl Storage for MySqlStorage { .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| { - StorageError::Database(format!( - "Failed to rename file (schema/VARBINARY type mismatch): {}", - e - )) - })?; + .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 20251d9..fd9896d 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -173,8 +173,7 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?; - let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file - { + let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file { // Active file exists → this is an update, increment revision debug!( "📝 Active file found (file_id={}, revision={}), incrementing revision", @@ -640,7 +639,7 @@ impl MySqlFileExt for MySqlStorage { } else { crate::utils::crypto::make_eq_index(account_hash.as_bytes(), file_path) }; - + let row_opt = sqlx::query( r#"SELECT file_id, account_hash, device_hash, file_path, filename, file_hash, From f4ffb619e8d91e3903488f07f61d85e88f0e1acc Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 20:35:34 -0700 Subject: [PATCH 70/71] Update delete routine --- src/handlers/file/delete.rs | 18 +++++++++--------- src/handlers/file/find.rs | 2 +- src/handlers/file/rename.rs | 18 +++++++++--------- src/handlers/file_handler.rs | 6 +----- src/services/file_service.rs | 36 ++++++++++++++++++++++++----------- src/storage/memory.rs | 4 +--- src/storage/mysql.rs | 37 +++++++++++++++++++++++++----------- src/storage/mysql_file.rs | 5 +++-- 8 files changed, 75 insertions(+), 51 deletions(-) diff --git 
a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 55424aa..6e4e469 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -103,20 +103,20 @@ pub async fn handle_delete_file( Err(e) => { // Determine error category for logging and client classification let error_msg = format!("{}", e); - let is_permanent_error = error_msg.contains("VARBINARY") - || error_msg.contains("VARCHAR") - || error_msg.contains("schema") + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") || error_msg.contains("permission") || error_msg.contains("access denied") || error_msg.contains("type mismatch") || error_msg.contains("mismatched types"); - - let is_not_found = error_msg.contains("not found") - || error_msg.contains("No such file") + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") || error_msg.contains("File not found") || error_msg.contains("already deleted") || error_msg.contains("File data not found"); - + error!( "Delete operation failed - file_id: {}, path: {}, error: {}, error_type: {:?}, is_permanent: {}", file_id, @@ -125,7 +125,7 @@ pub async fn handle_delete_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -134,7 +134,7 @@ pub async fn handle_delete_file( } else { format!("File deletion failed: {}", error_msg) }; - + Ok(Response::new(response::file_delete_error(client_error_msg))) } } diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index af43eca..eb13123 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -104,7 +104,7 @@ pub async fn handle_find_file_by_criteria( file_id: file_info.file_id, revision: file_info.revision, file_info: Some(proto_file_info), - is_deleted: false, // Always false - only non-deleted files reach here + is_deleted: false, // Always false - only non-deleted files reach here })) } Ok(None) => Ok(Response::new(FindFileResponse { diff --git a/src/handlers/file/rename.rs b/src/handlers/file/rename.rs index 03f9dc7..9b0f7e9 100644 --- a/src/handlers/file/rename.rs +++ b/src/handlers/file/rename.rs @@ -183,19 +183,19 @@ pub async fn handle_rename_file( Err(e) => { // Determine error category for logging and client classification let error_msg = format!("{}", e); - let is_permanent_error = error_msg.contains("VARBINARY") - || error_msg.contains("VARCHAR") - || error_msg.contains("schema") + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") || error_msg.contains("permission") || error_msg.contains("access denied") || error_msg.contains("type mismatch") || error_msg.contains("mismatched types"); - - let is_not_found = error_msg.contains("not found") - || error_msg.contains("No such file") + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") || error_msg.contains("File not found") || error_msg.contains("already deleted"); - + error!( "Rename operation failed - file_id: {}, old_path: {}, new_path: {}, error: {}, error_type: {:?}, is_permanent: {}", req.file_id, @@ -205,7 +205,7 @@ pub async fn handle_rename_file( if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, is_permanent_error ); - + // Format error message for client let 
client_error_msg = if is_not_found { format!("File not found: already deleted") @@ -214,7 +214,7 @@ pub async fn handle_rename_file( } else { format!("Failed to rename file: {}", error_msg) }; - + Ok(Response::new(RenameFileResponse { success: false, return_message: client_error_msg, diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs index 6b6b6ff..504f31d 100644 --- a/src/handlers/file_handler.rs +++ b/src/handlers/file_handler.rs @@ -307,11 +307,7 @@ impl FileHandler { match self .app_state .file - .find_file_by_local_path( - &req.account_hash, - &normalized_file_path, - &req.filename, - ) + .find_file_by_local_path(&req.account_hash, &normalized_file_path, &req.filename) .await { Ok(Some(info)) => { diff --git a/src/services/file_service.rs b/src/services/file_service.rs index d72ef39..87cb4d9 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -603,29 +603,37 @@ impl FileService { "⚠️ Client requested deletion of already-deleted file_id={}, checking for actual active file", file_id ); - + // Try to find actual active file by path/name // This handles the case where client has wrong file_id (from previous deleted file) // Now searches by updated_time (latest active file) - match self.storage.find_file_by_path_and_name( - &file_info.account_hash, - &file_info.file_path, - &file_info.filename, - ).await { + match self + .storage + .find_file_by_path_and_name( + &file_info.account_hash, + &file_info.file_path, + &file_info.filename, + ) + .await + { Ok(Some(active_file)) => { warn!( "🔄 Found actual active file: old_file_id={}, new_file_id={}, path={}", file_id, active_file.file_id, file_info.file_path ); // Delete the actual active file instead - match self.storage.delete_file(&active_file.account_hash, active_file.file_id).await { + match self + .storage + .delete_file(&active_file.account_hash, active_file.file_id) + .await + { Ok(_) => { self.files.lock().await.remove(&active_file.file_id); info!( "✅ Successfully deleted actual active file: file_id={}, path={}", active_file.file_id, file_info.file_path ); - + // Send notification for the actual deleted file if let Some(nm) = &self.notification_manager { let notification = FileUpdateNotification { @@ -638,11 +646,14 @@ impl FileService { }; let _ = nm.broadcast_file_update(notification).await; } - + return Ok(()); } Err(e) => { - error!("Failed to delete actual active file: file_id={}, error={}", active_file.file_id, e); + error!( + "Failed to delete actual active file: file_id={}, error={}", + active_file.file_id, e + ); return Err(e); } } @@ -652,7 +663,10 @@ impl FileService { return Ok(()); } Err(e) => { - warn!("Error searching for active file: {}, treating as already deleted", e); + warn!( + "Error searching for active file: {}, treating as already deleted", + e + ); return Ok(()); } } diff --git a/src/storage/memory.rs b/src/storage/memory.rs index 1b21678..cd26478 100644 --- a/src/storage/memory.rs +++ b/src/storage/memory.rs @@ -675,9 +675,7 @@ impl Storage for MemoryStorage { } // Sort by updated_time descending (most recent first) - matching_files.sort_by(|a, b| { - b.updated_time.seconds.cmp(&a.updated_time.seconds) - }); + matching_files.sort_by(|a, b| b.updated_time.seconds.cmp(&a.updated_time.seconds)); Ok(matching_files.first().map(|&f| f.clone())) } diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 684f02b..10a8368 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1052,7 +1052,7 @@ impl MySqlStorage { r#"SELECT COUNT(*) > 0 FROM 
information_schema.routines WHERE routine_schema = DATABASE() AND routine_name = 'reset_monthly_quota' - AND routine_type = 'PROCEDURE'"# + AND routine_type = 'PROCEDURE'"#, ) .fetch_one(self.get_sqlx_pool()) .await @@ -1060,7 +1060,7 @@ impl MySqlStorage { if !procedure_exists { info!("Creating reset_monthly_quota stored procedure"); - + // Create procedure without DELIMITER (sqlx doesn't support it) // We need to execute the entire procedure definition as a single statement let procedure_sql = r#" @@ -1109,12 +1109,15 @@ END"#; let _ = sqlx::query("DROP PROCEDURE IF EXISTS reset_monthly_quota") .execute(self.get_sqlx_pool()) .await; - + if let Err(e2) = sqlx::query(procedure_sql) .execute(self.get_sqlx_pool()) .await { - warn!("Failed to create reset_monthly_quota procedure: {}. Error: {}", e, e2); + warn!( + "Failed to create reset_monthly_quota procedure: {}. Error: {}", + e, e2 + ); // Don't fail the entire migration if procedure creation fails } else { info!("✅ reset_monthly_quota procedure created successfully"); @@ -1420,8 +1423,7 @@ impl Storage for MySqlStorage { file_path: &str, filename: &str, ) -> Result> { - MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename) - .await + MySqlFileExt::find_file_by_path_and_name(self, account_hash, file_path, filename).await } /// Find file by criteria @@ -1656,15 +1658,20 @@ impl Storage for MySqlStorage { new_revision: i64, ) -> Result<()> { use sqlx::Row; - + // First, get file information to obtain account_hash, group_id, watcher_id for encryption let row_opt = sqlx::query( - r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"# + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"#, ) .bind(file_id) .fetch_optional(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to get file info for rename (schema/database error): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to get file info for rename (schema/database error): {}", + e + )) + })?; let (account_hash, group_id, watcher_id) = match row_opt { Some(row) => { @@ -1674,7 +1681,10 @@ impl Storage for MySqlStorage { (account_hash, group_id, watcher_id) } None => { - return Err(StorageError::NotFound(format!("File not found: {}", file_id))); + return Err(StorageError::NotFound(format!( + "File not found: {}", + file_id + ))); } }; @@ -1701,7 +1711,12 @@ impl Storage for MySqlStorage { .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| StorageError::Database(format!("Failed to rename file (schema/VARBINARY type mismatch): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to rename file (schema/VARBINARY type mismatch): {}", + e + )) + })?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index fd9896d..20251d9 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -173,7 +173,8 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?; - let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file { + let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file + { // Active file exists → this is an update, increment revision debug!( "📝 Active file found (file_id={}, revision={}), incrementing 
revision", @@ -639,7 +640,7 @@ impl MySqlFileExt for MySqlStorage { } else { crate::utils::crypto::make_eq_index(account_hash.as_bytes(), file_path) }; - + let row_opt = sqlx::query( r#"SELECT file_id, account_hash, device_hash, file_path, filename, file_hash, From daa10d9822f00185eb1f3b8ee802108fc23720fb Mon Sep 17 00:00:00 2001 From: Yongjin Chong Date: Tue, 18 Nov 2025 21:48:01 -0700 Subject: [PATCH 71/71] Update delete routine --- src/storage/mysql_file.rs | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 20251d9..fd9896d 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -173,8 +173,7 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { error!("❌ Active file existence check failed(sqlx): {}", e); StorageError::Database(format!("Active file existence check failed: {}", e)) })?; - let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file - { + let new_revision = if let Some((existing_file_id, existing_revision)) = existing_active_file { // Active file exists → this is an update, increment revision debug!( "📝 Active file found (file_id={}, revision={}), incrementing revision", @@ -640,7 +639,7 @@ impl MySqlFileExt for MySqlStorage { } else { crate::utils::crypto::make_eq_index(account_hash.as_bytes(), file_path) }; - + let row_opt = sqlx::query( r#"SELECT file_id, account_hash, device_hash, file_path, filename, file_hash,