diff --git a/.env.home b/.env.home new file mode 100644 index 0000000..7c985f2 --- /dev/null +++ b/.env.home @@ -0,0 +1,84 @@ +# ============================================================================= +# Cosmic Sync Server Environment Variables - Test Configuration +# ============================================================================= + +# ============================================================================= +# Database Configuration +# ============================================================================= +DB_USER="root" +DB_PASS="recognizer" +DB_NAME="cosmic_sync" +DB_HOST="127.0.0.1" +DB_PORT=3306 +DB_POOL=5 + +# ============================================================================= +# Server Configuration +# ============================================================================= +SERVER_HOST=0.0.0.0 +SERVER_PORT=50051 +WORKER_THREADS=4 +HEARTBEAT_INTERVAL_SECS=30 +AUTH_TOKEN_EXPIRY_HOURS=24 + +# OAuth Configuration +OAUTH_CLIENT_ID=cosmic-sync +OAUTH_CLIENT_SECRET=cosmicsecretsocmicsecret +OAUTH_REDIRECT_URI=http://192.168.50.100:8080/oauth/callback +OAUTH_AUTH_URL=http://192.168.50.100:4000/oauth/authorize +OAUTH_TOKEN_URL=http://192.168.50.100:4000/oauth/token +OAUTH_USER_INFO_URL=http://192.168.50.100:4000/api/settings +OAUTH_SCOPE=profile:read + +# Request Limits +MAX_CONCURRENT_REQUESTS=100 +MAX_FILE_SIZE=52428800 + +# Logging Configuration +RUST_LOG=cosmic_sync_server=debug,info +LOG_LEVEL=info +LOG_TO_FILE=true +LOG_FILE=logs/cosmic-sync-server.log +LOG_MAX_FILE_SIZE=10485760 +LOG_MAX_BACKUPS=5 + +# ============================================================================= +# Storage Configuration - S3 MinIO +# ============================================================================= +STORAGE_TYPE="s3" +AWS_REGION="us-east-2" +S3_BUCKET="cosmic-sync-files" +S3_KEY_PREFIX="files/" +AWS_ACCESS_KEY_ID="minioadmin" +AWS_SECRET_ACCESS_KEY="minioadmin" +S3_ENDPOINT_URL="http://127.0.0.1:9000" +S3_FORCE_PATH_STYLE="true" +S3_TIMEOUT_SECONDS="30" +S3_MAX_RETRIES="3" + +# ============================================================================= +# Development Mode +# ============================================================================= +COSMIC_SYNC_DEV_MODE="1" +COSMIC_SYNC_TEST_MODE="1" +COSMIC_SYNC_DEBUG_MODE="1" + +# # Feature Flags +# COSMIC_SYNC_TEST_MODE=false +# COSMIC_SYNC_DEBUG_MODE=false +# ENABLE_METRICS=false +# STORAGE_ENCRYPTION=true +# REQUEST_VALIDATION=true + +SERVER_ENCODE_KEY=c3e15e2f727cf777380f23a9f9fa8156c5f4f7f3e697f6dc95a47372e76ac6bf + +# OFF +RABBITMQ_ENABLED=false + +# ON +# RABBITMQ_ENABLED=true +# RABBITMQ_URL=amqp://guest:guest@127.0.0.1:5672/%2f +# RABBITMQ_EXCHANGE=cosmic.sync +# RABBITMQ_QUEUE_PREFIX=cosmic +# RABBITMQ_PREFETCH=64 +# RABBITMQ_DURABLE=true diff --git a/.env.office b/.env.office new file mode 100644 index 0000000..6dc1709 --- /dev/null +++ b/.env.office @@ -0,0 +1,84 @@ +# ============================================================================= +# Cosmic Sync Server Environment Variables - Test Configuration +# ============================================================================= + +# ============================================================================= +# Database Configuration +# ============================================================================= +DB_USER="root" +DB_PASS="recognizer" +DB_NAME="cosmic_sync" +DB_HOST="127.0.0.1" +DB_PORT=3306 +DB_POOL=5 + +# ============================================================================= +# Server 
Configuration
+# =============================================================================
+SERVER_HOST=0.0.0.0
+SERVER_PORT=50051
+WORKER_THREADS=4
+HEARTBEAT_INTERVAL_SECS=30
+AUTH_TOKEN_EXPIRY_HOURS=24
+
+# OAuth Configuration
+OAUTH_CLIENT_ID=cosmic-sync
+OAUTH_CLIENT_SECRET=cosmicsecretsocmicsecret
+OAUTH_REDIRECT_URI=http://10.241.62.167:8080/oauth/callback
+OAUTH_AUTH_URL=http://10.241.62.167:4000/oauth/authorize
+OAUTH_TOKEN_URL=http://10.241.62.167:4000/oauth/token
+OAUTH_USER_INFO_URL=http://10.241.62.167:4000/api/settings
+OAUTH_SCOPE=profile:read
+
+# Request Limits
+MAX_CONCURRENT_REQUESTS=100
+MAX_FILE_SIZE=52428800
+
+# Logging Configuration
+RUST_LOG=cosmic_sync_server=debug,info
+LOG_LEVEL=info
+LOG_TO_FILE=true
+LOG_FILE=logs/cosmic-sync-server.log
+LOG_MAX_FILE_SIZE=10485760
+LOG_MAX_BACKUPS=5
+
+# =============================================================================
+# Storage Configuration - S3 MinIO
+# =============================================================================
+STORAGE_TYPE="s3"
+AWS_REGION="us-east-2"
+S3_BUCKET="cosmic-sync-files"
+S3_KEY_PREFIX="files/"
+AWS_ACCESS_KEY_ID="minioadmin"
+AWS_SECRET_ACCESS_KEY="minioadmin"
+S3_ENDPOINT_URL="http://127.0.0.1:9000"
+S3_FORCE_PATH_STYLE="true"
+S3_TIMEOUT_SECONDS="30"
+S3_MAX_RETRIES="3"
+
+# =============================================================================
+# Development Mode
+# =============================================================================
+COSMIC_SYNC_DEV_MODE="1"
+COSMIC_SYNC_TEST_MODE="1"
+COSMIC_SYNC_DEBUG_MODE="1"
+
+# # Feature Flags
+# COSMIC_SYNC_TEST_MODE=false
+# COSMIC_SYNC_DEBUG_MODE=false
+# ENABLE_METRICS=false
+# STORAGE_ENCRYPTION=true
+# REQUEST_VALIDATION=true
+
+SERVER_ENCODE_KEY=c3e15e2f727cf777380f23a9f9fa8156c5f4f7f3e697f6dc95a47372e76ac6bf
+
+# OFF
+RABBITMQ_ENABLED=false
+
+# ON
+# RABBITMQ_ENABLED=true
+# RABBITMQ_URL=amqp://guest:guest@127.0.0.1:5672/%2f
+# RABBITMQ_EXCHANGE=cosmic.sync
+# RABBITMQ_QUEUE_PREFIX=cosmic
+# RABBITMQ_PREFETCH=64
+# RABBITMQ_DURABLE=true
diff --git a/migrations/sql/20251119_add_deleted_file_id.sql b/migrations/sql/20251119_add_deleted_file_id.sql
new file mode 100644
index 0000000..ef53421
--- /dev/null
+++ b/migrations/sql/20251119_add_deleted_file_id.sql
@@ -0,0 +1,23 @@
+-- Add deleted_file_id column to files table for tracking deleted file references
+-- Date: 2025-11-19
+-- MySQL does not support ADD COLUMN/ADD INDEX IF NOT EXISTS, so this uses the
+-- same INFORMATION_SCHEMA guard pattern as the other 20251119 migrations.
+
+SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'files' AND COLUMN_NAME = 'deleted_file_id');
+SET @add_column_sql = IF(@column_exists = 0,
+    'ALTER TABLE files ADD COLUMN deleted_file_id BIGINT UNSIGNED NULL COMMENT ''Reference to original file_id when operation_type=DELETE'' AFTER revision',
+    'SELECT "Column deleted_file_id already exists" AS Info'
+);
+PREPARE stmt FROM @add_column_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+-- Add index for efficient lookup
+SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'files' AND INDEX_NAME = 'idx_deleted_file_id');
+SET @add_index_sql = IF(@index_exists = 0,
+    'ALTER TABLE files ADD INDEX idx_deleted_file_id (deleted_file_id)',
+    'SELECT "Index idx_deleted_file_id already exists" AS Info'
+);
+PREPARE stmt FROM @add_index_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
diff --git a/migrations/sql/20251119_add_deleted_file_id_down.sql b/migrations/sql/20251119_add_deleted_file_id_down.sql
new file mode 100644
index 0000000..bac5af7
--- /dev/null
+++ b/migrations/sql/20251119_add_deleted_file_id_down.sql
@@ -0,0 +1,21 @@
+-- Rollback: Remove deleted_file_id column from files table
+-- Date: 2025-11-19
+-- MySQL does not support DROP INDEX/DROP COLUMN IF EXISTS; guard via INFORMATION_SCHEMA.
+
+SET @index_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.STATISTICS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'files' AND INDEX_NAME = 'idx_deleted_file_id');
+SET @drop_index_sql = IF(@index_exists > 0,
+    'ALTER TABLE files DROP INDEX idx_deleted_file_id',
+    'SELECT "Index idx_deleted_file_id does not exist" AS Info'
+);
+PREPARE stmt FROM @drop_index_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+SET @column_exists = (SELECT COUNT(*) FROM INFORMATION_SCHEMA.COLUMNS WHERE TABLE_SCHEMA = DATABASE() AND TABLE_NAME = 'files' AND COLUMN_NAME = 'deleted_file_id');
+SET @drop_column_sql = IF(@column_exists > 0,
+    'ALTER TABLE files DROP COLUMN deleted_file_id',
+    'SELECT "Column deleted_file_id does not exist" AS Info'
+);
+PREPARE stmt FROM @drop_column_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
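The guarded SET … PREPARE … EXECUTE pattern used above relies on session state: user variables and server-side prepared statements exist only on the connection that created them, so each script must run statement by statement on a single connection, and the scripts must run in dependency order (the ENUM redefinition in 20251119_redefine_operation_type.sql has to precede 20251119_migrate_operation_types.sql, even though the filenames sort the other way). A minimal runner sketch under those assumptions — the `apply_sql_file` helper is illustrative, not part of this change, and assumes `sqlx`, `tokio`, and `anyhow`:

```rust
use sqlx::{Executor, MySqlPool};

/// Hypothetical helper (not in this PR): apply one guarded migration script.
async fn apply_sql_file(pool: &MySqlPool, path: &str) -> anyhow::Result<()> {
    let script = tokio::fs::read_to_string(path).await?;
    // One connection for the whole script: @column_exists and the prepared
    // statement are session-scoped and would be lost across pool checkouts.
    let mut conn = pool.acquire().await?;
    // Naive split on ';' is safe here because no quoted fragment in these
    // scripts contains a semicolon.
    for raw in script.split(';') {
        let stmt = raw.trim();
        let comment_only = stmt
            .lines()
            .all(|l| l.trim().is_empty() || l.trim_start().starts_with("--"));
        if comment_only {
            continue; // skip blank or comment-only chunks (e.g. trailing notes)
        }
        // Raw text-protocol execution: PREPARE/EXECUTE cannot themselves be
        // sent as server-side prepared statements.
        (&mut *conn).execute(stmt).await?;
    }
    Ok(())
}
```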
diff --git a/migrations/sql/20251119_add_encrypted_data_to_files.sql b/migrations/sql/20251119_add_encrypted_data_to_files.sql
new file mode 100644
index 0000000..33dbb2d
--- /dev/null
+++ b/migrations/sql/20251119_add_encrypted_data_to_files.sql
@@ -0,0 +1,26 @@
+-- Add encrypted_data column to files table for deletion recovery
+-- This column stores encrypted file data for recovery purposes
+
+SET @column_exists = (
+    SELECT COUNT(*)
+    FROM INFORMATION_SCHEMA.COLUMNS
+    WHERE TABLE_SCHEMA = DATABASE()
+    AND TABLE_NAME = 'files'
+    AND COLUMN_NAME = 'encrypted_data'
+);
+
+SET @add_column_sql = IF(@column_exists = 0,
+    'ALTER TABLE files ADD COLUMN encrypted_data LONGBLOB NULL AFTER key_id',
+    'SELECT "Column encrypted_data already exists" AS Info'
+);
+
+PREPARE stmt FROM @add_column_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+
+
+
+
+
+
diff --git a/migrations/sql/20251119_add_encrypted_data_to_files_down.sql b/migrations/sql/20251119_add_encrypted_data_to_files_down.sql
new file mode 100644
index 0000000..5e15a36
--- /dev/null
+++ b/migrations/sql/20251119_add_encrypted_data_to_files_down.sql
@@ -0,0 +1,25 @@
+-- Remove encrypted_data column from files table
+
+SET @column_exists = (
+    SELECT COUNT(*)
+    FROM INFORMATION_SCHEMA.COLUMNS
+    WHERE TABLE_SCHEMA = DATABASE()
+    AND TABLE_NAME = 'files'
+    AND COLUMN_NAME = 'encrypted_data'
+);
+
+SET @drop_column_sql = IF(@column_exists > 0,
+    'ALTER TABLE files DROP COLUMN encrypted_data',
+    'SELECT "Column encrypted_data does not exist" AS Info'
+);
+
+PREPARE stmt FROM @drop_column_sql;
+EXECUTE stmt;
+DEALLOCATE PREPARE stmt;
+
+
+
+
+
+
+
diff --git a/migrations/sql/20251119_migrate_operation_types.sql b/migrations/sql/20251119_migrate_operation_types.sql
new file mode 100644
index 0000000..416e206
--- /dev/null
+++ b/migrations/sql/20251119_migrate_operation_types.sql
@@ -0,0 +1,40 @@
+-- Migrate existing operation_type values to new schema
+-- Date: 2025-11-19
+
+-- 1. is_deleted=1 && file_hash IS NULL → DELETE (deletion history records)
+UPDATE files
+SET operation_type = 'DELETE'
+WHERE is_deleted = 1
+  AND (file_hash IS NULL OR file_hash = '' OR file_hash = UNHEX(SHA2('', 256)));
+
+-- 2. is_deleted=1 && file_hash IS NOT NULL → UPDATE (previous versions)
+UPDATE files
+SET operation_type = 'UPDATE'
+WHERE is_deleted = 1
+  AND file_hash IS NOT NULL
+  AND file_hash != ''
+  AND file_hash != UNHEX(SHA2('', 256));
+
+-- 3. is_deleted=0 && revision=1 && operation_type='UPLOAD' → CREATE (first version)
+UPDATE files
+SET operation_type = 'CREATE'
+WHERE is_deleted = 0
+  AND revision = 1
+  AND operation_type = 'UPLOAD';
+
+-- 4. is_deleted=0 && revision>1 && operation_type='UPLOAD' → UPDATE (updated versions)
+UPDATE files
+SET operation_type = 'UPDATE'
+WHERE is_deleted = 0
+  AND revision > 1
+  AND operation_type = 'UPLOAD';
+
+-- 5.
Keep RENAME as is (no change needed) +-- RENAME operations remain as RENAME + + + + + + + diff --git a/migrations/sql/20251119_migrate_operation_types_down.sql b/migrations/sql/20251119_migrate_operation_types_down.sql new file mode 100644 index 0000000..86c642b --- /dev/null +++ b/migrations/sql/20251119_migrate_operation_types_down.sql @@ -0,0 +1,16 @@ +-- Rollback: Reset all operation_type values to UPLOAD (original state) +-- Date: 2025-11-19 + +-- Reset all CREATE, UPDATE, DELETE, RESTORE to UPLOAD +UPDATE files +SET operation_type = 'UPLOAD' +WHERE operation_type IN ('CREATE', 'UPDATE', 'DELETE', 'RESTORE'); + +-- Keep RENAME as is + + + + + + + diff --git a/migrations/sql/20251119_redefine_operation_type.sql b/migrations/sql/20251119_redefine_operation_type.sql new file mode 100644 index 0000000..69c366b --- /dev/null +++ b/migrations/sql/20251119_redefine_operation_type.sql @@ -0,0 +1,15 @@ +-- Redefine operation_type ENUM to support new operations +-- Date: 2025-11-19 + +-- Change operation_type column to support CREATE, UPDATE, DELETE, RENAME, RESTORE +ALTER TABLE files +MODIFY COLUMN operation_type ENUM('CREATE', 'UPDATE', 'DELETE', 'RENAME', 'RESTORE', 'UPLOAD') +DEFAULT 'CREATE' +COMMENT 'File operation type: CREATE=new file, UPDATE=new version, DELETE=deleted, RENAME=renamed, RESTORE=restored from deletion, UPLOAD=legacy'; + + + + + + + diff --git a/migrations/sql/20251119_redefine_operation_type_down.sql b/migrations/sql/20251119_redefine_operation_type_down.sql new file mode 100644 index 0000000..0d9bf8c --- /dev/null +++ b/migrations/sql/20251119_redefine_operation_type_down.sql @@ -0,0 +1,13 @@ +-- Rollback: Restore original operation_type ENUM +-- Date: 2025-11-19 + +ALTER TABLE files +MODIFY COLUMN operation_type ENUM('UPLOAD', 'DELETE', 'RENAME') +DEFAULT 'UPLOAD'; + + + + + + + diff --git a/proto/sync.proto b/proto/sync.proto index ce12e0d..242e136 100755 --- a/proto/sync.proto +++ b/proto/sync.proto @@ -32,7 +32,8 @@ service SyncService { // File synchronization functions rpc UploadFile(UploadFileRequest) returns (UploadFileResponse); rpc DeleteFile(DeleteFileRequest) returns (DeleteFileResponse); - rpc RenameFile(RenameFileRequest) returns (RenameFileResponse); // NEW: Rename file + rpc RenameFile(RenameFileRequest) returns (RenameFileResponse); // Rename file + rpc RestoreFile(RestoreFileRequest) returns (RestoreFileResponse); // Restore deleted file rpc DownloadFile(DownloadFileRequest) returns (DownloadFileResponse); rpc ListFiles(ListFilesRequest) returns (ListFilesResponse); // Streaming variants for large payloads (hybrid approach) @@ -501,9 +502,26 @@ message DeleteFileRequest { message DeleteFileResponse { bool success = 1; string return_message = 2; + uint64 delete_record_id = 3; // file_id of DELETE operation_type record (for recovery) + int64 new_revision = 4; // New revision after delete operation } -// NEW: Rename file request +// Restore deleted file request +message RestoreFileRequest { + string auth_token = 1; + string account_hash = 2; + uint64 delete_record_id = 3; // file_id of DELETE operation_type record +} + +// Restore deleted file response +message RestoreFileResponse { + bool success = 1; + string return_message = 2; + uint64 restored_file_id = 3; // file_id of restored file + FileInfo file_info = 4; +} + +// Rename file request message RenameFileRequest { string account_hash = 1; string device_hash = 2; diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 6e4e469..6f1ac8d 100644 --- a/src/handlers/file/delete.rs +++ 
b/src/handlers/file/delete.rs @@ -77,10 +77,10 @@ pub async fn handle_delete_file( } match delete_result { - Ok(_) => { + Ok((deletion_record_id, new_revision)) => { info!( - "File deleted successfully: filename={}, file_id={}", - req.filename, file_id + "File deleted successfully: filename={}, file_id={}, deletion_record_id={}, new_revision={}", + req.filename, file_id, deletion_record_id, new_revision ); // Publish cross-instance file deleted and version deleted events @@ -90,7 +90,7 @@ pub async fn handle_delete_file( file_path: req.file_path.clone(), filename: req.filename.clone(), file_id, - revision: req.revision + 1, + revision: new_revision, }; publish_file_deleted_event(&handler.app_state.event_bus, &event_data).await; @@ -98,6 +98,8 @@ pub async fn handle_delete_file( Ok(Response::new(response::file_delete_success( "File deleted successfully", + deletion_record_id, + new_revision, ))) } Err(e) => { diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index eb13123..9233852 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -1,5 +1,5 @@ use tonic::{Response, Status}; -use tracing::{debug, error}; +use tracing::{debug, error, info}; use super::super::file_handler::FileHandler; use crate::sync::{FindFileRequest, FindFileResponse}; @@ -8,6 +8,7 @@ pub async fn handle_find_file_by_criteria( handler: &FileHandler, req: FindFileRequest, ) -> Result, Status> { + info!("🔍 [v2025.11.19-FIXED] FindFileByCriteria handler called"); debug!( "FindFileByCriteria request: account={}, file_path={}, file_name={}", req.account_hash, req.file_path, req.file_name diff --git a/src/handlers/file/get_info.rs b/src/handlers/file/get_info.rs index 7e0b144..d3f3467 100644 --- a/src/handlers/file/get_info.rs +++ b/src/handlers/file/get_info.rs @@ -9,7 +9,7 @@ pub async fn handle_get_file_info( req: GetFileInfoRequest, ) -> Result, Status> { debug!( - "GetFileInfo request: account={}, device={}, file_id={}", + "🔍 [GetFileInfo] Request received: account={}, device={}, file_id={}", req.account_hash, req.device_hash, req.file_id ); @@ -55,8 +55,8 @@ pub async fn handle_get_file_info( }; debug!( - "✅ GetFileInfo successful: file_id={}, filename={}, is_encrypted={}", - file_info.file_id, file_info.filename, file_info.is_encrypted + "✅ [GetFileInfo] Success: file_id={}, filename={}, revision={}, is_encrypted={}", + file_info.file_id, file_info.filename, file_info.revision, file_info.is_encrypted ); Ok(Response::new(GetFileInfoResponse { diff --git a/src/handlers/file/mod.rs b/src/handlers/file/mod.rs index da01489..74d0e73 100644 --- a/src/handlers/file/mod.rs +++ b/src/handlers/file/mod.rs @@ -5,4 +5,5 @@ pub mod find; pub mod get_info; pub mod list; pub mod rename; +pub mod restore; pub mod upload; diff --git a/src/handlers/file/restore.rs b/src/handlers/file/restore.rs new file mode 100644 index 0000000..27c6ebd --- /dev/null +++ b/src/handlers/file/restore.rs @@ -0,0 +1,80 @@ +use tonic::{Response, Status}; +use tracing::{debug, error, info}; + +use super::super::file_handler::FileHandler; +use crate::models::file::FileInfo; +use crate::sync::{RestoreFileRequest, RestoreFileResponse}; + +pub async fn handle_restore_file( + handler: &FileHandler, + req: RestoreFileRequest, +) -> Result, Status> { + info!( + "RestoreFile request: account={}, delete_record_id={}", + req.account_hash, req.delete_record_id + ); + + // Verify authentication + match handler.app_state.oauth.verify_token(&req.auth_token).await { + Ok(v) if v.valid => {} + _ => { + return 
Ok(Response::new(RestoreFileResponse { + success: false, + return_message: "Authentication failed".to_string(), + restored_file_id: 0, + file_info: None, + })); + } + } + + // Restore file + match handler + .app_state + .file + .restore_file(&req.account_hash, req.delete_record_id) + .await + { + Ok(file_info) => { + let proto_file_info = crate::sync::FileInfo { + file_id: file_info.file_id, + filename: file_info.filename.clone(), + file_hash: file_info.file_hash.clone(), + device_hash: file_info.device_hash.clone(), + group_id: file_info.group_id, + watcher_id: file_info.watcher_id, + is_encrypted: file_info.is_encrypted, + file_path: file_info.file_path.clone(), + updated_time: Some(file_info.updated_time), + revision: file_info.revision, + file_size: file_info.size, + key_id: file_info.key_id.clone().unwrap_or_default(), + unix_permissions: file_info.unix_permissions, + }; + + info!( + "File restored successfully: delete_record_id={}, restored_file_id={}", + req.delete_record_id, file_info.file_id + ); + + Ok(Response::new(RestoreFileResponse { + success: true, + return_message: "File restored successfully".to_string(), + restored_file_id: file_info.file_id, + file_info: Some(proto_file_info), + })) + } + Err(e) => { + error!( + "File restore failed: delete_record_id={}, error={}", + req.delete_record_id, e + ); + + Ok(Response::new(RestoreFileResponse { + success: false, + return_message: format!("File restore failed: {}", e), + restored_file_id: 0, + file_info: None, + })) + } + } +} diff --git a/src/handlers/file_handler.rs b/src/handlers/file_handler.rs index 504f31d..0ea11af 100644 --- a/src/handlers/file_handler.rs +++ b/src/handlers/file_handler.rs @@ -252,6 +252,15 @@ impl FileHandler { super::file::delete::handle_delete_file(self, req).await } + /// Restore file from deletion record + pub async fn restore_file( + &self, + request: Request, + ) -> Result, Status> { + let req = request.into_inner(); + super::file::restore::handle_restore_file(self, req).await + } + /// Validate file for deletion pub(crate) async fn validate_file_for_deletion( &self, @@ -273,6 +282,8 @@ impl FileHandler { warn!("File already deleted: file_id={}", file_id); return Err(Response::new(response::file_delete_success( "File already deleted", + 0, // Already deleted, no deletion_record_id + 0, // No new_revision for already deleted ))); } Ok(file_id) diff --git a/src/main.rs b/src/main.rs index 38d8b18..7fd29f4 100644 --- a/src/main.rs +++ b/src/main.rs @@ -76,6 +76,16 @@ async fn start_with_container() -> Result<()> { /// Start server in legacy method async fn start_legacy() -> Result<()> { + // Display version information + info!("🚀 ═══════════════════════════════════════════════════"); + info!("🚀 COSMIC Sync Server v2025.11.19-fix-find-deleted"); + info!("🚀 Package Version: {}", env!("CARGO_PKG_VERSION")); + info!("🚀 ═══════════════════════════════════════════════════"); + info!("✅ find_file_by_criteria: UPDATED with is_deleted=FALSE filter"); + info!("✅ Query sorting: ORDER BY updated_time DESC, revision DESC, id DESC"); + info!("✅ Double validation: is_deleted check enabled"); + info!("🚀 ═══════════════════════════════════════════════════"); + // Build configuration with validation let config = build_config().await?; diff --git a/src/server/service.rs b/src/server/service.rs index 776f760..378518b 100644 --- a/src/server/service.rs +++ b/src/server/service.rs @@ -21,13 +21,14 @@ use crate::sync::{ OAuthExchangeRequest, OAuthExchangeResponse, RegisterDeviceRequest, RegisterDeviceResponse, 
RegisterWatcherGroupRequest, RegisterWatcherGroupResponse, RegisterWatcherPresetRequest, RegisterWatcherPresetResponse, RenameFileRequest, RenameFileResponse, - RequestEncryptionKeyRequest, RequestEncryptionKeyResponse, RestoreFileVersionRequest, - RestoreFileVersionResponse, SubscribeRequest, SyncConfigurationRequest, - SyncConfigurationResponse, UpdateDeviceInfoRequest, UpdateDeviceInfoResponse, - UpdateWatcherGroupRequest, UpdateWatcherGroupResponse, UpdateWatcherPresetRequest, - UpdateWatcherPresetResponse, UploadFileChunk, UploadFileRequest, UploadFileResponse, - ValidateTokenRequest, ValidateTokenResponse, VerifyLoginRequest, VerifyLoginResponse, - VersionUpdateNotification, WatcherGroupUpdateNotification, WatcherPresetUpdateNotification, + RequestEncryptionKeyRequest, RequestEncryptionKeyResponse, RestoreFileRequest, + RestoreFileResponse, RestoreFileVersionRequest, RestoreFileVersionResponse, SubscribeRequest, + SyncConfigurationRequest, SyncConfigurationResponse, UpdateDeviceInfoRequest, + UpdateDeviceInfoResponse, UpdateWatcherGroupRequest, UpdateWatcherGroupResponse, + UpdateWatcherPresetRequest, UpdateWatcherPresetResponse, UploadFileChunk, UploadFileRequest, + UploadFileResponse, ValidateTokenRequest, ValidateTokenResponse, VerifyLoginRequest, + VerifyLoginResponse, VersionUpdateNotification, WatcherGroupUpdateNotification, + WatcherPresetUpdateNotification, }; use base64::Engine as _; use futures::Stream; @@ -811,6 +812,14 @@ impl SyncService for SyncServiceImpl { self.file_handler.handle_rename_file(request).await } + async fn restore_file( + &self, + request: Request, + ) -> Result, Status> { + debug!("File restore request received"); + self.file_handler.restore_file(request).await + } + async fn find_file_by_criteria( &self, request: Request, diff --git a/src/services/file_service.rs b/src/services/file_service.rs index 87cb4d9..e5a2736 100644 --- a/src/services/file_service.rs +++ b/src/services/file_service.rs @@ -581,7 +581,8 @@ impl FileService { } /// Handle file deletion - pub async fn delete_file(&self, file_id: u64) -> Result<(), StorageError> { + /// Returns (deletion_record_id, new_revision) + pub async fn delete_file(&self, file_id: u64) -> Result<(u64, i64), StorageError> { info!("Start file deletion processing: file_id={}", file_id); // Query file information (include deletion status) @@ -627,11 +628,11 @@ impl FileService { .delete_file(&active_file.account_hash, active_file.file_id) .await { - Ok(_) => { + Ok((deletion_record_id, new_revision)) => { self.files.lock().await.remove(&active_file.file_id); info!( - "✅ Successfully deleted actual active file: file_id={}, path={}", - active_file.file_id, file_info.file_path + "✅ Successfully deleted actual active file: file_id={}, path={}, deletion_record_id={}", + active_file.file_id, file_info.file_path, deletion_record_id ); // Send notification for the actual deleted file @@ -647,7 +648,7 @@ impl FileService { let _ = nm.broadcast_file_update(notification).await; } - return Ok(()); + return Ok((deletion_record_id, new_revision)); } Err(e) => { error!( @@ -660,14 +661,14 @@ impl FileService { } Ok(None) => { info!("No active file found with same path, file is truly deleted: file_id={}", file_id); - return Ok(()); + return Ok((0, 0)); // Already deleted, no deletion_record_id or revision } Err(e) => { warn!( "Error searching for active file: {}, treating as already deleted", e ); - return Ok(()); + return Ok((0, 0)); // Already deleted, no deletion_record_id or revision } } } @@ -683,7 +684,7 @@ impl FileService 
{ .delete_file(&file_info.account_hash, file_id) .await { - Ok(_) => { + Ok((deletion_record_id, new_revision)) => { // Delete from memory cache self.files.lock().await.remove(&file_id); @@ -710,13 +711,14 @@ impl FileService { } info!( - "File deletion complete: file_id={}, account_hash={}, file_path={}, filename={}", + "File deletion complete: file_id={}, account_hash={}, file_path={}, filename={}, deletion_record_id={}", file_id, file_info.account_hash, file_info.file_path, - file_info.filename + file_info.filename, + deletion_record_id ); - Ok(()) + Ok((deletion_record_id, new_revision)) } Err(e) => { error!("File deletion failed: file_id={}, error={}", file_id, e); @@ -734,6 +736,21 @@ impl FileService { } } + /// Restore file from deletion record + pub async fn restore_file( + &self, + account_hash: &str, + delete_record_id: u64, + ) -> Result { + info!( + "Restoring file: account_hash={}, delete_record_id={}", + account_hash, delete_record_id + ); + self.storage + .restore_file(account_hash, delete_record_id) + .await + } + /// Device registration: use register_device everywhere pub async fn register_device(&self, device: &Device) -> Result<(), StorageError> { debug!("Registering device: device_hash={}", device.device_hash); diff --git a/src/storage/memory.rs b/src/storage/memory.rs index cd26478..acba188 100644 --- a/src/storage/memory.rs +++ b/src/storage/memory.rs @@ -375,19 +375,29 @@ impl Storage for MemoryStorage { } /// Delete a file including its metadata and content - async fn delete_file(&self, account_hash: &str, file_id: u64) -> crate::storage::Result<()> { + /// Returns (dummy_deletion_record_id, dummy_revision) for MemoryStorage (testing only) + async fn delete_file( + &self, + account_hash: &str, + file_id: u64, + ) -> crate::storage::Result<(u64, i64)> { let mut data = self.data.lock().await; if let Some(file) = data.files.get(&file_id) { // Check if file belongs to specific account if file.account_hash == account_hash { + let current_revision = file.revision; + // Delete file data data.file_data.remove(&file_id); // Delete file metadata data.files.remove(&file_id); - Ok(()) + // Return dummy deletion_record_id and new_revision for testing + let dummy_deletion_record_id = rand::random::(); + let new_revision = current_revision + 1; + Ok((dummy_deletion_record_id, new_revision)) } else { Err(StorageError::PermissionDenied(format!( "File not owned by user: {}", @@ -402,6 +412,18 @@ impl Storage for MemoryStorage { } } + /// Restore deleted file (MemoryStorage stub - not fully implemented) + async fn restore_file( + &self, + _account_hash: &str, + _delete_record_id: u64, + ) -> crate::storage::Result { + // MemoryStorage is for testing only; restore is not implemented + Err(StorageError::NotImplemented( + "restore_file not implemented for MemoryStorage".to_string(), + )) + } + /// Store file data async fn store_file_data( &self, diff --git a/src/storage/mod.rs b/src/storage/mod.rs index be0a14d..b3a29b7 100644 --- a/src/storage/mod.rs +++ b/src/storage/mod.rs @@ -308,7 +308,10 @@ pub trait Storage: Sync + Send { exclude_device_hash: &str, upload_time_from: Option, ) -> Result>; - async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<()>; + async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<(u64, i64)>; + + /// Restore deleted file from deletion record + async fn restore_file(&self, account_hash: &str, delete_record_id: u64) -> Result; // Optimized file search methods async fn find_file_by_path_and_name( diff --git 
a/src/storage/mysql.rs b/src/storage/mysql.rs index 10a8368..3ca9d03 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1447,10 +1447,14 @@ impl Storage for MySqlStorage { } /// Delete file - async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<()> { + async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<(u64, i64)> { MySqlFileExt::delete_file(self, account_hash, file_id).await } + async fn restore_file(&self, account_hash: &str, delete_record_id: u64) -> Result { + MySqlFileExt::restore_file(self, account_hash, delete_record_id).await + } + /// List files async fn list_files( &self, @@ -1702,8 +1706,8 @@ impl Storage for MySqlStorage { new_file_path.as_bytes().to_vec() }; - // Update file_path with encrypted bytes (VARBINARY) - let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, updated_time = NOW() WHERE file_id = ?"; + // Update file_path with encrypted bytes (VARBINARY) and set operation_type to RENAME + let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, operation_type = 'RENAME', updated_time = NOW() WHERE file_id = ?"; sqlx::query(query) .bind(&new_file_path_bytes) .bind(device_hash) diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index 20251d9..7eed27a 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -54,7 +54,11 @@ pub trait MySqlFileExt { ) -> Result>; /// Delete file - async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<()>; + /// Returns (deletion_record_id, new_revision) + async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<(u64, i64)>; + + /// Restore deleted file from deletion record + async fn restore_file(&self, account_hash: &str, delete_record_id: u64) -> Result; /// List files async fn list_files( @@ -270,12 +274,19 @@ impl MySqlFileExt for MySqlStorage { (fpb, fnb, eq, tp) }; + // Determine operation_type based on revision + let operation_type = if new_revision == 1 { + "CREATE" + } else { + "UPDATE" + }; + sqlx::query( r#"INSERT INTO files ( file_id, account_hash, device_hash, file_path, filename, file_hash, size, is_deleted, revision, created_time, updated_time, group_id, watcher_id, - server_group_id, server_watcher_id, eq_index, token_path, key_id, unix_permissions - ) VALUES (?, ?, ?, ?, ?, ?, ?, FALSE, ?, FROM_UNIXTIME(?), FROM_UNIXTIME(?), ?, ?, ?, ?, ?, ?, ?, ?)"# + server_group_id, server_watcher_id, eq_index, token_path, key_id, unix_permissions, operation_type + ) VALUES (?, ?, ?, ?, ?, ?, ?, FALSE, ?, FROM_UNIXTIME(?), FROM_UNIXTIME(?), ?, ?, ?, ?, ?, ?, ?, ?, ?)"# ) .bind(file_info.file_id as i64) .bind(&file_info.account_hash) @@ -295,6 +306,7 @@ impl MySqlFileExt for MySqlStorage { .bind(&token_path) .bind(file_info.key_id.as_deref()) .bind(file_info.unix_permissions) + .bind(operation_type) .execute(&mut *tx) .await .map_err(|e| { error!("❌ New file information insertion failed(sqlx): {}", e); StorageError::Database(format!("New file information insertion failed: {}", e)) })?; @@ -312,21 +324,30 @@ impl MySqlFileExt for MySqlStorage { /// Query file information async fn get_file_info(&self, file_id: u64) -> Result> { use sqlx::Row; - debug!("Querying file information: file_id={}", file_id); + debug!( + "🔍 [GetFileInfo] Querying file information: file_id={}", + file_id + ); + // Optimized query: only select needed columns, exclude DELETE operation_type records let row_opt = sqlx::query( r#"SELECT file_id, account_hash, device_hash, file_path, filename, file_hash, - 
UNIX_TIMESTAMP(created_time) AS created_ts, UNIX_TIMESTAMP(updated_time) AS updated_ts, group_id, watcher_id, is_deleted, revision, size, key_id, unix_permissions FROM files - WHERE file_id = ? AND is_deleted = FALSE"#, + WHERE file_id = ? + AND is_deleted = FALSE + AND operation_type NOT IN ('DELETE')"#, ) .bind(file_id) .fetch_optional(self.get_sqlx_pool()) .await .map_err(|e| { + error!( + "❌ [GetFileInfo] Query failed: file_id={}, error={}", + file_id, e + ); StorageError::Database(format!("File information query failed(sqlx): {}", e)) })?; @@ -365,11 +386,14 @@ impl MySqlFileExt for MySqlStorage { key_id: key_id_opt, unix_permissions: row.try_get("unix_permissions").ok(), }; - debug!("File information query successful: file_id={}", file_id); + debug!( + "✅ [GetFileInfo] Query successful: file_id={}, revision={}", + file_id, revision + ); Ok(Some(file_info)) } else { debug!( - "No file information or already deleted: file_id={}", + "❌ [GetFileInfo] File not found or deleted: file_id={}", file_id ); Ok(None) @@ -649,7 +673,9 @@ impl MySqlFileExt for MySqlStorage { group_id, watcher_id, revision, size, key_id, unix_permissions, is_deleted FROM files - WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE + WHERE account_hash = ? AND eq_index = ? + AND is_deleted = FALSE + AND operation_type NOT IN ('DELETE') ORDER BY updated_time DESC, revision DESC, id DESC LIMIT 1"#, ) @@ -733,7 +759,8 @@ impl MySqlFileExt for MySqlStorage { } /// Delete file (metadata and content) - async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<()> { + /// Returns (deletion_record_id, new_revision) + async fn delete_file(&self, account_hash: &str, file_id: u64) -> Result<(u64, i64)> { info!( "Deleting file: account_hash={}, file_id={}", account_hash, file_id @@ -745,11 +772,12 @@ impl MySqlFileExt for MySqlStorage { )) })?; - // Check if file exists and belongs to the user + // Check if file exists and query all data needed for recovery // Use Vec for VARBINARY columns (file_path, filename) let row_opt = sqlx::query( - r#"SELECT file_id, revision, file_path, filename, device_hash, group_id, watcher_id, server_group_id, server_watcher_id - FROM files WHERE file_id = ? AND account_hash = ?"#, + r#"SELECT file_id, revision, file_path, filename, file_hash, size, encrypted_data, key_id, + device_hash, group_id, watcher_id, server_group_id, server_watcher_id, unix_permissions + FROM files WHERE file_id = ? AND account_hash = ? 
AND is_deleted = FALSE"#, ) .bind(file_id) .bind(account_hash) @@ -778,11 +806,16 @@ impl MySqlFileExt for MySqlStorage { let current_revision: i64 = row.try_get("revision").unwrap_or(0); let file_path_bytes: Vec = row.try_get("file_path").unwrap_or_default(); let filename_bytes: Vec = row.try_get("filename").unwrap_or_default(); + let file_hash_bytes: Vec = row.try_get("file_hash").unwrap_or_default(); + let size: u64 = row.try_get::("size").unwrap_or(0) as u64; + let encrypted_data: Option> = row.try_get("encrypted_data").ok(); + let key_id: Option = row.try_get("key_id").ok(); let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); let server_group_id: i32 = row.try_get("server_group_id").unwrap_or(0); let server_watcher_id: i32 = row.try_get("server_watcher_id").unwrap_or(0); + let unix_permissions: Option = row.try_get("unix_permissions").ok(); let new_revision = current_revision + 1; @@ -798,106 +831,216 @@ impl MySqlFileExt for MySqlStorage { let now = Utc::now().timestamp(); - // 1. Update existing file record to is_deleted=1 - sqlx::query(r#"UPDATE files SET is_deleted = 1 WHERE file_id = ?"#) + // 1. Mark original file as deleted (is_deleted=TRUE) for TTL cleanup + sqlx::query(r#"UPDATE files SET is_deleted = TRUE WHERE file_id = ?"#) .bind(file_id) .execute(&mut *tx) .await .map_err(|e| { - StorageError::Database(format!( - "Existing file deletion marking failed (schema/database error): {}", - e - )) + StorageError::Database(format!("Original file deletion marking failed: {}", e)) })?; - // 2. Update all previous revisions with the same file path and name to is_deleted=1 - // Use Vec for VARBINARY columns to avoid type mismatch + // 2. Create deletion history record with operation_type=DELETE + debug!( + "Creating deletion history: file_path={}, filename={}, deleted_file_id={}", + file_path_for_log, filename_for_log, file_id + ); + + // Generate new file_id for deletion history record + let deletion_record_id = rand::random::(); + + // INSERT deletion history record with all data from original (for recovery) + // operation_type='DELETE', is_deleted=FALSE (so it can be recovered) sqlx::query( - r#"UPDATE files SET is_deleted = 1 - WHERE account_hash = ? AND file_path = ? AND filename = ? 
AND group_id = ?"#, + r#"INSERT INTO files + (file_id, account_hash, device_hash, file_path, filename, file_hash, size, + encrypted_data, key_id, unix_permissions, server_group_id, server_watcher_id, + group_id, watcher_id, revision, is_deleted, operation_type, deleted_file_id, + created_time, updated_time) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, FALSE, 'DELETE', ?, FROM_UNIXTIME(?), FROM_UNIXTIME(?))"#, ) + .bind(deletion_record_id) .bind(account_hash) + .bind(&device_hash) .bind(&file_path_bytes) .bind(&filename_bytes) + .bind(&file_hash_bytes) // Copy from original for recovery + .bind(size as i64) // Copy from original for recovery + .bind(encrypted_data) // Copy from original for recovery + .bind(key_id) // Copy from original for recovery + .bind(unix_permissions) + .bind(server_group_id) + .bind(server_watcher_id) .bind(group_id) + .bind(watcher_id) + .bind(new_revision) + .bind(file_id) // Reference to original file_id + .bind(now) + .bind(now) .execute(&mut *tx) .await - .map_err(|e| { + .map_err(|e| StorageError::Database(format!("Deletion history creation failed: {}", e)))?; + + // Commit transaction + tx.commit().await.map_err(|e| { StorageError::Database(format!( - "Previous version file deletion marking failed (schema/VARBINARY type mismatch): {}", + "Transaction commit failed (schema/database error): {}", e )) })?; - // 3. Adding deletion history - debug!( - "Adding deletion history: file_path={}, filename={}", - file_path_for_log, filename_for_log + info!( + "File deletion complete: file_id={}, new_revision={}, deletion_history_file_id={}", + file_id, new_revision, deletion_record_id + ); + Ok((deletion_record_id, new_revision)) + } + + /// Restore deleted file from deletion record + async fn restore_file(&self, account_hash: &str, delete_record_id: u64) -> Result { + info!( + "Restoring file: account_hash={}, delete_record_id={}", + account_hash, delete_record_id + ); + + let mut tx = self + .get_sqlx_pool() + .begin() + .await + .map_err(|e| StorageError::Database(format!("Transaction start failed: {}", e)))?; + + // 1. Query deletion record (operation_type='DELETE') + use sqlx::Row; + let delete_row_opt = sqlx::query( + r#"SELECT file_id, file_path, filename, file_hash, size, encrypted_data, key_id, + device_hash, group_id, watcher_id, server_group_id, server_watcher_id, + unix_permissions, revision, deleted_file_id, operation_type + FROM files + WHERE file_id = ? AND account_hash = ? 
AND is_deleted = FALSE"#, + ) + .bind(delete_record_id) + .bind(account_hash) + .fetch_optional(&mut *tx) + .await + .map_err(|e| StorageError::Database(format!("Deletion record query failed: {}", e)))?; + + let delete_row = delete_row_opt.ok_or_else(|| { + StorageError::NotFound(format!("Deletion record not found: {}", delete_record_id)) + })?; + + // Validate this is a DELETE record + let operation_type: String = delete_row.try_get("operation_type").unwrap_or_default(); + if operation_type != "DELETE" { + return Err(StorageError::Database(format!( + "Record {} is not a deletion record (operation_type={})", + delete_record_id, operation_type + ))); + } + + // Extract all fields from deletion record + let file_path_bytes: Vec = delete_row.try_get("file_path").unwrap_or_default(); + let filename_bytes: Vec = delete_row.try_get("filename").unwrap_or_default(); + let file_hash_bytes: Vec = delete_row.try_get("file_hash").unwrap_or_default(); + let size: u64 = delete_row.try_get::("size").unwrap_or(0) as u64; + let encrypted_data: Option> = delete_row.try_get("encrypted_data").ok(); + let key_id: Option = delete_row.try_get("key_id").ok(); + let device_hash: String = delete_row.try_get("device_hash").unwrap_or_default(); + let group_id: i32 = delete_row.try_get("group_id").unwrap_or(0); + let watcher_id: i32 = delete_row.try_get("watcher_id").unwrap_or(0); + let server_group_id: i32 = delete_row.try_get("server_group_id").unwrap_or(0); + let server_watcher_id: i32 = delete_row.try_get("server_watcher_id").unwrap_or(0); + let unix_permissions: Option = delete_row.try_get("unix_permissions").ok(); + let delete_revision: i64 = delete_row.try_get("revision").unwrap_or(0); + + let new_revision = delete_revision + 1; + + // Decrypt for logging + let file_path_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes.clone()); + let filename_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes.clone()); + + info!( + "Restoring file: path={}, filename={}, revision={}", + file_path_for_log, filename_for_log, new_revision ); - // Generate new file_id (random value) - let new_file_id = rand::random::(); + let now = Utc::now().timestamp(); + + // 2. Mark deletion record as deleted (is_deleted=TRUE) + sqlx::query(r#"UPDATE files SET is_deleted = TRUE WHERE file_id = ?"#) + .bind(delete_record_id) + .execute(&mut *tx) + .await + .map_err(|e| { + StorageError::Database(format!("Deletion record marking failed: {}", e)) + })?; + + // 3. 
Create restored file record (operation_type='RESTORE') + let restored_file_id = rand::random::(); - // INSERT with explicitly specified file_id field - // Use Vec for VARBINARY columns to avoid type mismatch sqlx::query( r#"INSERT INTO files - (file_id, account_hash, device_hash, file_path, filename, file_hash, size, unix_permissions, server_group_id, server_watcher_id) - VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)"#, + (file_id, account_hash, device_hash, file_path, filename, file_hash, size, + encrypted_data, key_id, unix_permissions, server_group_id, server_watcher_id, + group_id, watcher_id, revision, is_deleted, operation_type, + created_time, updated_time) + VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, FALSE, 'RESTORE', FROM_UNIXTIME(?), FROM_UNIXTIME(?))"#, ) - .bind(new_file_id) + .bind(restored_file_id) .bind(account_hash) .bind(&device_hash) .bind(&file_path_bytes) .bind(&filename_bytes) - .bind(&file_path_bytes) // file_hash uses file_path_bytes - .bind(0i64) - .bind(None::) + .bind(&file_hash_bytes) + .bind(size as i64) + .bind(encrypted_data.clone()) + .bind(key_id.clone()) + .bind(unix_permissions) .bind(server_group_id) .bind(server_watcher_id) - .execute(&mut *tx) - .await - .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, schema/VARBINARY type mismatch): {}", e)))?; - - // Update remaining fields - sqlx::query( - r#"UPDATE files SET - is_deleted = 1, - revision = ?, - created_time = FROM_UNIXTIME(?), - updated_time = FROM_UNIXTIME(?), - group_id = ?, - watcher_id = ? - WHERE file_id = ?"#, - ) + .bind(group_id) + .bind(watcher_id) .bind(new_revision) .bind(now) .bind(now) - .bind(group_id) - .bind(watcher_id) - .bind(new_file_id) .execute(&mut *tx) .await - .map_err(|e| { - StorageError::Database(format!( - "Deletion history addition failed (step 2, schema/database error): {}", - e - )) - })?; + .map_err(|e| StorageError::Database(format!("Restored file creation failed: {}", e)))?; // Commit transaction - tx.commit().await.map_err(|e| { - StorageError::Database(format!( - "Transaction commit failed (schema/database error): {}", - e - )) - })?; + tx.commit() + .await + .map_err(|e| StorageError::Database(format!("Transaction commit failed: {}", e)))?; info!( - "File deletion complete: file_id={}, new_revision={}, deletion_history_file_id={}", - file_id, new_revision, new_file_id + "File restore complete: delete_record_id={}, restored_file_id={}, revision={}", + delete_record_id, restored_file_id, new_revision ); - Ok(()) + + // Return FileInfo for the restored file + let file_path_str = self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes); + let filename_str = self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes); + + Ok(FileInfo { + file_id: restored_file_id, + filename: filename_str, + file_hash: hex::encode(&file_hash_bytes), + device_hash, + group_id, + watcher_id, + is_encrypted: encrypted_data.is_some(), + file_path: file_path_str, + updated_time: prost_types::Timestamp { + seconds: now, + nanos: 0, + }, + revision: new_revision, + account_hash: account_hash.to_string(), + size, + key_id, + unix_permissions, + }) } /// List files @@ -1166,6 +1309,9 @@ impl MySqlFileExt for MySqlStorage { filename: &str, ) -> Result> { use sqlx::Row; + info!("🔍 [v2025.11.19-FIXED] find_file_by_criteria called"); + info!("🔍 [QUERY] Using: WHERE is_deleted = FALSE"); + info!("🔍 [QUERY] Sorting: ORDER BY updated_time DESC, revision DESC, id DESC"); debug!("🔍 find_file_by_criteria called:"); debug!(" account_hash: 
{}", account_hash); debug!(" group_id: {}", group_id); @@ -1219,13 +1365,15 @@ impl MySqlFileExt for MySqlStorage { group_id, watcher_id, revision, size, key_id, unix_permissions, is_deleted FROM files - WHERE account_hash = ? AND server_group_id = ? AND server_watcher_id = ? AND is_deleted = FALSE + WHERE account_hash = ? AND server_group_id = ? AND server_watcher_id = ? + AND is_deleted = FALSE + AND operation_type NOT IN ('DELETE') AND ( (file_path = ? AND filename = ?) OR (file_path = ? AND filename = ?) ) ORDER BY updated_time DESC, revision DESC, id DESC - LIMIT 1"# + LIMIT 1"#, ) .bind(account_hash) .bind(group_id) @@ -1236,7 +1384,16 @@ impl MySqlFileExt for MySqlStorage { .bind(filename) .fetch_optional(self.get_sqlx_pool()) .await - .map_err(|e| { error!("❌ File search query execution failed(sqlx): {}", e); StorageError::Database(format!("File search query execution failed: {}", e)) })?; + .map_err(|e| { + error!("❌ File search query execution failed(sqlx): {}", e); + StorageError::Database(format!("File search query execution failed: {}", e)) + })?; + + info!("🔍 [QUERY EXECUTED] is_deleted=FALSE filter applied"); + info!( + "🔍 [QUERY RESULT] Row count: {}", + if row.is_some() { "1" } else { "0" } + ); if let Some(row) = row { // Extract is_deleted first for validation @@ -1254,6 +1411,7 @@ impl MySqlFileExt for MySqlStorage { // Critical validation: Double-check is_deleted if is_deleted { error!("❌ CRITICAL BUG: Query returned deleted file!"); + error!("❌ [v2025.11.19] This should NEVER happen with new query!"); error!( " file_id={}, revision={}, is_deleted={}", file_id, revision, is_deleted @@ -1263,6 +1421,10 @@ impl MySqlFileExt for MySqlStorage { return Ok(None); } + info!( + "✅ [VALIDATION PASSED] file_id={}, revision={}, is_deleted=false", + file_id, revision + ); debug!("✅ Verified file is not deleted: file_id={}", file_id); // Extract remaining fields from Row object diff --git a/src/utils/response.rs b/src/utils/response.rs index 9503cf4..763de86 100644 --- a/src/utils/response.rs +++ b/src/utils/response.rs @@ -44,14 +44,22 @@ pub fn file_delete_error(message: impl Into) -> DeleteFileResponse { DeleteFileResponse { success: false, return_message: message.into(), + delete_record_id: 0, // No deletion record for errors + new_revision: 0, // No revision for errors } } /// Create success response for file deletion -pub fn file_delete_success(message: impl Into) -> DeleteFileResponse { +pub fn file_delete_success( + message: impl Into, + delete_record_id: u64, + new_revision: i64, +) -> DeleteFileResponse { DeleteFileResponse { success: true, return_message: message.into(), + delete_record_id, + new_revision, } }