diff --git a/migrations/create_reset_monthly_quota_procedure.sql b/migrations/create_reset_monthly_quota_procedure.sql new file mode 100644 index 0000000..8309d00 --- /dev/null +++ b/migrations/create_reset_monthly_quota_procedure.sql @@ -0,0 +1,52 @@ +-- Create reset_monthly_quota stored procedure +-- Execute this file manually using: mysql -h 127.0.0.1 -P 63306 -u genesis76 -p cosmic_sync < create_reset_monthly_quota_procedure.sql + +USE cosmic_sync; + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + +DELIMITER $$ + +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + -- Reset API call counter + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + -- Initialize next month bandwidth record + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + -- Log quota reset event + INSERT INTO quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END$$ + +DELIMITER ; + +-- Verify procedure was created +SHOW PROCEDURE STATUS WHERE Db = 'cosmic_sync' AND Name = 'reset_monthly_quota'; + diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql new file mode 100644 index 0000000..9bb50b8 --- /dev/null +++ b/migrations/sql/20250118_add_reset_monthly_quota_procedure.sql @@ -0,0 +1,50 @@ +-- Add reset_monthly_quota stored procedure for monthly quota reset +-- This procedure is called by the quota maintenance background task + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + +DELIMITER $$ + +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + -- Archive current month data (already in usage_bandwidth_monthly) + -- Just need to reset for next month + + -- Reset API call counter + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + -- Initialize next month bandwidth record + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + -- Log quota reset event + INSERT INTO quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END$$ + +DELIMITER ; + diff --git a/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql b/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql 
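The two migration files above install the reset_monthly_quota procedure that the quota maintenance background task is expected to call once per account at month rollover. Below is a minimal sketch of how that call might look over the server's sqlx MySQL pool; the helper name reset_monthly_quota_for and the error handling are illustrative assumptions, not part of this patch.

```rust
use sqlx::MySqlPool;

// Hypothetical helper: invoke the stored procedure installed by the migration
// above for a single account. The procedure only runs UPDATE/INSERT statements,
// so a plain CALL through execute() should be sufficient (no result set to drain).
async fn reset_monthly_quota_for(
    pool: &MySqlPool,
    account_hash: &str,
) -> Result<(), sqlx::Error> {
    sqlx::query("CALL reset_monthly_quota(?)")
        .bind(account_hash)
        .execute(pool)
        .await?;
    Ok(())
}
```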
new file mode 100644 index 0000000..0796505 --- /dev/null +++ b/migrations/sql/20250118_add_reset_monthly_quota_procedure_down.sql @@ -0,0 +1,4 @@ +-- Rollback: Remove reset_monthly_quota stored procedure + +DROP PROCEDURE IF EXISTS reset_monthly_quota; + diff --git a/proto/sync.proto b/proto/sync.proto index 1ed0c46..ce12e0d 100755 --- a/proto/sync.proto +++ b/proto/sync.proto @@ -712,6 +712,7 @@ message FindFileResponse { uint64 file_id = 3; int64 revision = 4; FileInfo file_info = 5; // Includes full file information + bool is_deleted = 6; // File deletion status for client-side validation } // Check if file exists (including deleted files) diff --git a/src/handlers/file/delete.rs b/src/handlers/file/delete.rs index 4525817..6e4e469 100644 --- a/src/handlers/file/delete.rs +++ b/src/handlers/file/delete.rs @@ -101,11 +101,41 @@ pub async fn handle_delete_file( ))) } Err(e) => { - error!("File deletion failed: file_id={}, error={}", file_id, e); - Ok(Response::new(response::file_delete_error(format!( - "File deletion failed: {}", - e - )))) + // Determine error category for logging and client classification + let error_msg = format!("{}", e); + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") + || error_msg.contains("permission") + || error_msg.contains("access denied") + || error_msg.contains("type mismatch") + || error_msg.contains("mismatched types"); + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") + || error_msg.contains("File not found") + || error_msg.contains("already deleted") + || error_msg.contains("File data not found"); + + error!( + "Delete operation failed - file_id: {}, path: {}, error: {}, error_type: {:?}, is_permanent: {}", + file_id, + req.file_path, + error_msg, + if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, + is_permanent_error + ); + + // Format error message for client + let client_error_msg = if is_not_found { + format!("File not found: already deleted") + } else if is_permanent_error { + format!("DB schema error: column type mismatch - {}", error_msg) + } else { + format!("File deletion failed: {}", error_msg) + }; + + Ok(Response::new(response::file_delete_error(client_error_msg))) } } } diff --git a/src/handlers/file/find.rs b/src/handlers/file/find.rs index 8c54fc3..eb13123 100644 --- a/src/handlers/file/find.rs +++ b/src/handlers/file/find.rs @@ -22,6 +22,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } } @@ -41,6 +42,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } Err(e) => { @@ -51,6 +53,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })); } } @@ -101,6 +104,7 @@ pub async fn handle_find_file_by_criteria( file_id: file_info.file_id, revision: file_info.revision, file_info: Some(proto_file_info), + is_deleted: false, // Always false - only non-deleted files reach here })) } Ok(None) => Ok(Response::new(FindFileResponse { @@ -109,6 +113,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })), Err(e) => Ok(Response::new(FindFileResponse { success: false, @@ -116,6 +121,7 @@ pub async fn handle_find_file_by_criteria( file_id: 0, revision: 0, file_info: None, + is_deleted: false, })), } } diff --git a/src/handlers/file/rename.rs 
b/src/handlers/file/rename.rs index 160593d..9b0f7e9 100644 --- a/src/handlers/file/rename.rs +++ b/src/handlers/file/rename.rs @@ -48,7 +48,7 @@ pub async fn handle_rename_file( Ok(None) => { return Ok(Response::new(RenameFileResponse { success: false, - return_message: "File not found".to_string(), + return_message: "File not found: already deleted".to_string(), new_revision: 0, conflict: Some(ConflictInfo { r#type: ConflictType::FileNotFound as i32, @@ -181,10 +181,43 @@ pub async fn handle_rename_file( })) } Err(e) => { - error!("Failed to rename file: {}", e); + // Determine error category for logging and client classification + let error_msg = format!("{}", e); + let is_permanent_error = error_msg.contains("VARBINARY") + || error_msg.contains("VARCHAR") + || error_msg.contains("schema") + || error_msg.contains("permission") + || error_msg.contains("access denied") + || error_msg.contains("type mismatch") + || error_msg.contains("mismatched types"); + + let is_not_found = error_msg.contains("not found") + || error_msg.contains("No such file") + || error_msg.contains("File not found") + || error_msg.contains("already deleted"); + + error!( + "Rename operation failed - file_id: {}, old_path: {}, new_path: {}, error: {}, error_type: {:?}, is_permanent: {}", + req.file_id, + req.old_file_path, + req.new_file_path, + error_msg, + if is_permanent_error { "permanent" } else if is_not_found { "not_found" } else { "temporary" }, + is_permanent_error + ); + + // Format error message for client + let client_error_msg = if is_not_found { + format!("File not found: already deleted") + } else if is_permanent_error { + format!("DB schema error: column type mismatch - {}", error_msg) + } else { + format!("Failed to rename file: {}", error_msg) + }; + Ok(Response::new(RenameFileResponse { success: false, - return_message: format!("Failed to rename file: {}", e), + return_message: client_error_msg, new_revision: file_info.revision, conflict: None, })) diff --git a/src/storage/mysql.rs b/src/storage/mysql.rs index 02bc245..232332d 100644 --- a/src/storage/mysql.rs +++ b/src/storage/mysql.rs @@ -1047,6 +1047,88 @@ impl MySqlStorage { } } + // Check and create reset_monthly_quota procedure if it doesn't exist + let procedure_exists: bool = sqlx::query_scalar( + r#"SELECT COUNT(*) > 0 FROM information_schema.routines + WHERE routine_schema = DATABASE() + AND routine_name = 'reset_monthly_quota' + AND routine_type = 'PROCEDURE'"#, + ) + .fetch_one(self.get_sqlx_pool()) + .await + .unwrap_or(false); + + if !procedure_exists { + info!("Creating reset_monthly_quota stored procedure"); + + // Create procedure without DELIMITER (sqlx doesn't support it) + // We need to execute the entire procedure definition as a single statement + let procedure_sql = r#" +CREATE PROCEDURE reset_monthly_quota(IN p_account_hash VARCHAR(255)) +BEGIN + DECLARE v_current_month VARCHAR(7); + DECLARE v_next_reset_date DATE; + + SET v_current_month = DATE_FORMAT(CURDATE(), '%Y-%m'); + SET v_next_reset_date = DATE_ADD(LAST_DAY(CURDATE()), INTERVAL 1 DAY); + + UPDATE usage_storage + SET api_calls_count = 0, + quota_reset_date = v_next_reset_date, + updated_at = NOW() + WHERE account_hash = p_account_hash; + + INSERT INTO usage_bandwidth_monthly ( + account_hash, usage_month, upload_bytes, download_bytes, + upload_count, download_count + ) + SELECT + account_hash, + DATE_FORMAT(v_next_reset_date, '%Y-%m'), + 0, 0, 0, 0 + FROM usage_storage + WHERE account_hash = p_account_hash + ON DUPLICATE KEY UPDATE updated_at = NOW(); + + INSERT INTO 
quota_events ( + account_hash, event_type, current_value, limit_value, + severity, message + ) VALUES ( + p_account_hash, 'QUOTA_RESET', 0, 0, + 'INFO', 'Monthly quota has been reset' + ); +END"#; + + // Use raw query to execute procedure creation + // Note: sqlx may have issues with procedure creation, so we use query_drop + if let Err(e) = sqlx::query(procedure_sql) + .execute(self.get_sqlx_pool()) + .await + { + // If procedure creation fails, try with DROP first + let _ = sqlx::query("DROP PROCEDURE IF EXISTS reset_monthly_quota") + .execute(self.get_sqlx_pool()) + .await; + + if let Err(e2) = sqlx::query(procedure_sql) + .execute(self.get_sqlx_pool()) + .await + { + warn!( + "Failed to create reset_monthly_quota procedure: {}. Error: {}", + e, e2 + ); + // Don't fail the entire migration if procedure creation fails + } else { + info!("✅ reset_monthly_quota procedure created successfully"); + } + } else { + info!("✅ reset_monthly_quota procedure created successfully"); + } + } else { + debug!("reset_monthly_quota procedure already exists"); + } + info!("Database schema migration complete"); Ok(()) } @@ -1577,15 +1659,66 @@ impl Storage for MySqlStorage { device_hash: &str, new_revision: i64, ) -> Result<()> { - let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, updated_at = NOW() WHERE id = ?"; + use sqlx::Row; + + // First, get file information to obtain account_hash, group_id, watcher_id for encryption + let row_opt = sqlx::query( + r#"SELECT account_hash, group_id, watcher_id FROM files WHERE file_id = ? LIMIT 1"#, + ) + .bind(file_id) + .fetch_optional(&self.sqlx_pool) + .await + .map_err(|e| { + StorageError::Database(format!( + "Failed to get file info for rename (schema/database error): {}", + e + )) + })?; + + let (account_hash, group_id, watcher_id) = match row_opt { + Some(row) => { + let account_hash: String = row.try_get("account_hash").unwrap_or_default(); + let group_id: i32 = row.try_get("group_id").unwrap_or(0); + let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); + (account_hash, group_id, watcher_id) + } + None => { + return Err(StorageError::NotFound(format!( + "File not found: {}", + file_id + ))); + } + }; + + // Encrypt new_file_path for VARBINARY storage + let cfg = crate::server::app_state::AppState::get_config(); + let new_file_path_bytes = if let Some(kv) = cfg.server_encode_key.as_ref() { + if kv.len() == 32 { + let key: &[u8; 32] = kv.as_slice().try_into().expect("len checked"); + let aad = format!("{}:{}:{}", account_hash, group_id, watcher_id); + crate::utils::crypto::aead_encrypt(key, new_file_path.as_bytes(), aad.as_bytes()) + } else { + new_file_path.as_bytes().to_vec() + } + } else { + new_file_path.as_bytes().to_vec() + }; + + // Update file_path with encrypted bytes (VARBINARY) + let query = "UPDATE files SET file_path = ?, device_hash = ?, revision = ?, updated_time = NOW() WHERE file_id = ?"; sqlx::query(query) - .bind(new_file_path) + .bind(&new_file_path_bytes) .bind(device_hash) .bind(new_revision) .bind(file_id) .execute(&self.sqlx_pool) .await - .map_err(|e| StorageError::General(format!("Failed to rename file: {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "Failed to rename file (schema/VARBINARY type mismatch): {}", + e + )) + })?; info!( "File renamed in database: file_id={}, new_path={}, new_revision={}", diff --git a/src/storage/mysql_file.rs b/src/storage/mysql_file.rs index fa79dd7..95025ec 100644 --- a/src/storage/mysql_file.rs +++ b/src/storage/mysql_file.rs @@ -648,7 +648,8 @@ 
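The rename_file change in src/storage/mysql.rs above encrypts the new path with aead_encrypt, using account_hash:group_id:watcher_id as the AAD, before binding it to the VARBINARY file_path column, and falls back to plain UTF-8 bytes when no 32-byte server_encode_key is configured. A minimal sketch of that encrypt-before-bind step as a standalone helper follows; the function name encode_path_for_storage is hypothetical, and the aead_encrypt argument shape and Vec<u8> return type are assumed from how the hunk uses it.

```rust
// Sketch only: mirrors the encode-before-bind logic in the rename_file hunk above.
fn encode_path_for_storage(
    server_encode_key: Option<&[u8]>,
    account_hash: &str,
    group_id: i32,
    watcher_id: i32,
    file_path: &str,
) -> Vec<u8> {
    match server_encode_key {
        // Only a 32-byte key is accepted; the AAD ties the ciphertext to the
        // owning account, group and watcher, matching the hunk above.
        Some(kv) if kv.len() == 32 => {
            let key: &[u8; 32] = kv.try_into().expect("length checked above");
            let aad = format!("{}:{}:{}", account_hash, group_id, watcher_id);
            crate::utils::crypto::aead_encrypt(key, file_path.as_bytes(), aad.as_bytes())
        }
        // No usable key: store the plain UTF-8 bytes, as the hunk does.
        _ => file_path.as_bytes().to_vec(),
    }
}
```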
impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE AND revision = ? ORDER BY revision DESC LIMIT 1"#, @@ -667,7 +668,8 @@ impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND eq_index = ? AND is_deleted = FALSE ORDER BY revision DESC LIMIT 1"#, @@ -682,7 +684,27 @@ impl MySqlFileExt for MySqlStorage { }; if let Some(row) = row_opt { + // Extract is_deleted first for validation + let is_deleted: bool = row.try_get("is_deleted").unwrap_or(false); let file_id: u64 = row.try_get("file_id").unwrap_or(0); + let revision: i64 = row.try_get("revision").unwrap_or(0); + + debug!("📊 find_file_by_path_and_name query returned: file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted); + + // Critical validation: Double-check is_deleted + if is_deleted { + error!("❌ CRITICAL BUG: find_file_by_path_and_name returned deleted file!"); + error!( + " file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + error!(" This should NEVER happen - database inconsistency detected"); + return Ok(None); + } + + debug!("✅ Verified file is not deleted: file_id={}", file_id); + let acc_hash: String = row.try_get("account_hash").unwrap_or_default(); let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let file_path_b: Vec<u8> = row.try_get("file_path").unwrap_or_default(); @@ -691,7 +713,6 @@ impl MySqlFileExt for MySqlStorage { let updated_ts: Option<i64> = row.try_get("updated_ts").unwrap_or(None); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); - let revision: i64 = row.try_get("revision").unwrap_or(0); let size: u64 = row.try_get("size").unwrap_or(0); let key_id_opt: Option<String> = row.try_get("key_id").ok(); @@ -737,11 +758,15 @@ impl MySqlFileExt for MySqlStorage { account_hash, file_id ); let mut tx = self.get_sqlx_pool().begin().await.map_err(|e| { - StorageError::Database(format!("Transaction start failed(sqlx): {}", e)) + StorageError::Database(format!( + "Transaction start failed (schema/database error): {}", + e + )) })?; // Check if file exists and belongs to the user - let file_exists: Option<(u64, i64, String, String, String, i32, i32)> = sqlx::query_as( + // Use Vec<u8> for VARBINARY columns (file_path, filename) + let row_opt = sqlx::query( r#"SELECT file_id, revision, file_path, filename, device_hash, group_id, watcher_id FROM files WHERE file_id = ?
AND account_hash = ?"#, ) @@ -749,9 +774,14 @@ impl MySqlFileExt for MySqlStorage { .bind(account_hash) .fetch_optional(&mut *tx) .await - .map_err(|e| StorageError::Database(format!("File existence check failed(sqlx): {}", e)))?; + .map_err(|e| { + StorageError::Database(format!( + "File existence check failed (schema/database error): {}", + e + )) + })?; - if file_exists.is_none() { + if row_opt.is_none() { debug!( "File to delete does not exist or does not belong to the user: file_id={}, account_hash={}", file_id, account_hash @@ -762,20 +792,25 @@ impl MySqlFileExt for MySqlStorage { ))); } - let (_, current_revision, file_path, filename, device_hash, group_id, watcher_id) = - if let Some(rec) = file_exists { - rec - } else { - return Err(StorageError::NotFound(format!( - "Cannot find file: {}", - file_id - ))); - }; + use sqlx::Row; + let row = row_opt.unwrap(); + let current_revision: i64 = row.try_get("revision").unwrap_or(0); + let file_path_bytes: Vec<u8> = row.try_get("file_path").unwrap_or_default(); + let filename_bytes: Vec<u8> = row.try_get("filename").unwrap_or_default(); + let device_hash: String = row.try_get("device_hash").unwrap_or_default(); + let group_id: i32 = row.try_get("group_id").unwrap_or(0); + let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); + let new_revision = current_revision + 1; + // Decrypt for logging purposes only + let file_path_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, file_path_bytes.clone()); + let filename_for_log = + self.decrypt_text(account_hash, group_id, watcher_id, filename_bytes.clone()); debug!( "Processing file deletion: file_id={}, file_path={}, filename={}, current_revision={}, new_revision={}", - file_id, file_path, filename, current_revision, new_revision + file_id, file_path_for_log, filename_for_log, current_revision, new_revision ); let now = Utc::now().timestamp(); @@ -787,25 +822,26 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { StorageError::Database(format!( - "Existing file deletion marking failed(sqlx): {}", + "Existing file deletion marking failed (schema/database error): {}", e )) })?; // 2. Update all previous revisions with the same file path and name to is_deleted=1 + // Use Vec<u8> for VARBINARY columns to avoid type mismatch sqlx::query( r#"UPDATE files SET is_deleted = 1 WHERE account_hash = ? AND file_path = ? AND filename = ? AND group_id = ?"#, ) .bind(account_hash) - .bind(&file_path) - .bind(&filename) + .bind(&file_path_bytes) + .bind(&filename_bytes) .bind(group_id) .execute(&mut *tx) .await .map_err(|e| { StorageError::Database(format!( - "Previous version file deletion marking failed(sqlx): {}", + "Previous version file deletion marking failed (schema/VARBINARY type mismatch): {}", e )) })?; @@ -813,13 +849,14 @@ impl MySqlFileExt for MySqlStorage { // 3.
Adding deletion history debug!( "Adding deletion history: file_path={}, filename={}", - file_path, filename + file_path_for_log, filename_for_log ); // Generate new file_id (random value) let new_file_id = rand::random::<u64>(); // INSERT with explicitly specified file_id field + // Use Vec<u8> for VARBINARY columns to avoid type mismatch sqlx::query( r#"INSERT INTO files (file_id, account_hash, device_hash, file_path, filename, file_hash, size, unix_permissions) @@ -828,14 +865,14 @@ impl MySqlFileExt for MySqlStorage { .bind(new_file_id) .bind(account_hash) .bind(&device_hash) - .bind(&file_path) - .bind(&filename) - .bind(&file_path) + .bind(&file_path_bytes) + .bind(&filename_bytes) + .bind(&file_path_bytes) // file_hash uses file_path_bytes .bind(0i64) .bind(None::<u32>) .execute(&mut *tx) .await - .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, sqlx): {}", e)))?; + .map_err(|e| StorageError::Database(format!("Deletion history addition failed (step 1, schema/VARBINARY type mismatch): {}", e)))?; // Update remaining fields sqlx::query( @@ -858,14 +895,17 @@ impl MySqlFileExt for MySqlStorage { .await .map_err(|e| { StorageError::Database(format!( - "Deletion history addition failed (step 2, sqlx): {}", + "Deletion history addition failed (step 2, schema/database error): {}", e )) })?; // Commit transaction tx.commit().await.map_err(|e| { - StorageError::Database(format!("Transaction commit failed(sqlx): {}", e)) + StorageError::Database(format!( + "Transaction commit failed (schema/database error): {}", + e + )) })?; info!( @@ -1191,7 +1231,8 @@ impl MySqlFileExt for MySqlStorage { file_id, account_hash, device_hash, file_path, filename, file_hash, UNIX_TIMESTAMP(created_time) as created_ts, UNIX_TIMESTAMP(updated_time) as updated_ts, - group_id, watcher_id, revision, size, key_id, unix_permissions + group_id, watcher_id, revision, size, key_id, unix_permissions, + is_deleted FROM files WHERE account_hash = ? AND server_group_id = ? AND server_watcher_id = ?
AND is_deleted = FALSE AND ( @@ -1213,9 +1254,32 @@ impl MySqlFileExt for MySqlStorage { .map_err(|e| { error!("❌ File search query execution failed(sqlx): {}", e); StorageError::Database(format!("File search query execution failed: {}", e)) })?; if let Some(row) = row { - debug!("✅ File found!"); - // Extract required fields from Row object + // Extract is_deleted first for validation + let is_deleted: bool = row.try_get("is_deleted").unwrap_or(false); let file_id: u64 = row.try_get("file_id").unwrap_or(0); + let revision: i64 = row.try_get("revision").unwrap_or(0); + + debug!("✅ File found!"); + debug!( + "📊 Query returned: file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + + // Critical validation: Double-check is_deleted + if is_deleted { + error!("❌ CRITICAL BUG: Query returned deleted file!"); + error!( + " file_id={}, revision={}, is_deleted={}", + file_id, revision, is_deleted + ); + error!(" This should NEVER happen - database inconsistency detected"); + error!(" Query had is_deleted = FALSE filter but returned deleted file"); + return Ok(None); + } + + debug!("✅ Verified file is not deleted: file_id={}", file_id); + + // Extract remaining fields from Row object let acc_hash: String = row.try_get("account_hash").unwrap_or_default(); let device_hash: String = row.try_get("device_hash").unwrap_or_default(); let file_path: String = row.try_get("file_path").unwrap_or_default(); @@ -1224,7 +1288,6 @@ impl MySqlFileExt for MySqlStorage { let updated_ts: Option<i64> = row.try_get("updated_ts").unwrap_or(None); let group_id: i32 = row.try_get("group_id").unwrap_or(0); let watcher_id: i32 = row.try_get("watcher_id").unwrap_or(0); - let revision: i64 = row.try_get("revision").unwrap_or(0); let size: u64 = row.try_get("size").unwrap_or(0); let key_id_opt: Option<String> = row.try_get("key_id").ok();
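The delete and rename handlers in this patch classify storage errors by substring matching on the error text, with the permanent-error check taking precedence over the not-found check. A sketch of that shared logic as a standalone helper is shown below; the names DbErrorClass and classify_db_error are hypothetical, since the patch keeps the checks inline in each handler, but the substrings and precedence mirror the hunks above.

```rust
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
enum DbErrorClass {
    /// Schema or permission problems; retrying will not help.
    Permanent,
    /// The row is gone or already marked deleted.
    NotFound,
    /// Anything else; the client may retry.
    Temporary,
}

fn classify_db_error(error_msg: &str) -> DbErrorClass {
    const PERMANENT: &[&str] = &[
        "VARBINARY", "VARCHAR", "schema", "permission",
        "access denied", "type mismatch", "mismatched types",
    ];
    const NOT_FOUND: &[&str] = &[
        "not found", "No such file", "File not found",
        "already deleted", "File data not found",
    ];

    // Permanent errors win over not-found, matching the handlers above.
    if PERMANENT.iter().any(|p| error_msg.contains(*p)) {
        DbErrorClass::Permanent
    } else if NOT_FOUND.iter().any(|p| error_msg.contains(*p)) {
        DbErrorClass::NotFound
    } else {
        DbErrorClass::Temporary
    }
}

fn main() {
    assert_eq!(classify_db_error("mismatched types; expected VARBINARY"), DbErrorClass::Permanent);
    assert_eq!(classify_db_error("File not found: 42"), DbErrorClass::NotFound);
    assert_eq!(classify_db_error("Duplicate entry"), DbErrorClass::Temporary);
}
```

Anything that matches neither list falls through to Temporary, which corresponds to the handlers' default "deletion failed" / "failed to rename" messages that a client may safely retry.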