Skip to content
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
47 changes: 47 additions & 0 deletions .env.cloud_test
Original file line number Diff line number Diff line change
@@ -0,0 +1,47 @@
# Temporary cloud DB configuration for migration testing
#
# SECURITY NOTE(review): this file commits secret-looking values to the
# repository (DB_PASS, OAUTH_CLIENT_SECRET, AWS_SECRET_ACCESS_KEY,
# SERVER_ENCODE_KEY). If any of these are real, rotate them and keep this
# file out of version control (.gitignore); only placeholder values should
# ever be committed.

# --- Database connection (MySQL over a local tunnel on port 63306) ---
DB_USER="genesis76"
DB_PASS="VC1OGzY4ukM5DaCrVZJb6832XWYc08U072k4yp0bZ"
DB_NAME="cosmic_sync"
DB_HOST="127.0.0.1"
DB_PORT=63306
DB_POOL=5

# --- gRPC server settings ---
SERVER_HOST=0.0.0.0
SERVER_PORT=50051
WORKER_THREADS=4
HEARTBEAT_INTERVAL_SECS=30
AUTH_TOKEN_EXPIRY_HOURS=24

# --- OAuth endpoints (points at a LAN auth server on 10.241.62.167) ---
OAUTH_CLIENT_ID=cosmic-sync
OAUTH_CLIENT_SECRET=cosmicsecretsocmicsecret
OAUTH_REDIRECT_URI=http://10.241.62.167:8080/oauth/callback
OAUTH_AUTH_URL=http://10.241.62.167:4000/oauth/authorize
OAUTH_TOKEN_URL=http://10.241.62.167:4000/oauth/token
OAUTH_USER_INFO_URL=http://10.241.62.167:4000/api/settings
OAUTH_SCOPE=profile:read

# --- Request limits ---
MAX_CONCURRENT_REQUESTS=100
MAX_FILE_SIZE=52428800

# --- Logging ---
RUST_LOG=cosmic_sync_server=debug,info
LOG_LEVEL=debug
LOG_TO_FILE=false

# --- Object storage (MinIO running locally, S3-compatible) ---
STORAGE_TYPE="s3"
AWS_REGION="us-east-2"
S3_BUCKET="cosmic-sync-files"
S3_KEY_PREFIX="files/"
AWS_ACCESS_KEY_ID="minioadmin"
AWS_SECRET_ACCESS_KEY="minioadmin"
S3_ENDPOINT_URL="http://127.0.0.1:9000"
S3_FORCE_PATH_STYLE="true"
S3_TIMEOUT_SECONDS="30"
S3_MAX_RETRIES="3"

# --- Dev/test mode flags ---
COSMIC_SYNC_DEV_MODE="1"
COSMIC_SYNC_TEST_MODE="1"
COSMIC_SYNC_DEBUG_MODE="1"

# Server-side encryption key (hex) — see SECURITY NOTE above.
SERVER_ENCODE_KEY=c3e15e2f727cf777380f23a9f9fa8156c5f4f7f3e697f6dc95a47372e76ac6bf

# --- Message queue ---
RABBITMQ_ENABLED=false
1 change: 0 additions & 1 deletion Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,6 @@ RUN rm -f src/main.rs src/lib.rs
COPY src ./src

# Clean and rebuild with actual source
RUN touch src/main.rs src/lib.rs
RUN cargo build --release --bin cosmic-sync-server --features redis-cache --target ${RUST_TARGET}

# Runtime stage - use Ubuntu 24.04 for newer glibc compatibility
Expand Down
9 changes: 9 additions & 0 deletions env_home.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/bin/bash
# Switch to home network (192.168.50.100)
#
# Rewrites the office IP in .env to the home IP and updates the OAuth
# redirect URI stored in the local dev database to match.
#
# NOTE(review): the MySQL password is passed on the command line
# (-precognizer), which exposes it in `ps` output; acceptable only for a
# local dev database.
set -euo pipefail

# Escape the dots so sed matches the literal IP, not "any character"
# (unescaped, 10.17.89.63 would also match e.g. 10x17y89z63).
sed -i 's/10\.17\.89\.63/192.168.50.100/g' .env

mysql -h 127.0.0.1 -P 3306 -u root -precognizer --ssl-mode=DISABLED recognizer_dev -e \
  "UPDATE oauth_applications SET redirect_uri = 'http://192.168.50.100:8080/oauth/callback' WHERE redirect_uri LIKE '%/oauth/callback';"

echo "Switched to home network (192.168.50.100)"
9 changes: 9 additions & 0 deletions env_office.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,9 @@
#!/bin/bash
# Switch to office network (10.17.89.63)
#
# Rewrites the home IP in .env to the office IP and updates the OAuth
# redirect URI stored in the local dev database to match.
#
# NOTE(review): the MySQL password is passed on the command line
# (-precognizer), which exposes it in `ps` output; acceptable only for a
# local dev database.
set -euo pipefail

# Escape the dots so sed matches the literal IP, not "any character"
# (unescaped, 192.168.50.100 would also match e.g. 192x168y50z100).
sed -i 's/192\.168\.50\.100/10.17.89.63/g' .env

mysql -h 127.0.0.1 -P 3306 -u root -precognizer --ssl-mode=DISABLED recognizer_dev -e \
  "UPDATE oauth_applications SET redirect_uri = 'http://10.17.89.63:8080/oauth/callback' WHERE redirect_uri LIKE '%/oauth/callback';"

echo "Switched to office network (10.17.89.63)"
15 changes: 15 additions & 0 deletions migrations/add_key_status_table.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,15 @@
-- Migration: Add key_status table for encryption key invalidation feature
-- Date: 2025-12-12
--
-- Tracks per-account encryption key lifecycle so the server can record
-- which key_ids have been invalidated, by which device, and why.
-- One row per (account_hash, key_id) pair, enforced by the unique index.

CREATE TABLE IF NOT EXISTS key_status (
id BIGINT AUTO_INCREMENT PRIMARY KEY,
account_hash VARCHAR(64) NOT NULL, -- owning account (hashed identifier)
key_id VARCHAR(32) NOT NULL, -- encryption key identifier
status VARCHAR(20) NOT NULL DEFAULT 'active', -- key state, e.g. 'active' / invalidated
reason VARCHAR(50), -- why the key was invalidated (NULL while active)
invalidated_at TIMESTAMP NULL, -- when invalidation happened (NULL while active)
invalidated_by_device VARCHAR(64), -- device hash that triggered invalidation
created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP,
UNIQUE INDEX idx_key_status_account_key (account_hash, key_id), -- one row per account/key
INDEX idx_key_status_account (account_hash) -- list all keys for an account
) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_unicode_ci;
64 changes: 64 additions & 0 deletions migrations/add_soft_delete.sql
Original file line number Diff line number Diff line change
@@ -0,0 +1,64 @@
-- Soft Delete Migration for Watcher Sync
-- This migration adds is_deleted and deleted_at columns to support soft delete functionality
-- for watchers, watcher_groups, and files tables.
--
-- NOTE(review): ALTER TABLE / CREATE INDEX here are not idempotent on
-- MySQL (no IF NOT EXISTS support for ADD COLUMN / CREATE INDEX);
-- re-running this migration will fail — confirm the migration runner
-- applies each file exactly once.

-- ============================================================================
-- 1. Alter watcher_groups table
-- ============================================================================
ALTER TABLE watcher_groups
ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
ADD COLUMN deleted_at TIMESTAMP NULL;

-- ============================================================================
-- 2. Alter watchers table
-- ============================================================================
ALTER TABLE watchers
ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
ADD COLUMN deleted_at TIMESTAMP NULL;

-- ============================================================================
-- 3. Alter files table
-- ============================================================================
ALTER TABLE files
ADD COLUMN is_deleted BOOLEAN NOT NULL DEFAULT FALSE,
ADD COLUMN deleted_at TIMESTAMP NULL;

-- ============================================================================
-- 4. Add performance indexes (query optimization)
-- ============================================================================

-- watcher_groups: look up active groups per account
CREATE INDEX idx_watcher_groups_account_active
ON watcher_groups(account_hash, is_deleted);

-- watchers: look up active watchers per account
CREATE INDEX idx_watchers_account_active
ON watchers(account_hash, is_deleted);

-- watchers: look up active watchers per group
CREATE INDEX idx_watchers_group_active
ON watchers(group_id, is_deleted);

-- files: look up active files per watcher
CREATE INDEX idx_files_watcher_active
ON files(server_watcher_id, is_deleted);

-- files: look up active files per account
CREATE INDEX idx_files_account_active
ON files(account_hash, is_deleted);

-- ============================================================================
-- 5. Indexes for the cleanup job (optimize lookup of deletion candidates)
-- ============================================================================

-- watcher_groups: find records eligible for permanent deletion
CREATE INDEX idx_watcher_groups_deleted_at
ON watcher_groups(deleted_at);

-- watchers: find records eligible for permanent deletion
CREATE INDEX idx_watchers_deleted_at
ON watchers(deleted_at);

-- files: find records eligible for permanent deletion
CREATE INDEX idx_files_deleted_at
ON files(deleted_at);
49 changes: 48 additions & 1 deletion proto/sync.proto
Original file line number Diff line number Diff line change
Expand Up @@ -89,6 +89,9 @@ service SyncService {
rpc GetUsageStats(GetUsageStatsRequest) returns (GetUsageStatsResponse);
rpc GetQuotaInfo(GetQuotaInfoRequest) returns (GetQuotaInfoResponse);
rpc CheckQuotaStatus(CheckQuotaStatusRequest) returns (CheckQuotaStatusResponse);

// Key invalidation
rpc InvalidateKeyId(InvalidateKeyIdRequest) returns (InvalidateKeyIdResponse);
}

// Service provided by the client daemon
Expand Down Expand Up @@ -458,6 +461,7 @@ message DownloadFileResponse {
uint64 file_size = 9;
string key_id = 10;
optional uint32 unix_permissions = 11;
int32 error_code = 12;
}

// Server-streaming download chunk
Expand Down Expand Up @@ -554,6 +558,7 @@ message RenameFileRequest {
optional string operation_type = 11; // e.g., "RENAME" or "MOVE"
ConflictInfo.ResolutionStrategy conflict_resolution = 12; // How to resolve conflicts (default: MANUAL)
google.protobuf.Timestamp updated_time = 13; // Client's last known update time for timestamp-based conflict resolution
optional bool overwrite = 14; // If true, tombstone conflicting target before rename/move
}

// NEW: Rename file response
Expand Down Expand Up @@ -630,6 +635,9 @@ enum ErrorCode {
// Database Errors (90-99)
DB_ERROR = 90; // Database operation failed
DB_SCHEMA_ERROR = 91; // Database schema mismatch

// Key Errors (100+)
KEY_INVALIDATED = 100; // Encryption key has been invalidated
}

// Individual Watcher management messages
Expand Down Expand Up @@ -856,6 +864,7 @@ message SyncConfigurationRequest {
bool incremental = 6; // Incremental sync vs full sync
bool force_update = 7; // Force update flag
int64 client_timestamp = 8; // Client timestamp
int64 last_sync_timestamp = 9; // Last successful sync timestamp (for Item-level LWW)
}

message SyncConfigurationResponse {
Expand All @@ -869,16 +878,37 @@ message SyncConfigurationResponse {
repeated string conflict_details = 8; // Conflict details
ActionTaken action_taken = 9; // Action taken during synchronization
bool is_new_account = 10; // Whether this is a new account setup
int32 retention_days = 11; // Soft-deleted items retention period (days)
repeated DeletedItemInfo deleted_items = 12; // Items soft-deleted in this sync
repeated WatcherGroupData recently_added = 13; // Items added by other devices (client should merge)
repeated DeletedItemInfo recently_deleted = 14; // Items deleted by other devices (client should remove)
}

// Information about a soft-deleted item.
// Carried in SyncConfigurationResponse (deleted_items / recently_deleted)
// so other devices can mirror deletions made elsewhere.
message DeletedItemInfo {
enum ItemType {
GROUP = 0; // an entire watcher group was deleted
WATCHER = 1; // a single watcher within a group was deleted
}
ItemType type = 1; // Type of deleted item
int32 group_id = 2; // Deleted group_id (or parent group_id for watcher)
int32 watcher_id = 3; // Deleted watcher_id (0 if type is GROUP)
string title = 4; // Name of the deleted item
int32 affected_files = 5; // Number of files affected by deletion
}

// Aggregate statistics for a configuration sync operation.
// NOTE: field number 3 appeared twice in the previous text (a stale
// duplicate of groups_deleted); protoc rejects duplicate field numbers,
// so the stale line is removed here.
message SyncStats {
  int32 groups_updated = 1;      // Number of updated groups
  int32 groups_created = 2;      // Number of created groups
  int32 groups_deleted = 3;      // Number of deleted groups (soft delete)
  int32 presets_updated = 4;     // Number of updated presets
  int64 sync_timestamp = 5;      // Synchronization timestamp
  int32 total_operations = 6;    // Total number of operations
  double sync_duration_ms = 7;   // Synchronization duration (milliseconds)
  int32 watchers_created = 8;    // Number of created watchers
  int32 watchers_updated = 9;    // Number of updated watchers
  int32 watchers_deleted = 10;   // Number of deleted watchers (soft delete)
  int32 files_soft_deleted = 11; // Number of files soft-deleted due to watcher deletion
}

// File history lookup request
Expand Down Expand Up @@ -1144,3 +1174,20 @@ message BatchOperationsResponse {
int32 successful_operations = 5;
int32 failed_operations = 6;
}

// Key invalidation request — asks the server to mark an encryption key
// as invalidated for an account (handled by SyncService.InvalidateKeyId).
message InvalidateKeyIdRequest {
string auth_token = 1; // session token authorizing the request
string account_hash = 2; // account that owns the key
string key_id = 3; // encryption key to invalidate
string reason = 4; // e.g., "user_rotation", "security_breach"
string device_hash = 5; // Device that initiated invalidation
}

// Key invalidation response
message InvalidateKeyIdResponse {
bool success = 1; // true when the key was marked invalidated
string return_message = 2; // human-readable status / error detail
int32 affected_files_count = 3; // Number of files using this key
int32 error_code = 4; // ErrorCode value on failure (0 on success)
}
5 changes: 0 additions & 5 deletions src/auth/oauth.rs
Original file line number Diff line number Diff line change
Expand Up @@ -329,11 +329,6 @@ impl OAuthService {
pub async fn verify_token(&self, token: &str) -> Result<VerificationResult> {
match self.validate_token(token).await {
Ok(account_hash) => {
debug!(
"✅ Token validation successful: account_hash={}",
account_hash
);

// Check if account exists in local DB
match self.storage.get_account_by_hash(&account_hash).await {
Ok(Some(_)) => {
Expand Down
101 changes: 101 additions & 0 deletions src/handlers/admin_handler.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,101 @@
//! Admin API handlers for maintenance operations

use actix_web::{web, HttpResponse, Result as ActixResult};
use serde::Serialize;
use std::sync::Arc;
use tracing::{error, info};

use crate::server::app_state::AppState;
use crate::storage::CleanupStats;

/// Default retention period for soft-deleted items (in days)
const DEFAULT_RETENTION_DAYS: i32 = 30;

/// Response for cleanup operation
#[derive(Debug, Serialize)]
pub struct CleanupResponse {
    /// Whether the cleanup run completed without a storage error.
    pub success: bool,
    /// Human-readable outcome (success confirmation or error detail).
    pub message: String,
    /// Retention period (days) the run was executed with.
    pub retention_days: i32,
    /// Per-category deletion counts reported by the storage layer.
    pub stats: CleanupStats,
}

/// Run cleanup job to permanently delete soft-deleted records older than
/// the retention period.
///
/// Permanently removes files (including S3 objects), watchers, and watcher
/// groups that have been soft-deleted for longer than the retention period
/// (default 30 days).
///
/// # Request
/// POST /admin/cleanup
///
/// # Response
/// ```json
/// {
///   "success": true,
///   "message": "Cleanup completed successfully",
///   "retention_days": 30,
///   "stats": {
///     "files_deleted": 10,
///     "watchers_deleted": 5,
///     "groups_deleted": 2,
///     "s3_objects_deleted": 10,
///     "errors": []
///   }
/// }
/// ```
///
/// Always returns `Ok`: failures are reported as HTTP 500 with a
/// `CleanupResponse` body rather than as an actix error.
pub async fn run_cleanup(app_state: web::Data<Arc<AppState>>) -> ActixResult<HttpResponse> {
    info!("Admin cleanup job started");

    let outcome = app_state
        .storage
        .cleanup_soft_deleted(DEFAULT_RETENTION_DAYS)
        .await;

    let response = match outcome {
        Ok(stats) => {
            info!(
                "Cleanup completed: {} files, {} watchers, {} groups deleted",
                stats.files_deleted, stats.watchers_deleted, stats.groups_deleted
            );
            HttpResponse::Ok().json(CleanupResponse {
                success: true,
                message: "Cleanup completed successfully".to_string(),
                retention_days: DEFAULT_RETENTION_DAYS,
                stats,
            })
        }
        Err(e) => {
            error!("Cleanup failed: {}", e);
            HttpResponse::InternalServerError().json(CleanupResponse {
                success: false,
                message: format!("Cleanup failed: {}", e),
                retention_days: DEFAULT_RETENTION_DAYS,
                stats: CleanupStats::default(),
            })
        }
    };

    Ok(response)
}

/// Get cleanup status and statistics (read-only, no actual cleanup)
///
/// # Request
/// GET /admin/cleanup/status
///
/// # Response
/// Returns the retention configuration and the endpoint that triggers a
/// cleanup run. In the future, could also report counts of items pending
/// cleanup.
pub async fn cleanup_status(app_state: web::Data<Arc<AppState>>) -> ActixResult<HttpResponse> {
    let _ = app_state; // Will be used when we add pending count queries

    // Interpolate the configured retention period instead of hardcoding
    // "30 days", so the description cannot drift from DEFAULT_RETENTION_DAYS.
    Ok(HttpResponse::Ok().json(serde_json::json!({
        "retention_days": DEFAULT_RETENTION_DAYS,
        "description": format!(
            "Items soft-deleted more than {} days ago will be permanently deleted on cleanup",
            DEFAULT_RETENTION_DAYS
        ),
        "trigger_endpoint": "POST /admin/cleanup"
    })))
}
Loading