From: PhiTux Date: Wed, 16 Jul 2025 12:06:11 +0000 (+0200) Subject: migration-progress is visible in frontend X-Git-Url: http://git.99rst.org/?a=commitdiff_plain;h=6e1018edd97dc983338659ae0088165b213ec6cc;p=DailyTxT.git migration-progress is visible in frontend --- diff --git a/backend/go.mod b/backend/go.mod index a202156..9724864 100644 --- a/backend/go.mod +++ b/backend/go.mod @@ -9,5 +9,6 @@ require ( require ( github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.3 // indirect golang.org/x/sys v0.17.0 // indirect ) diff --git a/backend/go.sum b/backend/go.sum index e1cf713..450ba80 100644 --- a/backend/go.sum +++ b/backend/go.sum @@ -2,6 +2,8 @@ github.com/golang-jwt/jwt/v5 v5.2.0 h1:d/ix8ftRUorsN+5eMIlF4T6J8CAt9rch3My2winC1 github.com/golang-jwt/jwt/v5 v5.2.0/go.mod h1:pqrtFR0X4osieyHYxtmOUWsAWrfe1Q5UVIyoH402zdk= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= golang.org/x/crypto v0.19.0 h1:ENy+Az/9Y1vSrlrvBSyna3PITt4tiZLf7sgCjZBX7Wo= golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU= golang.org/x/sys v0.17.0 h1:25cE3gD+tdBA7lp7QfhuV+rJiE9YXTcS3VG1SqssI/Y= diff --git a/backend/handlers/users.go b/backend/handlers/users.go index 480923f..d09239b 100644 --- a/backend/handlers/users.go +++ b/backend/handlers/users.go @@ -5,6 +5,7 @@ import ( "encoding/json" "fmt" "net/http" + "strings" "sync" "time" @@ -154,9 +155,9 @@ func Login(w http.ResponseWriter, r *http.Request) { // Convert from utils.MigrationProgress to handlers.MigrationProgress migrationProgress[req.Username] = MigrationProgress{ Phase: progress.Phase, - CurrentItem: progress.CurrentItem, ProcessedItems: progress.ProcessedItems, TotalItems: 
progress.TotalItems, + ErrorCount: progress.ErrorCount, } migrationProgressMutex.Unlock() } @@ -172,8 +173,6 @@ func Login(w http.ResponseWriter, r *http.Request) { return } - utils.Logger.Printf("Migration completed for user '%s'", req.Username) - // Mark migration as completed activeMigrationsMutex.Lock() activeMigrations[req.Username] = false @@ -281,7 +280,7 @@ func Register(username string, password string) (bool, error) { continue } - if username_from_file, ok := user["username"].(string); ok && username_from_file == username { + if username_from_file, ok := user["username"].(string); ok && strings.EqualFold(username_from_file, username) { utils.Logger.Printf("Registration failed. Username '%s' already exists", username) return false, fmt.Errorf("username already exists: %d", http.StatusBadRequest) } @@ -583,9 +582,9 @@ func SaveUserSettings(w http.ResponseWriter, r *http.Request) { // MigrationProgress stores the progress of user data migration type MigrationProgress struct { Phase string `json:"phase"` // Current migration phase - CurrentItem string `json:"current_item"` // Current item being migrated ProcessedItems int `json:"processed_items"` // Number of items processed TotalItems int `json:"total_items"` // Total number of items to process + ErrorCount int `json:"error_count"` // Number of errors encountered during migration } // migrationProgress keeps track of migration progress for all users @@ -594,84 +593,38 @@ var migrationProgressMutex sync.Mutex var activeMigrations = make(map[string]bool) var activeMigrationsMutex sync.RWMutex -// CheckMigrationProgress checks the progress of a user migration -func CheckMigrationProgress(w http.ResponseWriter, r *http.Request) { - // Get username from query parameters - username := r.URL.Query().Get("username") - if username == "" { - http.Error(w, "Username is required", http.StatusBadRequest) - return - } - - // Get progress - migrationProgressMutex.Lock() - progress, exists := migrationProgress[username] - 
migrationProgressMutex.Unlock() - - if !exists { - utils.JSONResponse(w, http.StatusOK, map[string]interface{}{ - "progress": 0, - "status": "not_started", - }) - return - } - - // Return progress - status := "in_progress" - if progress.TotalItems > 0 && progress.ProcessedItems >= progress.TotalItems { - status = "completed" - } - - utils.JSONResponse(w, http.StatusOK, map[string]interface{}{ - "progress": progress, - "status": status, - }) -} - // GetMigrationProgress returns the migration progress for a user func GetMigrationProgress(w http.ResponseWriter, r *http.Request) { - // Parse the request body - var req struct { - Username string `json:"username"` - } - if err := json.NewDecoder(r.Body).Decode(&req); err != nil { - http.Error(w, "Invalid request body", http.StatusBadRequest) + username := r.URL.Query().Get("username") + if username == "" { + utils.Logger.Printf("username: %s", username) + http.Error(w, "Unauthorized", http.StatusUnauthorized) return } // Get migration progress migrationProgressMutex.Lock() - progress, exists := migrationProgress[req.Username] + progress, exists := migrationProgress[username] migrationProgressMutex.Unlock() // Check if migration is actually active activeMigrationsMutex.RLock() - isActive := activeMigrations[req.Username] + isActive := activeMigrations[username] activeMigrationsMutex.RUnlock() if !exists { - utils.JSONResponse(w, http.StatusOK, map[string]interface{}{ + utils.JSONResponse(w, http.StatusOK, map[string]any{ "migration_in_progress": false, - "status": "not_started", + "progress": map[string]string{"phase": "not_started"}, }) return } // Check if migration is completed - migrationCompleted := progress.Phase == "completed" || (progress.ProcessedItems >= progress.TotalItems && progress.TotalItems > 0) + migrationCompleted := progress.Phase == "completed" - // Return progress - status := "in_progress" - if migrationCompleted { - status = "completed" - } else if !isActive { - // If migration is not active but not 
completed, it might have failed - status = "failed" - } - - utils.JSONResponse(w, http.StatusOK, map[string]interface{}{ + utils.JSONResponse(w, http.StatusOK, map[string]any{ "migration_in_progress": isActive && !migrationCompleted, "progress": progress, - "status": status, }) } diff --git a/backend/main.go b/backend/main.go index 6d166b7..6ab09a3 100644 --- a/backend/main.go +++ b/backend/main.go @@ -32,13 +32,14 @@ func main() { // Register routes mux.HandleFunc("POST /users/login", handlers.Login) + mux.HandleFunc("GET /users/migrationWebSocket", handlers.HandleMigrationWebSocket) + mux.HandleFunc("GET /users/migrationProgress", handlers.GetMigrationProgress) mux.HandleFunc("GET /users/isRegistrationAllowed", handlers.IsRegistrationAllowed) mux.HandleFunc("POST /users/register", handlers.RegisterHandler) mux.HandleFunc("GET /users/logout", handlers.Logout) mux.HandleFunc("GET /users/check", middleware.RequireAuth(handlers.CheckLogin)) mux.HandleFunc("GET /users/getUserSettings", middleware.RequireAuth(handlers.GetUserSettings)) mux.HandleFunc("POST /users/saveUserSettings", middleware.RequireAuth(handlers.SaveUserSettings)) - mux.HandleFunc("POST /users/migrationProgress", handlers.GetMigrationProgress) mux.HandleFunc("POST /logs/saveLog", middleware.RequireAuth(handlers.SaveLog)) mux.HandleFunc("GET /logs/getLog", middleware.RequireAuth(handlers.GetLog)) diff --git a/backend/utils/file_handling.go b/backend/utils/file_handling.go index 5fba82f..098063a 100644 --- a/backend/utils/file_handling.go +++ b/backend/utils/file_handling.go @@ -1,9 +1,7 @@ package utils import ( - "crypto/cipher" "crypto/rand" - "encoding/base64" "encoding/json" "fmt" "io" @@ -12,8 +10,6 @@ import ( "strconv" "strings" "sync" - - "golang.org/x/crypto/chacha20poly1305" ) // Mutexes für Dateizugriffe @@ -457,185 +453,3 @@ func GetMonths(userID int, year string) ([]string, error) { return months, nil } - -// CreateAEAD creates an AEAD cipher for encryption/decryption -func CreateAEAD(key 
[]byte) (cipher.AEAD, error) { - return chacha20poly1305.New(key) -} - -// EncryptText encrypts text using the provided key -func EncryptText(text, key string) (string, error) { - // Decode key - keyBytes, err := base64.URLEncoding.DecodeString(key) - if err != nil { - return "", fmt.Errorf("error decoding key: %v", err) - } - - // Create AEAD cipher - aead, err := chacha20poly1305.New(keyBytes) - if err != nil { - return "", fmt.Errorf("error creating cipher: %v", err) - } - - // Create nonce - nonce := make([]byte, aead.NonceSize()) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return "", fmt.Errorf("error creating nonce: %v", err) - } - - // Encrypt text - ciphertext := aead.Seal(nonce, nonce, []byte(text), nil) - return base64.URLEncoding.EncodeToString(ciphertext), nil -} - -// DecryptText decrypts text using the provided key -func DecryptText(ciphertext, key string) (string, error) { - // Decode key and ciphertext - keyBytes, err := base64.URLEncoding.DecodeString(key) - if err != nil { - return "", fmt.Errorf("error decoding key: %v", err) - } - - ciphertextBytes, err := base64.URLEncoding.DecodeString(ciphertext) - if err != nil { - return "", fmt.Errorf("error decoding ciphertext: %v", err) - } - - // Create AEAD cipher - aead, err := chacha20poly1305.New(keyBytes) - if err != nil { - return "", fmt.Errorf("error creating cipher: %v", err) - } - - // Extract nonce from ciphertext - if len(ciphertextBytes) < aead.NonceSize() { - return "", fmt.Errorf("ciphertext too short") - } - nonce, ciphertextBytes := ciphertextBytes[:aead.NonceSize()], ciphertextBytes[aead.NonceSize():] - - // Decrypt text - plaintext, err := aead.Open(nil, nonce, ciphertextBytes, nil) - if err != nil { - return "", fmt.Errorf("error decrypting ciphertext: %v", err) - } - - return string(plaintext), nil -} - -// EncryptFile encrypts a file using the provided key -func EncryptFile(data []byte, key string) ([]byte, error) { - // Decode key - keyBytes, err := 
base64.URLEncoding.DecodeString(key) - if err != nil { - return nil, fmt.Errorf("error decoding key: %v", err) - } - - // Create AEAD cipher - aead, err := chacha20poly1305.New(keyBytes) - if err != nil { - return nil, fmt.Errorf("error creating cipher: %v", err) - } - - // Create nonce - nonce := make([]byte, aead.NonceSize()) - if _, err := io.ReadFull(rand.Reader, nonce); err != nil { - return nil, fmt.Errorf("error creating nonce: %v", err) - } - - // Encrypt file - ciphertext := aead.Seal(nonce, nonce, data, nil) - return ciphertext, nil -} - -// DecryptFile decrypts a file using the provided key -func DecryptFile(ciphertext []byte, key string) ([]byte, error) { - // Decode key - keyBytes, err := base64.URLEncoding.DecodeString(key) - if err != nil { - return nil, fmt.Errorf("error decoding key: %v", err) - } - - // Create AEAD cipher - aead, err := chacha20poly1305.New(keyBytes) - if err != nil { - return nil, fmt.Errorf("error creating cipher: %v", err) - } - - // Extract nonce from ciphertext - if len(ciphertext) < aead.NonceSize() { - return nil, fmt.Errorf("ciphertext too short") - } - nonce, ciphertext := ciphertext[:aead.NonceSize()], ciphertext[aead.NonceSize():] - - // Decrypt file - plaintext, err := aead.Open(nil, nonce, ciphertext, nil) - if err != nil { - return nil, fmt.Errorf("error decrypting ciphertext: %v", err) - } - - return plaintext, nil -} - -// GetEncryptionKey retrieves the encryption key for a specific user -func GetEncryptionKey(userID int, derivedKey string) (string, error) { - // Get users - users, err := GetUsers() - if err != nil { - return "", fmt.Errorf("error retrieving users: %v", err) - } - - // Find user - usersList, ok := users["users"].([]any) - if !ok { - return "", fmt.Errorf("users.json is not in the correct format") - } - - for _, u := range usersList { - user, ok := u.(map[string]any) - if !ok { - continue - } - - if id, ok := user["user_id"].(float64); ok && int(id) == userID { - encEncKey, ok := 
user["enc_enc_key"].(string) - if !ok { - return "", fmt.Errorf("user data is not in the correct format") - } - - // Decode derived key - derivedKeyBytes, err := base64.StdEncoding.DecodeString(derivedKey) - if err != nil { - return "", fmt.Errorf("error decoding derived key: %v", err) - } - - // Create Fernet cipher - aead, err := CreateAEAD(derivedKeyBytes) - if err != nil { - return "", fmt.Errorf("error creating cipher: %v", err) - } - - // Decode encrypted key - encEncKeyBytes, err := base64.StdEncoding.DecodeString(encEncKey) - if err != nil { - return "", fmt.Errorf("error decoding encrypted key: %v", err) - } - - // Extract nonce from encrypted key - if len(encEncKeyBytes) < aead.NonceSize() { - return "", fmt.Errorf("encrypted key too short") - } - nonce, encKeyBytes := encEncKeyBytes[:aead.NonceSize()], encEncKeyBytes[aead.NonceSize():] - - // Decrypt key - keyBytes, err := aead.Open(nil, nonce, encKeyBytes, nil) - if err != nil { - return "", fmt.Errorf("error decrypting key: %v", err) - } - - // Return base64-encoded key - return base64.URLEncoding.EncodeToString(keyBytes), nil - } - } - - return "", fmt.Errorf("user not found") -} diff --git a/backend/utils/migration.go b/backend/utils/migration.go index 65feb16..51093ad 100644 --- a/backend/utils/migration.go +++ b/backend/utils/migration.go @@ -10,7 +10,6 @@ import ( "encoding/base64" "encoding/json" "fmt" - "net/http" "os" "path/filepath" "strconv" @@ -19,27 +18,26 @@ import ( "time" ) -// Mutexes für Dateizugriffe +// Mutexes for file access var ( - activeMigrationsMutex sync.RWMutex // Für die Map der aktiven Migrationen - oldUsersFileMutex sync.RWMutex // Für old/users.json - templatesMutex sync.RWMutex // Für templates.json - tagsMutex sync.RWMutex // Für tags.json - logsMutex sync.RWMutex // Für Logs - filesMutex sync.RWMutex // Für Dateien im files-Verzeichnis + activeMigrationsMutex sync.RWMutex // For the map of active migrations + oldUsersFileMutex sync.RWMutex // For old/users.json + 
templatesMutex sync.RWMutex // For templates.json + logsMutex sync.RWMutex // For logs + filesMutex sync.RWMutex // For files in the files directory ) -// Map zur Verfolgung aktiver Migrationen (username -> bool) +// Map to track active migrations (username -> bool) var activeMigrations = make(map[string]bool) -// IsUserMigrating prüft, ob für einen Benutzer bereits eine Migration läuft +// IsUserMigrating checks if a migration is already in progress for a user func IsUserMigrating(username string) bool { activeMigrationsMutex.RLock() defer activeMigrationsMutex.RUnlock() return activeMigrations[username] } -// SetUserMigrating markiert einen Benutzer als migrierend oder nicht migrierend +// SetUserMigrating marks a user as migrating or not migrating func SetUserMigrating(username string, migrating bool) { activeMigrationsMutex.Lock() defer activeMigrationsMutex.Unlock() @@ -187,19 +185,39 @@ func VerifyOldPassword(password, hash string) bool { // MigrateUserData migrates a user's data from the old format to the new format func MigrateUserData(username, password string, registerFunc RegisterUserFunc, progressChan chan<- MigrationProgress) error { - // Prüfen, ob bereits eine Migration für diesen Benutzer läuft + // Check if a migration is already in progress for this user if IsUserMigrating(username) { Logger.Printf("Migration for user %s is already in progress", username) return fmt.Errorf("migration already in progress for user %s", username) } - // Benutzer als migrierend markieren + // Mark user as migrating SetUserMigrating(username, true) - // Sicherstellen, dass der Benutzer am Ende nicht mehr als migrierend markiert ist + // Ensure the user is no longer marked as migrating when this function ends defer SetUserMigrating(username, false) + // Initialize migration progress + currentProgress := MigrationProgress{ + Phase: "creating_new_user", + ProcessedItems: 0, + ErrorCount: 0, + } + + if progressChan != nil { + progressChan <- currentProgress // Send initial 
progress + } + + // Error handling function for consistent error handling + handleError := func(errMsg string, err error) error { + errorMessage := fmt.Sprintf("%s: %v", errMsg, err) + Logger.Printf("Migration error for user %s: %s", username, errorMessage) + + // Send final update with Success=false + return fmt.Errorf("%s: %v", errMsg, err) + } + start := time.Now() - Logger.Printf("Starting migration for user %s with password %s", username, password) + Logger.Printf("Starting migration for user %s", username) // Get old users oldUsersFileMutex.RLock() @@ -208,13 +226,13 @@ func MigrateUserData(username, password string, registerFunc RegisterUserFunc, p oldUsersFileMutex.RUnlock() if err != nil { - return fmt.Errorf("error reading old users: %v", err) + return handleError("Error reading old users", err) } // Parse old users var oldUsers map[string]any if err := json.Unmarshal(oldUsersBytes, &oldUsers); err != nil { - return fmt.Errorf("error parsing old users: %v", err) + return handleError("Error parsing old users", err) } // Find the old user by username @@ -229,75 +247,62 @@ func MigrateUserData(username, password string, registerFunc RegisterUserFunc, p } if oldUser == nil { - return fmt.Errorf("user %s not found in old data", username) + return handleError(fmt.Sprintf("User %s not found in old data", username), nil) } oldUserID = int(oldUser["user_id"].(float64)) - Logger.Printf("Found old user ID: %d", oldUserID) - // Update progress - if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "creating_new_user", - CurrentItem: "", - ProcessedItems: 1, - TotalItems: 5, - } - } - // Verify username matches oldUsername, ok := oldUser["username"].(string) if !ok || oldUsername != username { - return fmt.Errorf("username mismatch: expected %s, got %s", username, oldUsername) + return handleError(fmt.Sprintf("Username mismatch: expected %s, got %s", username, oldUsername), nil) } // Get encryption related data from old user oldSalt, ok := 
oldUser["salt"].(string) if !ok { - return fmt.Errorf("old user data is missing salt") + return handleError("Old user data is missing salt", nil) } oldEncEncKey, ok := oldUser["enc_enc_key"].(string) if !ok { - return fmt.Errorf("old user data is missing encrypted key") + return handleError("Old user data is missing encrypted key", nil) } // Derive key from password and salt oldDerivedKey := DeriveKeyFromOldPassword(password, oldSalt) _, err = base64.StdEncoding.DecodeString(base64.URLEncoding.EncodeToString(oldDerivedKey)) if err != nil { - return fmt.Errorf("error decoding old derived key: %v", err) + return handleError("Error decoding old derived key", err) } // Decode the old encrypted key (just for validation) _, err = base64.URLEncoding.DecodeString(oldEncEncKey) if err != nil { - return fmt.Errorf("error decoding old encrypted key: %v", err) + return handleError("Error decoding old encrypted key", err) } // Decrypt the old encryption key oldEncKey, err := FernetDecrypt(oldEncEncKey, oldDerivedKey) if err != nil { - return fmt.Errorf("error decrypting old encryption key: %v", err) + return handleError("Error decrypting old encryption key", err) } - // Debug: Zeige den Schlüssel - fmt.Printf("Old encryption key: %s\n", oldEncKey) - - // Registriere den Benutzer mit der übergebenen Funktion + // Register the user with the provided function success, err := registerFunc(username, password) if err != nil { - return fmt.Errorf("error registering new user: %v", err) + return handleError("Error registering new user", err) } if !success { - return fmt.Errorf("failed to register new user") + return handleError("Failed to register new user", nil) } users, err := GetUsers() if err != nil { - return fmt.Errorf("error getting users: %v", err) + return handleError("Error getting users", err) } + // Find the new user ID newUserID := 0 newDerivedKey := "" @@ -309,14 +314,13 @@ func MigrateUserData(username, password string, registerFunc RegisterUserFunc, p // Verify password 
if !VerifyPassword(password, u["password"].(string), u["salt"].(string)) { - Logger.Printf("Login failed. Password for user '%s' is incorrect", username) - return fmt.Errorf("user/Password combination not found: %d", http.StatusNotFound) + return handleError(fmt.Sprintf("Login failed. Password for user '%s' is incorrect", username), nil) } // Get intermediate key derivedKey, err := DeriveKeyFromPassword(password, u["salt"].(string)) if err != nil { - return fmt.Errorf("internal Server Error: %d", http.StatusInternalServerError) + return handleError("Internal Server Error", err) } newDerivedKey = base64.StdEncoding.EncodeToString(derivedKey) @@ -325,18 +329,7 @@ func MigrateUserData(username, password string, registerFunc RegisterUserFunc, p } } if newUserID <= 0 { - return fmt.Errorf("new user ID not found for username: %s", username) - } - - fmt.Printf("New derived key: %s\n", newDerivedKey) - - // Update progress - if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "writing_user_data", - ProcessedItems: 3, - TotalItems: 5, - } + return handleError(fmt.Sprintf("New user ID not found for username: %s", username), nil) } // Now migrate all the data @@ -345,38 +338,40 @@ func MigrateUserData(username, password string, registerFunc RegisterUserFunc, p // Create new data directory if err := os.MkdirAll(newDataDir, 0755); err != nil { - return fmt.Errorf("error creating new data directory: %v", err) + return handleError("Error creating new data directory", err) } encKey, err := GetEncryptionKey(newUserID, string(newDerivedKey)) if err != nil { - return fmt.Errorf("error getting encryption key: %v", err) + return handleError("Error getting encryption key", err) } - fmt.Printf("New encryption key: %s\n", encKey) + time.Sleep(800 * time.Millisecond) // Simulate some delay for migration // Migrate templates - if err := migrateTemplates(oldDataDir, newDataDir, oldEncKey, encKey, progressChan); err != nil { - return fmt.Errorf("error migrating templates: 
%v", err) + if err := migrateTemplates(oldDataDir, newDataDir, oldEncKey, encKey, ¤tProgress, progressChan); err != nil { + return handleError("Error migrating templates", err) } + time.Sleep(800 * time.Millisecond) // Simulate some delay for migration + // Migrate logs (years/months) - if err := migrateLogs(oldDataDir, newDataDir, oldEncKey, encKey, progressChan); err != nil { - return fmt.Errorf("error migrating logs: %v", err) + if err := migrateLogs(oldDataDir, newDataDir, oldEncKey, encKey, ¤tProgress, progressChan); err != nil { + return handleError("Error migrating logs", err) } // Migrate files - if err := migrateFiles(filepath.Join(Settings.DataPath, "old", "files"), newDataDir, oldEncKey, encKey, progressChan); err != nil { - return fmt.Errorf("error migrating files: %v", err) + if err := migrateFiles(filepath.Join(Settings.DataPath, "old", "files"), newDataDir, oldEncKey, encKey, ¤tProgress, progressChan); err != nil { + return handleError("Error migrating files", err) } // Set final progress + currentProgress.Phase = "completed" + currentProgress.ProcessedItems = 0 + currentProgress.TotalItems = 0 + if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "completed", - ProcessedItems: 5, - TotalItems: 5, - } + progressChan <- currentProgress // Send final progress update } Logger.Printf("Migration completed for user %s (Old ID: %d, New ID: %d) after %v", username, oldUserID, newUserID, time.Since(start)) @@ -398,9 +393,9 @@ func DeriveKeyFromOldPassword(password, salt string) []byte { // MigrationProgress contains information about the migration progress type MigrationProgress struct { Phase string `json:"phase"` // Current migration phase - CurrentItem string `json:"current_item"` // Current item being migrated ProcessedItems int `json:"processed_items"` // Number of already processed items TotalItems int `json:"total_items"` // Total number of items to migrate + ErrorCount int `json:"error_count"` // Number of errors encountered during 
migration } // RegisterUserFunc is a function type for user registration @@ -408,7 +403,7 @@ type RegisterUserFunc func(username, password string) (bool, error) // Helper functions for migration -func migrateTemplates(oldDir, newDir string, oldKey string, newKey string, progressChan chan<- MigrationProgress) error { +func migrateTemplates(oldDir, newDir string, oldKey string, newKey string, progress *MigrationProgress, progressChan chan<- MigrationProgress) error { // Check if old templates exist templatesMutex.RLock() oldTemplatesPath := filepath.Join(oldDir, "templates.json") @@ -425,12 +420,13 @@ func migrateTemplates(oldDir, newDir string, oldKey string, newKey string, progr } // Update progress + progress.Phase = "migrating_templates" + progress.ProcessedItems = 0 + progress.TotalItems = 1 // Just one template file to migrate + + // Send initial progress update if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "migrating_templates", - ProcessedItems: 1, - TotalItems: 2, - } + progressChan <- *progress } // Read old templates @@ -483,10 +479,16 @@ func migrateTemplates(oldDir, newDir string, oldKey string, newKey string, progr file.Close() templatesMutex.Unlock() + // Update progress and send final update + progress.ProcessedItems = 1 + if progressChan != nil { + progressChan <- *progress + } + return nil } -func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressChan chan<- MigrationProgress) error { +func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progress *MigrationProgress, progressChan chan<- MigrationProgress) error { // Count all month files in all year directories var allMonthFiles []struct { yearDir string @@ -538,12 +540,13 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh } // Update progress with total number of months + progress.Phase = "migrating_logs" + progress.ProcessedItems = 0 + progress.TotalItems = totalMonths + + // Send initial progress 
update if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "migrating_logs", - ProcessedItems: 0, - TotalItems: totalMonths, - } + progressChan <- *progress } processedMonths := 0 @@ -551,12 +554,13 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh // Process all months for _, monthInfo := range allMonthFiles { - // Update progress with total number of months - if progressChan != nil && processedMonths%5 == 0 { - progressChan <- MigrationProgress{ - Phase: "migrating_logs", - ProcessedItems: processedMonths, - TotalItems: totalMonths, + // Update progress with number of months + progress.ProcessedItems = processedMonths + + // Send progress update every 5 months or at the end + if processedMonths%5 == 0 || processedMonths == totalMonths-1 { + if progressChan != nil { + progressChan <- *progress } } @@ -581,6 +585,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh if err != nil { Logger.Printf("Error reading old month %s: %v", oldMonthPath, err) + progress.ErrorCount++ continue } @@ -588,6 +593,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh var monthData map[string]any if err := json.Unmarshal(oldMonthBytes, &monthData); err != nil { Logger.Printf("Error parsing old month %s: %v", oldMonthPath, err) + progress.ErrorCount++ continue } @@ -595,12 +601,14 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh days, ok := monthData["days"].([]any) if !ok { Logger.Printf("Month %s has unexpected format - missing 'days' array", oldMonthPath) + progress.ErrorCount++ continue } oldKeyBytes, err := base64.URLEncoding.DecodeString(oldKey) if err != nil { Logger.Printf("Error decoding oldKey %v", err) + progress.ErrorCount++ continue } @@ -618,6 +626,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh plaintext, err = FernetDecrypt(encryptedText, oldKeyBytes) if err != nil { 
Logger.Printf("Error decrypting content for day %f in %s: %v", day["day"].(float64), oldMonthPath, err) + progress.ErrorCount++ continue } } @@ -625,6 +634,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh newEncrypted, err := EncryptText(plaintext, newKey) if err != nil { Logger.Printf("Error encrypting content for day %d in %s: %v", i, oldMonthPath, err) + progress.ErrorCount++ continue } @@ -636,6 +646,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh newEncrypted, err := EncryptText(dateWritten, newKey) if err != nil { Logger.Printf("Error encrypting date_written for day %d in %s: %v", i, oldMonthPath, err) + progress.ErrorCount++ continue } day["date_written"] = newEncrypted @@ -657,6 +668,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh plaintext, err = FernetDecrypt(encryptedText, oldKeyBytes) if err != nil { Logger.Printf("Error decrypting history item %f for day %d in %s: %v", historyItem["version"].(float64), day["day"].(int), oldMonthPath, err) + progress.ErrorCount++ continue } } @@ -665,6 +677,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh newEncrypted, err := EncryptText(plaintext, newKey) if err != nil { Logger.Printf("Error encrypting history item %d for day %d in %s: %v", j, i, oldMonthPath, err) + progress.ErrorCount++ continue } @@ -676,6 +689,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh newEncrypted, err := EncryptText(dateWritten, newKey) if err != nil { Logger.Printf("Error encrypting date_written for history item %d in day %d of %s: %v", j, i, oldMonthPath, err) + progress.ErrorCount++ continue } historyItem["date_written"] = newEncrypted @@ -692,6 +706,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh if err != nil { logsMutex.Unlock() Logger.Printf("Error creating directory for %s: %v", newMonthPath, err) + 
progress.ErrorCount++ continue } @@ -700,6 +715,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh if err != nil { logsMutex.Unlock() Logger.Printf("Error creating file %s: %v", newMonthPath, err) + progress.ErrorCount++ continue } @@ -716,6 +732,7 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh file.Close() logsMutex.Unlock() Logger.Printf("Error encoding month data for %s: %v", newMonthPath, err) + progress.ErrorCount++ continue } @@ -723,12 +740,19 @@ func migrateLogs(oldDir, newDir string, oldKey string, newKey string, progressCh logsMutex.Unlock() processedMonths++ + time.Sleep(20 * time.Millisecond) // Simulate some delay for migration + } + + // Final progress update + progress.ProcessedItems = processedMonths + if progressChan != nil { + progressChan <- *progress } return nil } -func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, progressChan chan<- MigrationProgress) error { +func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, progress *MigrationProgress, progressChan chan<- MigrationProgress) error { // Check if old files directory exists filesMutex.RLock() _, err := os.Stat(oldFilesDir) @@ -749,6 +773,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog filesMutex.Lock() if err := os.MkdirAll(newFilesDir, 0755); err != nil { filesMutex.Unlock() + progress.ErrorCount++ return fmt.Errorf("error creating new files directory: %v", err) } filesMutex.Unlock() @@ -756,6 +781,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog // Convert oldKey from base64 to []byte for decryption oldKeyBytes, err := base64.URLEncoding.DecodeString(oldKey) if err != nil { + progress.ErrorCount++ return fmt.Errorf("error decoding oldKey: %v", err) } @@ -764,6 +790,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog yearEntries, err := os.ReadDir(newDir) 
logsMutex.RUnlock() if err != nil { + progress.ErrorCount++ return fmt.Errorf("error reading new user directory: %v", err) } @@ -796,6 +823,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog monthEntries, err := os.ReadDir(yearPath) logsMutex.RUnlock() if err != nil { + progress.ErrorCount++ Logger.Printf("Error reading year directory %s: %v", yearPath, err) continue } @@ -815,6 +843,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog logsMutex.RUnlock() if err != nil { Logger.Printf("Error reading month file %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -822,6 +851,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog var monthData map[string]any if err := json.Unmarshal(monthBytes, &monthData); err != nil { Logger.Printf("Error parsing month data %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -882,23 +912,16 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog totalFiles := len(fileRefs) Logger.Printf("Found %d files to migrate", totalFiles) - if totalFiles == 0 { - if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "migrating_files", - ProcessedItems: 0, - TotalItems: 0, - } - } - return nil // No files to migrate - } + progress.Phase = "migrating_files" + progress.ProcessedItems = 0 + progress.TotalItems = totalFiles if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "migrating_files", - ProcessedItems: 0, - TotalItems: totalFiles, - } + progressChan <- *progress // Send initial progress update + } + + if totalFiles == 0 { + return nil // No files to migrate } // Second pass: migrate each file @@ -906,13 +929,9 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog fileIDMap := make(map[string]string) // Map original file IDs to new file IDs for i, fileRef := range fileRefs { - // Update progress occasionally - if progressChan != 
nil && (i%5 == 0 || i == 0) { - progressChan <- MigrationProgress{ - Phase: "migrating_files", - ProcessedItems: processedFiles, - TotalItems: totalFiles, - } + progress.ProcessedItems = processedFiles + if progressChan != nil { + progressChan <- *progress // Send progress update } // Check if we already have a mapping for this file ID @@ -925,6 +944,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog NewUUID, err := GenerateUUID() if err != nil { Logger.Printf("Error generating UUID for file %s: %v", fileRef.OrigUUID, err) + progress.ErrorCount++ continue } @@ -939,13 +959,15 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog filesMutex.RUnlock() if err != nil { Logger.Printf("Error reading old file %s: %v", oldFilePath, err) + progress.ErrorCount++ continue } - // Decrypt file with old key - der Dateiinhalt ist bereits ein Fernet-Token + // Decrypt file with old key - the file content is already a Fernet token plaintext, err := FernetDecrypt(string(oldFileBytes), oldKeyBytes) if err != nil { Logger.Printf("Error decrypting file %s: %v", fileRef.OrigUUID, err) + progress.ErrorCount++ continue } @@ -958,6 +980,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog newEncrypted, err := EncryptFile(plaintextBytes, newKey) if err != nil { Logger.Printf("Error encrypting file %s: %v", fileRef.OrigUUID, err) + progress.ErrorCount++ continue } @@ -968,10 +991,20 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog filesMutex.Unlock() if err != nil { Logger.Printf("Error writing new file %s: %v", newFilePath, err) + progress.ErrorCount++ continue } processedFiles++ + + // Update progress occasionally + if i%5 == 0 || i == len(fileRefs)-1 { + progress.ProcessedItems = processedFiles + if progressChan != nil { + progressChan <- *progress + } + } + time.Sleep(100 * time.Millisecond) // Simulate some delay for migration } // Third pass: update all 
month files with new file IDs @@ -991,6 +1024,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog logsMutex.RUnlock() if err != nil { Logger.Printf("Error reading month file %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -998,6 +1032,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog var monthData map[string]any if err := json.Unmarshal(monthBytes, &monthData); err != nil { Logger.Printf("Error parsing month data %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -1035,11 +1070,11 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog // If we have a mapping for this file UUID, update it if newID, exists := fileIDMap[fileUUID]; exists { - // Entferne das alte Format und ersetze es durch das neue Format + // Remove the old format and replace it with the new format delete(file, "id") file["uuid_filename"] = newID - // Finde die korrekte Größe für diese Datei + // Find the correct size for this file var fileSize uint64 for _, ref := range fileRefs { if ref.OrigUUID == fileUUID { @@ -1057,6 +1092,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog plainName, err = FernetDecrypt(encName, oldKeyBytes) if err != nil { Logger.Printf("Error decrypting filename for %s: %v", fileUUID, err) + progress.ErrorCount++ continue } @@ -1065,6 +1101,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog newEncName, err = EncryptText(plainName, newKey) if err != nil { Logger.Printf("Error encrypting filename for %s: %v", fileUUID, err) + progress.ErrorCount++ continue } @@ -1096,6 +1133,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog if err != nil { logsMutex.Unlock() Logger.Printf("Error creating file %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -1112,6 +1150,7 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, 
prog file.Close() logsMutex.Unlock() Logger.Printf("Error encoding month data for %s: %v", monthPath, err) + progress.ErrorCount++ continue } @@ -1124,12 +1163,9 @@ func migrateFiles(oldFilesDir, newDir string, oldKey string, newKey string, prog } // Final progress update + progress.ProcessedItems = processedFiles if progressChan != nil { - progressChan <- MigrationProgress{ - Phase: "migrating_files", - ProcessedItems: processedFiles, - TotalItems: totalFiles, - } + progressChan <- *progress } Logger.Printf("Completed migrating %d/%d files", processedFiles, totalFiles) diff --git a/backend/utils/security.go b/backend/utils/security.go index ed1ddce..8f72092 100644 --- a/backend/utils/security.go +++ b/backend/utils/security.go @@ -1,6 +1,7 @@ package utils import ( + "crypto/cipher" "crypto/rand" "encoding/base64" "fmt" @@ -10,6 +11,7 @@ import ( "github.com/golang-jwt/jwt/v5" "github.com/google/uuid" "golang.org/x/crypto/argon2" + "golang.org/x/crypto/chacha20poly1305" ) // Claims represents the JWT claims @@ -151,3 +153,185 @@ func GenerateUUID() (string, error) { return encodedUUID, nil } + +// CreateAEAD creates an AEAD cipher for encryption/decryption +func CreateAEAD(key []byte) (cipher.AEAD, error) { + return chacha20poly1305.New(key) +} + +// EncryptText encrypts text using the provided key +func EncryptText(text, key string) (string, error) { + // Decode key + keyBytes, err := base64.URLEncoding.DecodeString(key) + if err != nil { + return "", fmt.Errorf("error decoding key: %v", err) + } + + // Create AEAD cipher + aead, err := chacha20poly1305.New(keyBytes) + if err != nil { + return "", fmt.Errorf("error creating cipher: %v", err) + } + + // Create nonce + nonce := make([]byte, aead.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return "", fmt.Errorf("error creating nonce: %v", err) + } + + // Encrypt text + ciphertext := aead.Seal(nonce, nonce, []byte(text), nil) + return base64.URLEncoding.EncodeToString(ciphertext), nil 
+} + +// DecryptText decrypts text using the provided key +func DecryptText(ciphertext, key string) (string, error) { + // Decode key and ciphertext + keyBytes, err := base64.URLEncoding.DecodeString(key) + if err != nil { + return "", fmt.Errorf("error decoding key: %v", err) + } + + ciphertextBytes, err := base64.URLEncoding.DecodeString(ciphertext) + if err != nil { + return "", fmt.Errorf("error decoding ciphertext: %v", err) + } + + // Create AEAD cipher + aead, err := chacha20poly1305.New(keyBytes) + if err != nil { + return "", fmt.Errorf("error creating cipher: %v", err) + } + + // Extract nonce from ciphertext + if len(ciphertextBytes) < aead.NonceSize() { + return "", fmt.Errorf("ciphertext too short") + } + nonce, ciphertextBytes := ciphertextBytes[:aead.NonceSize()], ciphertextBytes[aead.NonceSize():] + + // Decrypt text + plaintext, err := aead.Open(nil, nonce, ciphertextBytes, nil) + if err != nil { + return "", fmt.Errorf("error decrypting ciphertext: %v", err) + } + + return string(plaintext), nil +} + +// EncryptFile encrypts a file using the provided key +func EncryptFile(data []byte, key string) ([]byte, error) { + // Decode key + keyBytes, err := base64.URLEncoding.DecodeString(key) + if err != nil { + return nil, fmt.Errorf("error decoding key: %v", err) + } + + // Create AEAD cipher + aead, err := chacha20poly1305.New(keyBytes) + if err != nil { + return nil, fmt.Errorf("error creating cipher: %v", err) + } + + // Create nonce + nonce := make([]byte, aead.NonceSize()) + if _, err := io.ReadFull(rand.Reader, nonce); err != nil { + return nil, fmt.Errorf("error creating nonce: %v", err) + } + + // Encrypt file + ciphertext := aead.Seal(nonce, nonce, data, nil) + return ciphertext, nil +} + +// DecryptFile decrypts a file using the provided key +func DecryptFile(ciphertext []byte, key string) ([]byte, error) { + // Decode key + keyBytes, err := base64.URLEncoding.DecodeString(key) + if err != nil { + return nil, fmt.Errorf("error decoding key: 
%v", err) + } + + // Create AEAD cipher + aead, err := chacha20poly1305.New(keyBytes) + if err != nil { + return nil, fmt.Errorf("error creating cipher: %v", err) + } + + // Extract nonce from ciphertext + if len(ciphertext) < aead.NonceSize() { + return nil, fmt.Errorf("ciphertext too short") + } + nonce, ciphertext := ciphertext[:aead.NonceSize()], ciphertext[aead.NonceSize():] + + // Decrypt file + plaintext, err := aead.Open(nil, nonce, ciphertext, nil) + if err != nil { + return nil, fmt.Errorf("error decrypting ciphertext: %v", err) + } + + return plaintext, nil +} + +// GetEncryptionKey retrieves the encryption key for a specific user +func GetEncryptionKey(userID int, derivedKey string) (string, error) { + // Get users + users, err := GetUsers() + if err != nil { + return "", fmt.Errorf("error retrieving users: %v", err) + } + + // Find user + usersList, ok := users["users"].([]any) + if !ok { + return "", fmt.Errorf("users.json is not in the correct format") + } + + for _, u := range usersList { + user, ok := u.(map[string]any) + if !ok { + continue + } + + if id, ok := user["user_id"].(float64); ok && int(id) == userID { + encEncKey, ok := user["enc_enc_key"].(string) + if !ok { + return "", fmt.Errorf("user data is not in the correct format") + } + + // Decode derived key + derivedKeyBytes, err := base64.StdEncoding.DecodeString(derivedKey) + if err != nil { + return "", fmt.Errorf("error decoding derived key: %v", err) + } + + // Create Fernet cipher + aead, err := CreateAEAD(derivedKeyBytes) + if err != nil { + return "", fmt.Errorf("error creating cipher: %v", err) + } + + // Decode encrypted key + encEncKeyBytes, err := base64.StdEncoding.DecodeString(encEncKey) + if err != nil { + return "", fmt.Errorf("error decoding encrypted key: %v", err) + } + + // Extract nonce from encrypted key + if len(encEncKeyBytes) < aead.NonceSize() { + return "", fmt.Errorf("encrypted key too short") + } + nonce, encKeyBytes := encEncKeyBytes[:aead.NonceSize()], 
encEncKeyBytes[aead.NonceSize():] + + // Decrypt key + keyBytes, err := aead.Open(nil, nonce, encKeyBytes, nil) + if err != nil { + return "", fmt.Errorf("error decrypting key: %v", err) + } + + // Return base64-encoded key + return base64.URLEncoding.EncodeToString(keyBytes), nil + } + } + + return "", fmt.Errorf("user not found") +} diff --git a/frontend/src/routes/login/+page.svelte b/frontend/src/routes/login/+page.svelte index c8cd222..1f0b8d9 100644 --- a/frontend/src/routes/login/+page.svelte +++ b/frontend/src/routes/login/+page.svelte @@ -21,6 +21,24 @@ let registration_allowed = $state(true); + let migration_phases = $state([ + 'creating_new_user', + 'migrating_templates', + 'migrating_logs', + 'migrating_files', + 'completed' + ]); + + let migration_phase = $state(''); + let migration_progress_total = $state(0); + let migration_progress = $state(0); + let migration_error_count = $state(0); + + let active_phase = $derived( + // find the current phase in migration_phases + migration_phases.indexOf(migration_phase) + ); + onMount(() => { // if params error=440 or error=401, show toast if (window.location.search.includes('error=440')) { @@ -47,6 +65,43 @@ }); } + function handleMigrationProgress(username) { + // Poll the server for migration progress + const interval = setInterval(() => { + axios + .get(API_URL + '/users/migrationProgress', { params: { username } }) + .then((response) => { + const progress = response.data.progress; + if (progress) { + migration_phase = progress.phase; + migration_progress_total = progress.total_items; + migration_progress = progress.processed_items; + + // Stop polling when migration is complete + if (progress.phase === 'completed') { + console.log('Migration completed successfully'); + is_migrating = false; + migration_error_count = progress.error_count; + clearInterval(interval); + } + } + + if ( + !response.data.migration_in_progress && + response.data.progress.phase !== 'not_started' + ) { + console.log('Migration
stopped'); + is_migrating = false; + clearInterval(interval); + } + }) + .catch((error) => { + console.error('Error fetching migration progress:', error); + clearInterval(interval); // Stop polling on error + }); + }, 500); // Poll every 500ms + } + function handleLogin(event) { event.preventDefault(); @@ -64,18 +119,22 @@ } is_logging_in = true; + console.log(API_URL); axios .post(API_URL + '/users/login', { username, password }) .then((response) => { if (response.data.migration_started) { is_migrating = true; + + handleMigrationProgress(response.data.username); } else { localStorage.setItem('user', JSON.stringify(response.data.username)); goto('/write'); } }) .catch((error) => { + console.log(error); if (error.response.status === 404) { show_login_failed = true; } @@ -180,14 +239,112 @@ /> - {#if is_migrating} + {#if is_migrating || migration_phase == 'completed'} {/if} {#if show_login_failed} @@ -203,7 +360,7 @@ {/if}