diff --git a/tools/migrations/26-02-10-a--add_monthly_activity_stats_cache.sql b/tools/migrations/26-02-10-a--add_monthly_activity_stats_cache.sql
new file mode 100644
index 00000000..2cc0e370
--- /dev/null
+++ b/tools/migrations/26-02-10-a--add_monthly_activity_stats_cache.sql
@@ -0,0 +1,13 @@
+-- Cache table for monthly activity statistics by type
+-- Historical months are cached permanently, current month refreshed periodically
+
+CREATE TABLE IF NOT EXISTS monthly_activity_stats_cache (
+    id INT AUTO_INCREMENT PRIMARY KEY,
+    `year_month` VARCHAR(7) NOT NULL UNIQUE COMMENT 'Format: YYYY-MM',
+    `exercise_minutes` INT NOT NULL DEFAULT 0,
+    `reading_minutes` INT NOT NULL DEFAULT 0,
+    `browsing_minutes` INT NOT NULL DEFAULT 0,
+    `audio_minutes` INT NOT NULL DEFAULT 0,
+    `computed_at` DATETIME NOT NULL,
+    INDEX idx_year_month (`year_month`)
+) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 COLLATE=utf8mb4_bin;
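The migration's header comment says the current month is refreshed periodically, but the refresh job itself is not part of this diff. As a rough sketch of what such a job could look like, upserting through the UNIQUE `year_month` key defined above (the function name, session handling, and stats source are all hypothetical; only the table and columns come from the migration):

    # Hypothetical periodic refresh of the current month's cache row -- not part of this PR.
    # Assumes a SQLAlchemy session; everything except the table/columns is illustrative.
    from datetime import datetime, timezone
    from sqlalchemy import text

    def refresh_current_month_cache(session, minutes_by_type: dict):
        session.execute(
            text(
                "INSERT INTO monthly_activity_stats_cache "
                "(`year_month`, exercise_minutes, reading_minutes, "
                " browsing_minutes, audio_minutes, computed_at) "
                "VALUES (:ym, :ex, :rd, :br, :au, NOW()) "
                "ON DUPLICATE KEY UPDATE "
                "exercise_minutes = VALUES(exercise_minutes), "
                "reading_minutes = VALUES(reading_minutes), "
                "browsing_minutes = VALUES(browsing_minutes), "
                "audio_minutes = VALUES(audio_minutes), "
                "computed_at = NOW()"
            ),
            {
                "ym": datetime.now(timezone.utc).strftime("%Y-%m"),
                "ex": minutes_by_type.get("exercise", 0),
                "rd": minutes_by_type.get("reading", 0),
                "br": minutes_by_type.get("browsing", 0),
                "au": minutes_by_type.get("audio", 0),
            },
        )
        session.commit()

Because `year_month` is UNIQUE, historical rows written once are never touched again; only the current month's row is overwritten on each run.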
diff --git a/zeeguu/api/endpoints/article.py b/zeeguu/api/endpoints/article.py
index 43eed856..a96a3593 100644
--- a/zeeguu/api/endpoints/article.py
+++ b/zeeguu/api/endpoints/article.py
@@ -627,19 +627,50 @@ def clear_article_cache(article_id):
     )
     bookmark_count = len(bookmarks)
+    bookmark_ids_to_delete = {b.id for b in bookmarks}
 
-    # First pass: clear all preferred_bookmark_id references
+    # Track UserWords that need cleanup
+    user_words_to_check = {}
     for bookmark in bookmarks:
         user_word = bookmark.user_word
-        if user_word and user_word.preferred_bookmark_id == bookmark.id:
-            user_word.preferred_bookmark_id = None
+        if user_word:
+            user_words_to_check[user_word.id] = user_word
+            # Clear preferred_bookmark if it's one we're deleting
+            if user_word.preferred_bookmark_id == bookmark.id:
+                user_word.preferred_bookmark_id = None
+    db_session.flush()  # Flush (not commit) the nullifications to the DB before deleting
 
-    # Second pass: delete bookmarks
+    # Delete bookmarks
     for bookmark in bookmarks:
         db_session.delete(bookmark)
+    db_session.flush()
+
+    # Handle UserWords that are now orphaned or need a new preferred_bookmark
+    user_words_marked_unfit = 0
+    for user_word in user_words_to_check.values():
+        # Check for remaining bookmarks not in our delete set
+        remaining_bookmarks = (
+            Bookmark.query
+            .filter(Bookmark.user_word_id == user_word.id)
+            .filter(Bookmark.id.notin_(bookmark_ids_to_delete))
+            .all()
+        )
+
+        if remaining_bookmarks:
+            # Set a new preferred bookmark if needed
+            if user_word.preferred_bookmark is None:
+                user_word.preferred_bookmark = remaining_bookmarks[0]
+        else:
+            # No bookmarks left - keep UserWord for history but mark unfit for study
+            user_word.set_unfit_for_study(db_session)
+            user_words_marked_unfit += 1
 
     db_session.commit()
 
+    if user_words_marked_unfit > 0:
+        log(f"[DEV] Marked {user_words_marked_unfit} UserWords as unfit for study (no bookmarks)")
+
     log(f"[DEV] Cleared cache and {bookmark_count} bookmarks for article {article_id}")
     return json_result({
diff --git a/zeeguu/api/endpoints/bookmarks_and_words.py b/zeeguu/api/endpoints/bookmarks_and_words.py
index 903f0498..8d89d96a 100644
--- a/zeeguu/api/endpoints/bookmarks_and_words.py
+++ b/zeeguu/api/endpoints/bookmarks_and_words.py
@@ -241,7 +241,8 @@ def delete_bookmark(bookmark_id):
             # in the future we can generate an example for this user word with the help of the robots!
             user_word.set_unfit_for_study(db_session)
         else:
-            # No other bookmarks exist - ALWAYS keep the user_word for historical data
+            # No other bookmarks exist - keep UserWord for historical data
+            # but mark as unfit for study (won't appear in exercises)
             user_word.set_unfit_for_study(db_session)
 
     # Delete any ExampleSentenceContext records that reference this bookmark
diff --git a/zeeguu/api/endpoints/exercises.py b/zeeguu/api/endpoints/exercises.py
index 383da166..a0128924 100644
--- a/zeeguu/api/endpoints/exercises.py
+++ b/zeeguu/api/endpoints/exercises.py
@@ -309,7 +309,6 @@ def _user_words_as_json_result(user_words):
             log(f"Failed to get tokenized context for user_word {uw.id}: {e}")
 
     dicts = []
-    words_to_delete = []
 
     for user_word in user_words:
         try:
@@ -320,28 +319,9 @@ def _user_words_as_json_result(user_words):
                 schedule=schedule,
                 pre_tokenized_context=tokenized_context
             ))
-        except ValueError as e:
-            # This means validate_data_integrity() couldn't repair the issue
-            # (i.e., UserWord has no bookmarks at all)
-            log(f"UserWord {user_word.id} failed validation and cannot be repaired: {str(e)}")
-            words_to_delete.append(user_word)
         except Exception as e:
-            # Log any other unexpected errors and skip
+            # Log unexpected errors and skip (orphaned UserWords are handled gracefully)
             log(f"Unexpected error processing UserWord {user_word.id}: {str(e)}")
             continue
 
-    # Delete UserWords that couldn't be repaired
-    if words_to_delete:
-        for word in words_to_delete:
-            try:
-                db.session.delete(word)
-                log(f"Deleted UserWord {word.id} due to unrepairable data integrity issues")
-            except:
-                log(f"Failed to delete UserWord {word.id}")
-        try:
-            db.session.commit()
-        except:
-            db.session.rollback()
-            log("Failed to commit UserWord deletions")
-
     return json_result(dicts)
diff --git a/zeeguu/api/endpoints/user_stats.py b/zeeguu/api/endpoints/user_stats.py
index 5c7eaa76..39c8587f 100644
--- a/zeeguu/api/endpoints/user_stats.py
+++ b/zeeguu/api/endpoints/user_stats.py
@@ -2215,8 +2215,416 @@ def stats_index():
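(The 416-line hunk in user_stats.py is truncated above.)

One note on the flush ordering in the article.py hunk: db_session.flush() sends the pending preferred_bookmark_id nullifications (UPDATEs) to the database while the transaction is still open, so the subsequent DELETEs don't hit bookmark rows that are still referenced; commit() then makes the whole cleanup permanent in one step. A generic illustration of the pattern, with hypothetical Parent/Child models standing in for UserWord/Bookmark (this is a sketch, not Zeeguu code):

    # Flush-before-delete: make the FK nullification reach the DB before the
    # referenced rows are deleted. Model and attribute names are illustrative.
    from sqlalchemy.orm import Session

    def delete_referenced_rows(session: Session, parent, children):
        # 1) Drop the FK reference the parent still holds.
        parent.preferred_child_id = None
        session.flush()  # UPDATE is emitted now; the transaction stays open

        # 2) Only then delete the rows that the FK used to point at.
        for child in children:
            session.delete(child)
        session.flush()  # DELETEs are emitted

        session.commit()  # both steps become permanent together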