Signed-off-by: mbrucedogs <mbrucedogs@gmail.com>

This commit is contained in:
mbrucedogs 2025-07-30 18:20:10 -05:00
parent 391408e4d4
commit d184724c70
8 changed files with 4552 additions and 139 deletions

View File

@ -15,6 +15,192 @@ from matching import SongMatcher
from report import ReportGenerator
def _coerce_count(value) -> int:
    """Best-effort conversion of a history ``count`` value to a number.

    Handles ints/floats as-is, numeric strings via int(), and returns 0
    for anything else (None, unparsable strings, other types) instead of
    raising — history files in the wild contain mixed types.
    """
    if isinstance(value, str):
        try:
            return int(value)
        except ValueError:
            return 0
    if isinstance(value, (int, float)):
        return value
    # None or any other non-numeric value contributes nothing.
    return 0


def _merge_group(items):
    """Collapse a group of duplicate history entries into one entry.

    The first entry is the base; counts are summed, boolean flags
    (``favorite``/``disabled``) become True if any entry set them, and
    the first non-empty value wins for the remaining properties.
    """
    merged_item = items[0].copy()
    merged_item['count'] = sum(_coerce_count(i.get('count', 0)) for i in items)
    merged_item['favorite'] = any(i.get('favorite', False) for i in items)
    merged_item['disabled'] = any(i.get('disabled', False) for i in items)
    # For other properties, keep the first non-empty value found.
    for prop in ['key', 'original_path', 'genre']:
        if merged_item.get(prop):
            continue
        for item in items[1:]:  # Skip first item since we already have it
            if item.get(prop):
                merged_item[prop] = item[prop]
                break
    return merged_item


def merge_history_objects(data_dir: str, args) -> None:
    """Merge history objects that match on artist, title, and path, summing their count properties.

    Entries whose (artist, title, path) agree case-insensitively are
    collapsed into a single entry (see ``_merge_group``).  The merged
    list replaces ``history.json`` unless ``args.dry_run`` is set.

    Args:
        data_dir: Directory containing ``history.json``.
        args: Parsed CLI namespace; uses ``dry_run`` and ``verbose``.
    """
    history_path = os.path.join(data_dir, 'history.json')
    if not os.path.exists(history_path):
        print(f"History file not found: {history_path}")
        return
    try:
        # Load current history
        history_items = load_json_file(history_path)
        if not history_items:
            print("No history items found to merge")
            return
        print(f"\n🔄 Merging history objects...")
        print(f"Processing {len(history_items):,} history entries...")
        # Group items by case-insensitive (artist, title, path).
        grouped_items = {}
        for item in history_items:
            if not isinstance(item, dict):
                continue
            # `or ''` guards against explicit None values for these keys.
            artist = (item.get('artist') or '').strip()
            title = (item.get('title') or '').strip()
            path = (item.get('path') or '').strip()
            if not artist or not title or not path:
                continue
            key = (artist.lower(), title.lower(), path.lower())
            grouped_items.setdefault(key, []).append(item)
        # Collapse groups with more than one entry.
        merged_items = []
        merged_count = 0
        total_merged_entries = 0
        for key, items in grouped_items.items():
            if len(items) == 1:
                # Single item, keep as is
                merged_items.append(items[0])
                continue
            artist, title, path = key
            merged_item = _merge_group(items)
            merged_items.append(merged_item)
            merged_count += 1
            total_merged_entries += len(items)
            if args.verbose:
                print(f"Merged {len(items)} entries for '{artist} - {title}': total count = {merged_item['count']}")
        # Save the merged history
        if not args.dry_run:
            save_json_file(merged_items, history_path)
            print(f"✅ Merged {merged_count} groups ({total_merged_entries} total entries → {len(merged_items)} entries)")
            print(f"📁 Saved to: {history_path}")
        else:
            print(f"DRY RUN: Would merge {merged_count} groups ({total_merged_entries} total entries → {len(merged_items)} entries)")
    except Exception as e:
        print(f"Error merging history objects: {e}")
def process_favorites_and_history(matcher: SongMatcher, all_songs: List[Dict[str, Any]], data_dir: str, args) -> None:
    """Process favorites and history with priority-based logic to select best versions.

    For every entry in favorites.json / history.json (as requested via
    ``args.process_favorites`` / ``args.process_history``), all library
    songs with the same artist/title are collected and the matcher's
    priority logic picks the best one.  The entry's ``path`` is updated
    to the best version, with the old path kept in ``original_path``.

    Args:
        matcher: SongMatcher providing ``select_best_song``.
        all_songs: Full song library loaded from allSongs.json.
        data_dir: Directory containing favorites.json / history.json.
        args: Parsed CLI namespace; uses ``process_favorites``,
            ``process_history``, ``dry_run`` and ``verbose``.
    """
    # Index the library once by normalized (artist, title) so each
    # favorites/history entry is a dict lookup instead of a full scan
    # over all_songs (the original approach was O(items * songs)).
    songs_by_key: Dict[tuple, List[Dict[str, Any]]] = {}
    if args.process_favorites or args.process_history:
        for song in all_songs:
            key = (
                (song.get('artist') or '').lower().strip(),
                (song.get('title') or '').lower().strip(),
            )
            songs_by_key.setdefault(key, []).append(song)

    def process_file(file_type: str, file_path: str) -> List[Dict[str, Any]]:
        """Process a single favorites or history file."""
        try:
            items = load_json_file(file_path)
            if not items:
                print(f"No {file_type} found in {file_path}")
                return []
            print(f"\nProcessing {len(items):,} {file_type} entries...")
            processed_items = []
            updated_count = 0
            for i, item in enumerate(items):
                if not isinstance(item, dict):
                    print(f"Warning: Skipping invalid {file_type} item at index {i}")
                    continue
                artist = item.get('artist', '')
                title = item.get('title', '')
                current_path = item.get('path', '')
                if not artist or not title:
                    print(f"Warning: Skipping {file_type} item with missing artist/title at index {i}")
                    continue
                # O(1) lookup of all library songs with this artist/title.
                matching_songs = songs_by_key.get(
                    (artist.lower().strip(), title.lower().strip()), [])
                if not matching_songs:
                    print(f"Warning: No matching songs found for {artist} - {title}")
                    processed_items.append(item)
                    continue
                # Use the same priority logic as duplicates
                best_song, skip_songs = matcher.select_best_song(matching_songs, artist, title)
                if best_song and best_song['path'] != current_path:
                    # Update the path to the best version
                    item['path'] = best_song['path']
                    item['original_path'] = current_path  # Keep track of the original
                    updated_count += 1
                    if args.verbose:
                        print(f"Updated {artist} - {title}: {current_path}{best_song['path']}")
                processed_items.append(item)
            # Save the updated file
            if not args.dry_run:
                save_json_file(processed_items, file_path)
                print(f"✅ Updated {updated_count:,} {file_type} entries with best versions")
                print(f"📁 Saved to: {file_path}")
            else:
                print(f"DRY RUN: Would update {updated_count:,} {file_type} entries")
            return processed_items
        except Exception as e:
            print(f"Error processing {file_type}: {e}")
            return []

    # Process favorites if requested
    if args.process_favorites:
        favorites_path = os.path.join(data_dir, 'favorites.json')
        if os.path.exists(favorites_path):
            process_file('favorites', favorites_path)
        else:
            print(f"Favorites file not found: {favorites_path}")
    # Process history if requested
    if args.process_history:
        history_path = os.path.join(data_dir, 'history.json')
        if os.path.exists(history_path):
            process_file('history', history_path)
        else:
            print(f"History file not found: {history_path}")
def parse_arguments():
"""Parse command line arguments."""
parser = argparse.ArgumentParser(
@ -27,25 +213,31 @@ Examples:
python main.py --config custom_config.json # Use custom config
python main.py --output-dir ./reports # Save reports to custom directory
python main.py --dry-run # Analyze without generating files
python main.py --process-favorites # Process favorites with priority logic (MP4 over MP3)
python main.py --process-history # Process history with priority logic (MP4 over MP3)
python main.py --process-all # Process everything: duplicates, generate reports, AND update favorites/history with priority logic
python main.py --process-all --dry-run # Preview changes without saving
python main.py --merge-history # Merge history objects that match on artist, title, and path
python main.py --merge-history --dry-run # Preview history merging without saving
"""
)
parser.add_argument(
'--config',
default='config/config.json',
help='Path to configuration file (default: config/config.json)'
default='../config/config.json',
help='Path to configuration file (default: ../config/config.json)'
)
parser.add_argument(
'--input',
default='data/allSongs.json',
help='Path to input songs file (default: data/allSongs.json)'
default='../data/allSongs.json',
help='Path to input songs file (default: ../data/allSongs.json)'
)
parser.add_argument(
'--output-dir',
default='data',
help='Directory for output files (default: data)'
default='../data',
help='Directory for output files (default: ../data)'
)
parser.add_argument(
@ -72,6 +264,30 @@ Examples:
help='Show current configuration and exit'
)
parser.add_argument(
'--process-favorites',
action='store_true',
help='Process favorites with priority-based logic to select best versions (MP4 over MP3)'
)
parser.add_argument(
'--process-history',
action='store_true',
help='Process history with priority-based logic to select best versions (MP4 over MP3)'
)
parser.add_argument(
'--process-all',
action='store_true',
help='Process everything: duplicates, generate reports, AND update favorites/history with priority logic'
)
parser.add_argument(
'--merge-history',
action='store_true',
help='Merge history objects that match on artist, title, and path, summing their count properties'
)
return parser.parse_args()
@ -119,137 +335,172 @@ def main():
reporter.print_report("config", config)
return
# Load songs
songs = load_songs(args.input)
# Initialize components
# Load songs (only if needed for processing)
data_dir = args.output_dir
matcher = SongMatcher(config, data_dir)
reporter = ReportGenerator(config)
songs = None
matcher = None
reporter = None
print("\nStarting song analysis...")
print("=" * 60)
if not args.merge_history:
songs = load_songs(args.input)
matcher = SongMatcher(config, data_dir)
reporter = ReportGenerator(config)
# Process songs
try:
best_songs, skip_songs, stats = matcher.process_songs(songs)
# Process favorites and history if requested
if args.process_favorites or args.process_history or args.process_all:
print("\n🎯 Processing favorites and history with priority logic...")
print("=" * 60)
# Generate reports
# If --process-all is used, set both flags
if args.process_all:
args.process_favorites = True
args.process_history = True
process_favorites_and_history(matcher, songs, data_dir, args)
print("\n" + "=" * 60)
reporter.print_report("summary", stats)
print("Favorites/History processing complete!")
# Add channel priority report
if config.get('channel_priorities'):
channel_report = reporter.generate_channel_priority_report(stats, config['channel_priorities'])
print("\n" + channel_report)
# If --process-all, also do the full duplicate analysis and reporting
if args.process_all:
print("\n🔄 Processing duplicates and generating reports...")
print("=" * 60)
else:
return
if config['output']['verbose']:
duplicate_info = matcher.get_detailed_duplicate_info(songs)
reporter.print_report("duplicates", duplicate_info)
# Merge history objects if requested (separate operation)
if args.merge_history:
print("\n🔄 Merging history objects...")
print("=" * 60)
merge_history_objects(data_dir, args)
print("\n" + "=" * 60)
print("History merging complete!")
return
reporter.print_report("skip_summary", skip_songs)
# If not processing favorites/history OR if --process-all, do the full analysis
if not (args.process_favorites or args.process_history) or args.process_all:
print("\nStarting song analysis...")
print("=" * 60)
# Save skip list if not dry run
if not args.dry_run and skip_songs:
skip_list_path = os.path.join(args.output_dir, 'skipSongs.json')
# Process songs
try:
best_songs, skip_songs, stats = matcher.process_songs(songs)
# Create simplified skip list (just paths and reasons) with deduplication
seen_paths = set()
simple_skip_list = []
duplicate_count = 0
# Generate reports
print("\n" + "=" * 60)
reporter.print_report("summary", stats)
for skip_song in skip_songs:
path = skip_song['path']
if path not in seen_paths:
seen_paths.add(path)
skip_entry = {'path': path}
if config['output']['include_reasons']:
skip_entry['reason'] = skip_song['reason']
simple_skip_list.append(skip_entry)
else:
duplicate_count += 1
# Add channel priority report
if config.get('channel_priorities'):
channel_report = reporter.generate_channel_priority_report(stats, config['channel_priorities'])
print("\n" + channel_report)
save_json_file(simple_skip_list, skip_list_path)
print(f"\nSkip list saved to: {skip_list_path}")
print(f"Total songs to skip: {len(simple_skip_list):,}")
if duplicate_count > 0:
print(f"Removed {duplicate_count:,} duplicate entries from skip list")
elif args.dry_run:
print("\nDRY RUN MODE: No skip list generated")
# Always generate detailed reports (not just when --save-reports is used)
if not args.dry_run:
reports_dir = os.path.join(args.output_dir, 'reports')
os.makedirs(reports_dir, exist_ok=True)
print(f"\n📊 Generating enhanced analysis reports...")
# Analyze skip patterns
skip_analysis = reporter.analyze_skip_patterns(skip_songs)
# Analyze channel optimization
channel_analysis = reporter.analyze_channel_optimization(stats, skip_analysis)
# Generate and save enhanced reports
enhanced_summary = reporter.generate_enhanced_summary_report(stats, skip_analysis)
reporter.save_report_to_file(enhanced_summary, os.path.join(reports_dir, 'enhanced_summary_report.txt'))
channel_optimization = reporter.generate_channel_optimization_report(channel_analysis)
reporter.save_report_to_file(channel_optimization, os.path.join(reports_dir, 'channel_optimization_report.txt'))
duplicate_patterns = reporter.generate_duplicate_pattern_report(skip_analysis)
reporter.save_report_to_file(duplicate_patterns, os.path.join(reports_dir, 'duplicate_pattern_report.txt'))
actionable_insights = reporter.generate_actionable_insights_report(stats, skip_analysis, channel_analysis)
reporter.save_report_to_file(actionable_insights, os.path.join(reports_dir, 'actionable_insights_report.txt'))
# Generate detailed duplicate analysis
detailed_duplicates = reporter.generate_detailed_duplicate_analysis(skip_songs, best_songs)
reporter.save_report_to_file(detailed_duplicates, os.path.join(reports_dir, 'detailed_duplicate_analysis.txt'))
# Save original reports for compatibility
summary_report = reporter.generate_summary_report(stats)
reporter.save_report_to_file(summary_report, os.path.join(reports_dir, 'summary_report.txt'))
skip_report = reporter.generate_skip_list_summary(skip_songs)
reporter.save_report_to_file(skip_report, os.path.join(reports_dir, 'skip_list_summary.txt'))
# Save detailed duplicate report if verbose
if config['output']['verbose']:
duplicate_info = matcher.get_detailed_duplicate_info(songs)
duplicate_report = reporter.generate_duplicate_details(duplicate_info)
reporter.save_report_to_file(duplicate_report, os.path.join(reports_dir, 'duplicate_details.txt'))
reporter.print_report("duplicates", duplicate_info)
# Save analysis data as JSON for further processing
analysis_data = {
'stats': stats,
'skip_analysis': skip_analysis,
'channel_analysis': channel_analysis,
'timestamp': __import__('datetime').datetime.now().isoformat()
}
save_json_file(analysis_data, os.path.join(reports_dir, 'analysis_data.json'))
reporter.print_report("skip_summary", skip_songs)
# Save full skip list data (this is what the web UI needs)
save_json_file(skip_songs, os.path.join(reports_dir, 'skip_songs_detailed.json'))
# Save skip list if not dry run
if not args.dry_run and skip_songs:
skip_list_path = os.path.join(args.output_dir, 'skipSongs.json')
print(f"✅ Enhanced reports saved to: {reports_dir}")
print(f"📋 Generated reports:")
print(f" • enhanced_summary_report.txt - Comprehensive analysis")
print(f" • channel_optimization_report.txt - Priority optimization suggestions")
print(f" • duplicate_pattern_report.txt - Duplicate pattern analysis")
print(f" • actionable_insights_report.txt - Recommendations and insights")
print(f" • detailed_duplicate_analysis.txt - Specific songs and their duplicates")
print(f" • analysis_data.json - Raw analysis data for further processing")
print(f" • skip_songs_detailed.json - Web UI data (always generated)")
elif args.dry_run:
print("\nDRY RUN MODE: No reports generated")
# Create simplified skip list (just paths and reasons) with deduplication
seen_paths = set()
simple_skip_list = []
duplicate_count = 0
print("\n" + "=" * 60)
print("Analysis complete!")
for skip_song in skip_songs:
path = skip_song['path']
if path not in seen_paths:
seen_paths.add(path)
skip_entry = {'path': path}
if config['output']['include_reasons']:
skip_entry['reason'] = skip_song['reason']
simple_skip_list.append(skip_entry)
else:
duplicate_count += 1
except Exception as e:
print(f"\nError during processing: {e}")
sys.exit(1)
save_json_file(simple_skip_list, skip_list_path)
print(f"\nSkip list saved to: {skip_list_path}")
print(f"Total songs to skip: {len(simple_skip_list):,}")
if duplicate_count > 0:
print(f"Removed {duplicate_count:,} duplicate entries from skip list")
elif args.dry_run:
print("\nDRY RUN MODE: No skip list generated")
# Always generate detailed reports (not just when --save-reports is used)
if not args.dry_run:
reports_dir = os.path.join(args.output_dir, 'reports')
os.makedirs(reports_dir, exist_ok=True)
print(f"\n📊 Generating enhanced analysis reports...")
# Analyze skip patterns
skip_analysis = reporter.analyze_skip_patterns(skip_songs)
# Analyze channel optimization
channel_analysis = reporter.analyze_channel_optimization(stats, skip_analysis)
# Generate and save enhanced reports
enhanced_summary = reporter.generate_enhanced_summary_report(stats, skip_analysis)
reporter.save_report_to_file(enhanced_summary, os.path.join(reports_dir, 'enhanced_summary_report.txt'))
channel_optimization = reporter.generate_channel_optimization_report(channel_analysis)
reporter.save_report_to_file(channel_optimization, os.path.join(reports_dir, 'channel_optimization_report.txt'))
duplicate_patterns = reporter.generate_duplicate_pattern_report(skip_analysis)
reporter.save_report_to_file(duplicate_patterns, os.path.join(reports_dir, 'duplicate_pattern_report.txt'))
actionable_insights = reporter.generate_actionable_insights_report(stats, skip_analysis, channel_analysis)
reporter.save_report_to_file(actionable_insights, os.path.join(reports_dir, 'actionable_insights_report.txt'))
# Generate detailed duplicate analysis
detailed_duplicates = reporter.generate_detailed_duplicate_analysis(skip_songs, best_songs)
reporter.save_report_to_file(detailed_duplicates, os.path.join(reports_dir, 'detailed_duplicate_analysis.txt'))
# Save original reports for compatibility
summary_report = reporter.generate_summary_report(stats)
reporter.save_report_to_file(summary_report, os.path.join(reports_dir, 'summary_report.txt'))
skip_report = reporter.generate_skip_list_summary(skip_songs)
reporter.save_report_to_file(skip_report, os.path.join(reports_dir, 'skip_list_summary.txt'))
# Save detailed duplicate report if verbose
if config['output']['verbose']:
duplicate_info = matcher.get_detailed_duplicate_info(songs)
duplicate_report = reporter.generate_duplicate_details(duplicate_info)
reporter.save_report_to_file(duplicate_report, os.path.join(reports_dir, 'duplicate_details.txt'))
# Save analysis data as JSON for further processing
analysis_data = {
'stats': stats,
'skip_analysis': skip_analysis,
'channel_analysis': channel_analysis,
'timestamp': __import__('datetime').datetime.now().isoformat()
}
save_json_file(analysis_data, os.path.join(reports_dir, 'analysis_data.json'))
# Save full skip list data (this is what the web UI needs)
save_json_file(skip_songs, os.path.join(reports_dir, 'skip_songs_detailed.json'))
print(f"✅ Enhanced reports saved to: {reports_dir}")
print(f"📋 Generated reports:")
print(f" • enhanced_summary_report.txt - Comprehensive analysis")
print(f" • channel_optimization_report.txt - Priority optimization suggestions")
print(f" • duplicate_pattern_report.txt - Duplicate pattern analysis")
print(f" • actionable_insights_report.txt - Recommendations and insights")
print(f" • detailed_duplicate_analysis.txt - Specific songs and their duplicates")
print(f" • analysis_data.json - Raw analysis data for further processing")
print(f" • skip_songs_detailed.json - Web UI data (always generated)")
elif args.dry_run:
print("\nDRY RUN MODE: No reports generated")
print("\n" + "=" * 60)
print("Analysis complete!")
except Exception as e:
print(f"\nError during processing: {e}")
sys.exit(1)
if __name__ == "__main__":

344
cli/playlist_validator.py Normal file
View File

@ -0,0 +1,344 @@
#!/usr/bin/env python3
"""
Playlist validation module for the Karaoke Song Library Cleanup Tool.
Validates playlist songs against the song library using exact and fuzzy matching.
"""
import json
import os
from typing import Dict, List, Any, Tuple, Optional
from collections import defaultdict
import difflib
try:
from fuzzywuzzy import fuzz
FUZZY_AVAILABLE = True
except ImportError:
FUZZY_AVAILABLE = False
from utils import (
normalize_artist_title,
extract_channel_from_path,
get_file_extension,
parse_multi_artist,
validate_song_data
)
from matching import SongMatcher
class PlaylistValidator:
    """Validates playlist songs against the song library.

    Songs are matched first by exact normalized artist/title lookup and
    then (when fuzzywuzzy is installed) by fuzzy string similarity.  The
    concrete file to use for a match is chosen with the shared
    SongMatcher priority logic.
    """

    def __init__(self, config: Dict[str, Any], data_dir: str = "../data"):
        """Create a validator bound to *data_dir*.

        Raises:
            ValueError: if allSongs.json cannot be loaded or is empty.
        """
        self.config = config
        self.data_dir = data_dir
        self.song_matcher = SongMatcher(config, data_dir)
        # Minimum similarity (0.0-1.0) for a fuzzy match to be reported.
        self.fuzzy_threshold = config.get('matching', {}).get('fuzzy_threshold', 0.8)
        # Load song library
        self.all_songs = self._load_all_songs()
        if not self.all_songs:
            raise ValueError("Could not load song library from allSongs.json")
        # Create lookup dictionaries for faster matching
        self._build_lookup_tables()

    def _load_all_songs(self) -> List[Dict[str, Any]]:
        """Load the song library from allSongs.json.

        Returns an empty list (and prints the error) on any failure so
        __init__ can raise a single, clear ValueError.
        """
        all_songs_path = os.path.join(self.data_dir, 'allSongs.json')
        try:
            with open(all_songs_path, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            print(f"Error loading song library: {e}")
            return []

    def _build_lookup_tables(self):
        """Build lookup tables for faster exact matching.

        Populates:
            exact_lookup: normalized (artist, title) key -> songs.
            artist_title_lookup: "artist - title" (lowercase) -> songs,
                used as the candidate pool for fuzzy matching.
        """
        # defaultdict (already imported at file level) removes the
        # repeated "if key not in dict" boilerplate.
        self.exact_lookup = defaultdict(list)
        self.artist_title_lookup = defaultdict(list)
        for song in self.all_songs:
            if not validate_song_data(song):
                continue
            # Multi-artist songs are indexed once per contributing artist.
            artists = parse_multi_artist(song['artist'])
            if not artists:
                artists = [song['artist']]
            for artist in artists:
                normalized_key = normalize_artist_title(artist, song['title'], False)
                self.exact_lookup[normalized_key].append(song)
                # Also store by artist-title for fuzzy matching
                artist_title_key = f"{artist.lower()} - {song['title'].lower()}"
                self.artist_title_lookup[artist_title_key].append(song)

    def find_exact_match(self, artist: str, title: str) -> Optional[List[Dict[str, Any]]]:
        """Return all library songs that exactly match artist/title (normalized)."""
        normalized_key = normalize_artist_title(artist, title, False)
        return self.exact_lookup.get(normalized_key, [])

    def find_fuzzy_matches(self, artist: str, title: str, threshold: float = None) -> List[Tuple[Dict[str, Any], float]]:
        """Return (song, similarity) pairs above *threshold*, best first.

        Returns an empty list when fuzzywuzzy is not installed.
        """
        if not FUZZY_AVAILABLE:
            return []
        if threshold is None:
            threshold = self.fuzzy_threshold
        query = f"{artist.lower()} - {title.lower()}"
        matches = []
        for key, songs in self.artist_title_lookup.items():
            # fuzz.ratio is 0-100; normalize to 0.0-1.0.
            similarity = fuzz.ratio(query, key) / 100.0
            if similarity >= threshold:
                # Get the best song from this group using existing priority logic
                best_song, _ = self.song_matcher.select_best_song(songs, artist, title)
                matches.append((best_song, similarity))
        # Sort by similarity score (highest first)
        matches.sort(key=lambda x: x[1], reverse=True)
        return matches

    def validate_playlist(self, playlist: Dict[str, Any], dry_run: bool = True) -> Dict[str, Any]:
        """Validate a single playlist against the song library.

        Each playlist song lands in exactly one bucket: exact_matches,
        fuzzy_matches (flagged for manual review) or missing_songs.

        NOTE(review): *dry_run* is currently unused here — validation
        never mutates anything.
        """
        results = {
            'playlist_title': playlist.get('title', 'Unknown Playlist'),
            'total_songs': len(playlist.get('songs', [])),
            'exact_matches': [],
            'fuzzy_matches': [],
            'missing_songs': [],
            'summary': {
                'exact_match_count': 0,
                'fuzzy_match_count': 0,
                'missing_count': 0,
                'needs_manual_review': 0
            }
        }
        for song in playlist.get('songs', []):
            artist = song.get('artist', '')
            title = song.get('title', '')
            position = song.get('position', 0)
            if not artist or not title:
                results['missing_songs'].append({
                    'position': position,
                    'artist': artist,
                    'title': title,
                    'reason': 'Missing artist or title'
                })
                results['summary']['missing_count'] += 1
                continue
            # Try exact match first
            exact_matches = self.find_exact_match(artist, title)
            if exact_matches:
                # Get the best song using existing priority logic
                best_song, _ = self.song_matcher.select_best_song(exact_matches, artist, title)
                results['exact_matches'].append({
                    'position': position,
                    'playlist_artist': artist,
                    'playlist_title': title,
                    'found_song': best_song,
                    'match_type': 'exact'
                })
                results['summary']['exact_match_count'] += 1
            else:
                # Try fuzzy matching
                fuzzy_matches = self.find_fuzzy_matches(artist, title)
                if fuzzy_matches:
                    # List is sorted best-first; take the top candidate.
                    best_fuzzy_song, similarity = fuzzy_matches[0]
                    results['fuzzy_matches'].append({
                        'position': position,
                        'playlist_artist': artist,
                        'playlist_title': title,
                        'found_song': best_fuzzy_song,
                        'similarity': similarity,
                        'match_type': 'fuzzy',
                        'needs_manual_review': True
                    })
                    results['summary']['fuzzy_match_count'] += 1
                    results['summary']['needs_manual_review'] += 1
                else:
                    results['missing_songs'].append({
                        'position': position,
                        'artist': artist,
                        'title': title,
                        'reason': 'No matches found'
                    })
                    results['summary']['missing_count'] += 1
        return results

    def validate_all_playlists(self, dry_run: bool = True) -> Dict[str, Any]:
        """Validate all playlists in songLists.json.

        Returns per-playlist results plus an aggregated overall_summary,
        or an empty dict if the playlists file cannot be loaded.
        """
        playlists_path = os.path.join(self.data_dir, 'songLists.json')
        try:
            with open(playlists_path, 'r', encoding='utf-8') as f:
                playlists = json.load(f)
        except Exception as e:
            print(f"Error loading playlists: {e}")
            return {}
        all_results = {
            'total_playlists': len(playlists),
            'playlist_results': [],
            'overall_summary': {
                'total_songs': 0,
                'exact_matches': 0,
                'fuzzy_matches': 0,
                'missing_songs': 0,
                'needs_manual_review': 0
            }
        }
        for playlist in playlists:
            result = self.validate_playlist(playlist, dry_run)
            all_results['playlist_results'].append(result)
            # Update overall summary
            summary = result['summary']
            all_results['overall_summary']['total_songs'] += result['total_songs']
            all_results['overall_summary']['exact_matches'] += summary['exact_match_count']
            all_results['overall_summary']['fuzzy_matches'] += summary['fuzzy_match_count']
            all_results['overall_summary']['missing_songs'] += summary['missing_count']
            all_results['overall_summary']['needs_manual_review'] += summary['needs_manual_review']
        return all_results

    def update_playlist_song(self, playlist_index: int, song_position: int,
                             new_artist: str, new_title: str, dry_run: bool = True) -> bool:
        """Update a playlist song with corrected artist/title.

        Args:
            playlist_index: Index into songLists.json.
            song_position: ``position`` field of the song to update.
            new_artist/new_title: Replacement values.
            dry_run: When True, only print what would change.

        Returns:
            True on success (or successful dry run), False otherwise.
        """
        playlists_path = os.path.join(self.data_dir, 'songLists.json')
        try:
            with open(playlists_path, 'r', encoding='utf-8') as f:
                playlists = json.load(f)
        except Exception as e:
            print(f"Error loading playlists: {e}")
            return False
        if playlist_index >= len(playlists):
            print(f"Invalid playlist index: {playlist_index}")
            return False
        playlist = playlists[playlist_index]
        songs = playlist.get('songs', [])
        # .get avoids a KeyError for playlists without a title (every
        # other access in this class treats 'title' as optional).
        playlist_title = playlist.get('title', 'Unknown Playlist')
        # Find the song by position
        for song in songs:
            if song.get('position') == song_position:
                if dry_run:
                    print(f"DRY RUN: Would update playlist '{playlist_title}' song {song_position}")
                    print(f"  From: {song['artist']} - {song['title']}")
                    print(f"  To:   {new_artist} - {new_title}")
                else:
                    song['artist'] = new_artist
                    song['title'] = new_title
                    # Save the updated playlists
                    try:
                        with open(playlists_path, 'w', encoding='utf-8') as f:
                            json.dump(playlists, f, indent=2, ensure_ascii=False)
                        print(f"Updated playlist '{playlist_title}' song {song_position}")
                        return True
                    except Exception as e:
                        print(f"Error saving playlists: {e}")
                        return False
                break
        else:
            print(f"Song with position {song_position} not found in playlist")
            return False
        return True
def main():
    """Command-line entry point: validate playlists against the library."""
    import argparse

    parser = argparse.ArgumentParser(description='Validate playlists against song library')
    parser.add_argument('--config', default='../config/config.json', help='Configuration file path')
    parser.add_argument('--data-dir', default='../data', help='Data directory path')
    parser.add_argument('--dry-run', action='store_true', default=True, help='Dry run mode (default)')
    parser.add_argument('--apply', action='store_true', help='Apply changes (disable dry run)')
    parser.add_argument('--playlist-index', type=int, help='Validate specific playlist by index')
    parser.add_argument('--output', help='Output results to JSON file')
    args = parser.parse_args()

    # Load configuration; bail out on any failure.
    try:
        with open(args.config, 'r') as cfg_file:
            config = json.load(cfg_file)
    except Exception as e:
        print(f"Error loading config: {e}")
        return

    validator = PlaylistValidator(config, args.data_dir)
    # --apply wins over the (default-on) --dry-run flag.
    dry_run = not args.apply

    if args.playlist_index is not None:
        # Validate a single playlist selected by index.
        playlists_path = os.path.join(args.data_dir, 'songLists.json')
        try:
            with open(playlists_path, 'r', encoding='utf-8') as lists_file:
                playlists = json.load(lists_file)
        except Exception as e:
            print(f"Error loading playlists: {e}")
            return
        if args.playlist_index >= len(playlists):
            print(f"Invalid playlist index: {args.playlist_index}")
            return
        report = validator.validate_playlist(playlists[args.playlist_index], dry_run)
        summary = report['summary']
        print(f"\nPlaylist: {report['playlist_title']}")
        print(f"Total songs: {report['total_songs']}")
        print(f"Exact matches: {summary['exact_match_count']}")
        print(f"Fuzzy matches: {summary['fuzzy_match_count']}")
        print(f"Missing: {summary['missing_count']}")
        print(f"Need manual review: {summary['needs_manual_review']}")
    else:
        # Validate every playlist and report the aggregate numbers.
        results = validator.validate_all_playlists(dry_run)
        overall = results['overall_summary']
        print(f"\nPlaylist Validation Results:")
        print(f"Total playlists: {results['total_playlists']}")
        print(f"Total songs: {overall['total_songs']}")
        print(f"Exact matches: {overall['exact_matches']}")
        print(f"Fuzzy matches: {overall['fuzzy_matches']}")
        print(f"Missing: {overall['missing_songs']}")
        print(f"Need manual review: {overall['needs_manual_review']}")
        # Optionally persist the full results for later inspection.
        if args.output:
            try:
                with open(args.output, 'w', encoding='utf-8') as out_file:
                    json.dump(results, out_file, indent=2, ensure_ascii=False)
                print(f"\nResults saved to: {args.output}")
            except Exception as e:
                print(f"Error saving results: {e}")
if __name__ == '__main__':
main()

View File

@ -10,6 +10,11 @@ import os
from typing import Dict, List, Any
from datetime import datetime
# Import playlist validator
import sys
sys.path.append('../cli')
from playlist_validator import PlaylistValidator
app = Flask(__name__)
# Configuration
@ -178,7 +183,7 @@ def normalize_path(file_path: str) -> str:
# Simple fix: replace :// with :\
fixed_path = file_path.replace('://', ':\\')
print(f"DEBUG: Fixed path (simple :// to :\ conversion): {fixed_path}")
print(f"DEBUG: Fixed path (simple :// to :\\ conversion): {fixed_path}")
return fixed_path
# If no :// corruption detected, return the path as-is
@ -196,6 +201,16 @@ def remaining_songs():
"""Page showing remaining songs after cleanup."""
return render_template('remaining_songs.html')
@app.route('/favorites')
def favorites():
    """Page showing favorites with matching songs."""
    # Renders the static template only; the data is presumably fetched
    # by the page via the /api/favorites endpoint — verify in template.
    return render_template('favorites.html')
@app.route('/history')
def history():
    """Page showing history with matching songs."""
    # Renders the static template only; data loading happens client-side
    # (assumed to hit a /api/history endpoint — TODO confirm).
    return render_template('history.html')
@app.route('/api/duplicates')
def get_duplicates():
"""API endpoint to get duplicate data."""
@ -230,23 +245,59 @@ def get_duplicates():
if channel_filter or file_type_filter:
matches_filter = False
# Check kept version
kept_channel = extract_channel(group['kept_version'])
kept_file_type = get_file_type(group['kept_version'])
if (not channel_filter or channel_filter in kept_channel.lower()) and \
(not file_type_filter or file_type_filter in kept_file_type.lower()):
matches_filter = True
# Special handling for mp3-only filter
if file_type_filter == 'mp3-only':
# Check if kept version is MP3 and there are no MP4 alternatives
kept_file_type = get_file_type(group['kept_version'])
has_mp4_alternative = False
# Check skipped versions if kept version doesn't match
if not matches_filter:
# Check if any skipped version is MP4
for version in group['skipped_versions']:
if (not channel_filter or channel_filter in version['channel'].lower()) and \
(not file_type_filter or file_type_filter in version['file_type'].lower()):
matches_filter = True
if version['file_type'].upper() == 'MP4':
has_mp4_alternative = True
break
if not matches_filter:
continue
# Only show if kept version is MP3 and no MP4 alternatives exist
if kept_file_type.upper() != 'MP3' or has_mp4_alternative:
continue
# Apply channel filter if specified
if channel_filter:
kept_channel = extract_channel(group['kept_version'])
if channel_filter not in kept_channel.lower():
continue
elif file_type_filter == 'mp3':
# Special handling for MP3 filter - show songs where primary is MP3 and has alternatives
kept_file_type = get_file_type(group['kept_version'])
# Only show if kept version is MP3 and there are alternatives
if kept_file_type.upper() != 'MP3' or len(group['skipped_versions']) == 0:
continue
# Apply channel filter if specified
if channel_filter:
kept_channel = extract_channel(group['kept_version'])
if channel_filter not in kept_channel.lower():
continue
else:
# Regular file type and channel filtering
# Check kept version
kept_channel = extract_channel(group['kept_version'])
kept_file_type = get_file_type(group['kept_version'])
if (not channel_filter or channel_filter in kept_channel.lower()) and \
(not file_type_filter or file_type_filter in kept_file_type.lower()):
matches_filter = True
# Check skipped versions if kept version doesn't match
if not matches_filter:
for version in group['skipped_versions']:
if (not channel_filter or channel_filter in version['channel'].lower()) and \
(not file_type_filter or file_type_filter in version['file_type'].lower()):
matches_filter = True
break
if not matches_filter:
continue
filtered_groups.append(group)
@ -644,6 +695,491 @@ def load_priority_preferences():
except Exception as e:
return jsonify({'error': f'Error loading priority preferences: {str(e)}'}), 500
def find_matching_songs(item: Dict[str, Any], all_songs: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
    """Find all songs from allSongs.json that match a given item (favorite/history).

    Matching is an exact, case-insensitive comparison on artist and title.
    Results are sorted so the item's current path comes first, followed by
    alternatives ordered by file-type preference (MP4 before MP3, unknown last).

    Args:
        item: A favorite/history entry expected to carry 'artist', 'title'
            and 'path' keys.
        all_songs: Parsed contents of allSongs.json.

    Returns:
        A list of dicts (path, artist, title, file_type, channel, is_current)
        describing each matching song, best candidate first.
    """
    matching_songs: List[Dict[str, Any]] = []
    # Validate input
    if item is None or not isinstance(item, dict):
        print(f"Warning: Invalid item passed to find_matching_songs: {item}")
        return matching_songs
    # Use `or ''` so a key that is present but null in the JSON does not
    # crash on .lower() -- .get's default only applies to MISSING keys.
    item_artist = (item.get('artist') or '').lower().strip()
    item_title = (item.get('title') or '').lower().strip()
    item_path = item.get('path', '')
    for song in all_songs:
        # Skip malformed entries
        if song is None or not isinstance(song, dict):
            continue
        song_artist = (song.get('artist') or '').lower().strip()
        song_title = (song.get('title') or '').lower().strip()
        # Exact match on artist and title
        if song_artist == item_artist and song_title == item_title:
            song_path = song.get('path', '')
            matching_songs.append({
                'path': song_path,
                'artist': song.get('artist', 'Unknown'),
                'title': song.get('title', 'Unknown'),
                'file_type': get_file_type(song_path),
                'channel': extract_channel(song_path),
                'is_current': song_path == item_path
            })
    # Sort by priority: current path first, then by file type priority
    def sort_key(song):
        # Current path gets highest priority
        if song['is_current']:
            return (0, 0)
        # File type priority: MP4 preferred over MP3; anything else last.
        file_type_priority = {
            'MP4': 1,
            'MP3': 2,
            'Unknown': 3
        }
        return (1, file_type_priority.get(song['file_type'], 3))
    matching_songs.sort(key=sort_key)
    return matching_songs
@app.route('/api/favorites')
def get_favorites():
    """Get favorites data with matching songs for each favorite."""
    try:
        # Load the two JSON sources this endpoint joins together.
        favorites = load_json_file(os.path.join(DATA_DIR, 'favorites.json'))
        all_songs = load_json_file(os.path.join(DATA_DIR, 'allSongs.json'))
        if not favorites or not all_songs:
            return jsonify({'error': 'Failed to load data files'}), 500
        processed_favorites = []
        for i, favorite in enumerate(favorites):
            # Guard against malformed entries before touching them.
            if favorite is None:
                print(f"Warning: Skipping None item at index {i} in favorites")
                continue
            if not isinstance(favorite, dict):
                print(f"Warning: Skipping non-dict item at index {i} in favorites: {type(favorite)}")
                continue
            try:
                processed_favorites.append({
                    'index': i,
                    'original': favorite,
                    'matching_songs': find_matching_songs(favorite, all_songs),
                    'artist': favorite.get('artist', 'Unknown'),
                    'title': favorite.get('title', 'Unknown'),
                    'current_path': favorite.get('path', ''),
                    'count': favorite.get('count', 0),
                    'favorite': favorite.get('favorite', False),
                    'disabled': favorite.get('disabled', False)
                })
            except Exception as item_error:
                print(f"Error processing favorite item {i}: {item_error}")
                print(f"Item data: {favorite}")
                continue
        # Alphabetical ordering: artist first, title as tie-breaker.
        processed_favorites.sort(key=lambda entry: (entry['artist'].lower(), entry['title'].lower()))
        return jsonify({
            'success': True,
            'favorites': processed_favorites,
            'total': len(processed_favorites)
        })
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/history')
def get_history():
    """Get history data with matching songs for each history item."""
    try:
        # Load the two JSON sources this endpoint joins together.
        history = load_json_file(os.path.join(DATA_DIR, 'history.json'))
        all_songs = load_json_file(os.path.join(DATA_DIR, 'allSongs.json'))
        if not history or not all_songs:
            return jsonify({'error': 'Failed to load data files'}), 500
        processed_history = []
        for i, item in enumerate(history):
            # Guard against malformed entries before touching them.
            if item is None:
                print(f"Warning: Skipping None item at index {i} in history")
                continue
            if not isinstance(item, dict):
                print(f"Warning: Skipping non-dict item at index {i} in history: {type(item)}")
                continue
            try:
                processed_history.append({
                    'index': i,
                    'original': item,
                    'matching_songs': find_matching_songs(item, all_songs),
                    'artist': item.get('artist', 'Unknown'),
                    'title': item.get('title', 'Unknown'),
                    'current_path': item.get('path', ''),
                    'count': item.get('count', 0),
                    'favorite': item.get('favorite', False),
                    'disabled': item.get('disabled', False)
                })
            except Exception as item_error:
                print(f"Error processing history item {i}: {item_error}")
                print(f"Item data: {item}")
                continue
        # Alphabetical ordering: artist first, title as tie-breaker.
        processed_history.sort(key=lambda entry: (entry['artist'].lower(), entry['title'].lower()))
        return jsonify({
            'success': True,
            'history': processed_history,
            'total': len(processed_history)
        })
    except Exception as e:
        print(f"Error in get_history: {e}")
        import traceback
        print(f"Full traceback: {traceback.format_exc()}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/update-favorite-path', methods=['POST'])
def update_favorite_path():
    """Update the path of a favorite item.

    Expects JSON body: {'index': <int>, 'path': <str>}.
    Returns {'success': True} or an error payload with a 4xx/5xx status.
    """
    try:
        data = request.get_json()
        favorite_index = data.get('index')
        new_path = data.get('path')
        if favorite_index is None or new_path is None:
            return jsonify({'success': False, 'error': 'Missing index or path'}), 400
        # Load favorites file
        favorites_file = os.path.join(DATA_DIR, 'favorites.json')
        favorites = load_json_file(favorites_file)
        # Reject negative indices too: the previous `index >= len` check let
        # a negative index silently update an item counted from the end.
        if not favorites or not (0 <= favorite_index < len(favorites)):
            return jsonify({'success': False, 'error': 'Invalid favorite index'}), 400
        # Update the path
        favorites[favorite_index]['path'] = new_path
        # Save back to file
        with open(favorites_file, 'w', encoding='utf-8') as f:
            json.dump(favorites, f, indent=2, ensure_ascii=False)
        return jsonify({'success': True})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/update-history-path', methods=['POST'])
def update_history_path():
    """Update the path of a history item.

    Expects JSON body: {'index': <int>, 'path': <str>}.
    Returns {'success': True} or an error payload with a 4xx/5xx status.
    """
    try:
        data = request.get_json()
        history_index = data.get('index')
        new_path = data.get('path')
        if history_index is None or new_path is None:
            return jsonify({'success': False, 'error': 'Missing index or path'}), 400
        # Load history file
        history_file = os.path.join(DATA_DIR, 'history.json')
        history = load_json_file(history_file)
        # Reject negative indices too: the previous `index >= len` check let
        # a negative index silently update an item counted from the end.
        if not history or not (0 <= history_index < len(history)):
            return jsonify({'success': False, 'error': 'Invalid history index'}), 400
        # Update the path
        history[history_index]['path'] = new_path
        # Save back to file
        with open(history_file, 'w', encoding='utf-8') as f:
            json.dump(history, f, indent=2, ensure_ascii=False)
        return jsonify({'success': True})
    except Exception as e:
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/update-favorite-property', methods=['POST'])
def update_favorite_property():
    """Update any property of a favorite item.

    Expects JSON body: {'index': <int>, 'property': <str>, 'value': <non-null>}.
    When the 'path' property changes, an 'original_path' that still pointed at
    the old path is moved along with it.
    """
    try:
        data = request.get_json()
        index = data.get('index')
        property_name = data.get('property')
        new_value = data.get('value')
        if index is None or property_name is None or new_value is None:
            return jsonify({'success': False, 'error': 'Missing index, property, or value'}), 400
        # Load current favorites
        favorites_file = os.path.join(DATA_DIR, 'favorites.json')
        if not os.path.exists(favorites_file):
            return jsonify({'success': False, 'error': 'Favorites file not found'}), 404
        with open(favorites_file, 'r', encoding='utf-8') as f:
            favorites = json.load(f)
        # Update the property
        if 0 <= index < len(favorites):
            # Capture the pre-update value: the previous code compared
            # original_path against the path AFTER overwriting it, so the
            # "keep original_path in sync" branch compared against the NEW
            # value and could never detect a match with the old path.
            old_value = favorites[index].get(property_name)
            favorites[index][property_name] = new_value
            # Special handling for path updates: carry original_path forward
            # only if it matched the path being replaced.
            if property_name == 'path' and 'original_path' in favorites[index]:
                if favorites[index]['original_path'] == old_value:
                    favorites[index]['original_path'] = new_value
            # Save updated favorites
            with open(favorites_file, 'w', encoding='utf-8') as f:
                json.dump(favorites, f, indent=2, ensure_ascii=False)
            return jsonify({'success': True})
        else:
            return jsonify({'success': False, 'error': 'Invalid index'}), 400
    except Exception as e:
        print(f"Error updating favorite property: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/update-history-property', methods=['POST'])
def update_history_property():
    """Update any property of a history item.

    Expects JSON body: {'index': <int>, 'property': <str>, 'value': <non-null>}.
    When the 'path' property changes, an 'original_path' that still pointed at
    the old path is moved along with it.
    """
    try:
        data = request.get_json()
        index = data.get('index')
        property_name = data.get('property')
        new_value = data.get('value')
        if index is None or property_name is None or new_value is None:
            return jsonify({'success': False, 'error': 'Missing index, property, or value'}), 400
        # Load current history
        history_file = os.path.join(DATA_DIR, 'history.json')
        if not os.path.exists(history_file):
            return jsonify({'success': False, 'error': 'History file not found'}), 404
        with open(history_file, 'r', encoding='utf-8') as f:
            history = json.load(f)
        # Update the property
        if 0 <= index < len(history):
            # Capture the pre-update value: the previous code compared
            # original_path against the path AFTER overwriting it, so the
            # "keep original_path in sync" branch compared against the NEW
            # value and could never detect a match with the old path.
            old_value = history[index].get(property_name)
            history[index][property_name] = new_value
            # Special handling for path updates: carry original_path forward
            # only if it matched the path being replaced.
            if property_name == 'path' and 'original_path' in history[index]:
                if history[index]['original_path'] == old_value:
                    history[index]['original_path'] = new_value
            # Save updated history
            with open(history_file, 'w', encoding='utf-8') as f:
                json.dump(history, f, indent=2, ensure_ascii=False)
            return jsonify({'success': True})
        else:
            return jsonify({'success': False, 'error': 'Invalid index'}), 400
    except Exception as e:
        print(f"Error updating history property: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/delete-favorite', methods=['POST'])
def delete_favorite():
    """Delete a favorite item by index."""
    try:
        payload = request.get_json()
        index = payload.get('index')
        if index is None:
            return jsonify({'success': False, 'error': 'Missing index'}), 400
        favorites_file = os.path.join(DATA_DIR, 'favorites.json')
        if not os.path.exists(favorites_file):
            return jsonify({'success': False, 'error': 'Favorites file not found'}), 404
        with open(favorites_file, 'r', encoding='utf-8') as f:
            favorites = json.load(f)
        # Out-of-range indices are reported rather than raising IndexError.
        if not (0 <= index < len(favorites)):
            return jsonify({'success': False, 'error': 'Invalid index'}), 400
        removed = favorites.pop(index)
        # Persist the shortened list before reporting success.
        with open(favorites_file, 'w', encoding='utf-8') as f:
            json.dump(favorites, f, indent=2, ensure_ascii=False)
        return jsonify({
            'success': True,
            'deleted_item': removed,
            'message': f'Deleted "{removed.get("artist", "Unknown")} - {removed.get("title", "Unknown")}"'
        })
    except Exception as e:
        print(f"Error deleting favorite: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/delete-history', methods=['POST'])
def delete_history():
    """Delete a history item by index."""
    try:
        payload = request.get_json()
        index = payload.get('index')
        if index is None:
            return jsonify({'success': False, 'error': 'Missing index'}), 400
        history_file = os.path.join(DATA_DIR, 'history.json')
        if not os.path.exists(history_file):
            return jsonify({'success': False, 'error': 'History file not found'}), 404
        with open(history_file, 'r', encoding='utf-8') as f:
            history = json.load(f)
        # Out-of-range indices are reported rather than raising IndexError.
        if not (0 <= index < len(history)):
            return jsonify({'success': False, 'error': 'Invalid index'}), 400
        removed = history.pop(index)
        # Persist the shortened list before reporting success.
        with open(history_file, 'w', encoding='utf-8') as f:
            json.dump(history, f, indent=2, ensure_ascii=False)
        return jsonify({
            'success': True,
            'deleted_item': removed,
            'message': f'Deleted "{removed.get("artist", "Unknown")} - {removed.get("title", "Unknown")}"'
        })
    except Exception as e:
        print(f"Error deleting history item: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/merge-history', methods=['POST'])
def merge_history():
    """Merge history objects that match on artist, title, and path, summing their count properties.

    Entries that cannot participate in merging (non-dict items, or items
    missing any of artist/title/path) are preserved untouched at the end of
    the file -- the previous implementation silently dropped them from the
    saved history, losing data. Null artist/title/path values are also
    tolerated instead of crashing on .strip().
    """
    try:
        # Load current history
        history_file = os.path.join(DATA_DIR, 'history.json')
        if not os.path.exists(history_file):
            return jsonify({'success': False, 'error': 'History file not found'}), 404
        with open(history_file, 'r', encoding='utf-8') as f:
            history_items = json.load(f)
        if not history_items:
            return jsonify({'success': False, 'error': 'No history items found to merge'}), 400
        # Group items by (artist, title, path), case-insensitively.
        grouped_items = {}
        passthrough_items = []  # entries we cannot merge -- kept as-is
        merged_count = 0
        total_merged_entries = 0
        for item in history_items:
            if not isinstance(item, dict):
                passthrough_items.append(item)
                continue
            artist = (item.get('artist') or '').strip()
            title = (item.get('title') or '').strip()
            path = (item.get('path') or '').strip()
            if not artist or not title or not path:
                passthrough_items.append(item)
                continue
            # Create a key for grouping
            key = (artist.lower(), title.lower(), path.lower())
            grouped_items.setdefault(key, []).append(item)
        # Process groups with multiple items
        merged_items = []
        for key, items in grouped_items.items():
            if len(items) == 1:
                # Single item, keep as is
                merged_items.append(items[0])
                continue
            # Multiple entries for the same song: fold them into one record,
            # starting from the first item as the base.
            merged_item = items[0].copy()
            # Sum the counts, tolerating counts stored as strings.
            total_count = 0
            for entry in items:
                count_value = entry.get('count', 0)
                if isinstance(count_value, str):
                    try:
                        total_count += int(count_value)
                    except ValueError:
                        pass  # unparseable string counts contribute 0
                else:
                    total_count += count_value
            merged_item['count'] = total_count
            # Boolean flags: True wins if any duplicate had it set.
            merged_item['favorite'] = any(entry.get('favorite', False) for entry in items)
            merged_item['disabled'] = any(entry.get('disabled', False) for entry in items)
            # Other properties: keep the first non-empty value seen.
            for prop in ['key', 'original_path', 'genre']:
                if merged_item.get(prop):
                    continue
                for entry in items[1:]:  # base item already checked above
                    if entry.get(prop):
                        merged_item[prop] = entry[prop]
                        break
            merged_items.append(merged_item)
            merged_count += 1
            total_merged_entries += len(items)
        # Re-attach the entries that could not participate in merging so no
        # data is lost when the file is rewritten.
        merged_items.extend(passthrough_items)
        # Save the merged history
        with open(history_file, 'w', encoding='utf-8') as f:
            json.dump(merged_items, f, indent=2, ensure_ascii=False)
        return jsonify({
            'success': True,
            'merged_groups': merged_count,
            'total_merged_entries': total_merged_entries,
            'final_count': len(merged_items),
            'message': f'Successfully merged {merged_count} groups ({total_merged_entries} total entries → {len(merged_items)} entries)'
        })
    except Exception as e:
        print(f"Error merging history objects: {e}")
        return jsonify({'success': False, 'error': str(e)}), 500
@app.route('/api/video/<path:file_path>')
def serve_video(file_path):
"""Serve video files for playback in the web UI."""
@ -746,5 +1282,171 @@ def serve_video(file_path):
print(f"DEBUG: Full traceback: {traceback.format_exc()}")
return jsonify({'error': f'Error serving video: {str(e)}'}), 500
# Playlist Validation Endpoints
@app.route('/playlist-validation')
def playlist_validation():
    """Render the playlist validation page."""
    return render_template('playlist_validation.html')
@app.route('/api/playlists')
def get_playlists():
    """Get list of all playlists."""
    try:
        playlists_path = os.path.join(DATA_DIR, 'songLists.json')
        with open(playlists_path, 'r', encoding='utf-8') as f:
            playlists = json.load(f)
        # Summarize each playlist: position, display title, number of songs.
        summaries = [
            {
                'index': position,
                'title': playlist.get('title', 'Unknown Playlist'),
                'song_count': len(playlist.get('songs', []))
            }
            for position, playlist in enumerate(playlists)
        ]
        return jsonify(summaries)
    except Exception as e:
        return jsonify({'error': f'Error loading playlists: {str(e)}'}), 500
@app.route('/api/validate-playlist/<int:playlist_index>')
def validate_playlist(playlist_index):
    """Validate a specific playlist."""
    try:
        config = load_json_file(CONFIG_FILE)
        if not config:
            return jsonify({'error': 'Could not load configuration'}), 500
        validator = PlaylistValidator(config, DATA_DIR)
        playlists_path = os.path.join(DATA_DIR, 'songLists.json')
        with open(playlists_path, 'r', encoding='utf-8') as f:
            playlists = json.load(f)
        if playlist_index >= len(playlists):
            return jsonify({'error': f'Invalid playlist index: {playlist_index}'}), 400
        # Dry run: report problems without writing anything back.
        outcome = validator.validate_playlist(playlists[playlist_index], dry_run=True)
        return jsonify(outcome)
    except Exception as e:
        return jsonify({'error': f'Error validating playlist: {str(e)}'}), 500
@app.route('/api/validate-all-playlists')
def validate_all_playlists():
    """Validate all playlists."""
    try:
        config = load_json_file(CONFIG_FILE)
        if not config:
            return jsonify({'error': 'Could not load configuration'}), 500
        # Dry run: report problems without writing anything back.
        validator = PlaylistValidator(config, DATA_DIR)
        outcome = validator.validate_all_playlists(dry_run=True)
        return jsonify(outcome)
    except Exception as e:
        return jsonify({'error': f'Error validating playlists: {str(e)}'}), 500
@app.route('/api/update-playlist-song', methods=['POST'])
def update_playlist_song():
    """Update a playlist song with corrected artist/title."""
    try:
        payload = request.get_json()
        playlist_index = payload.get('playlist_index')
        song_position = payload.get('song_position')
        new_artist = payload.get('new_artist')
        new_title = payload.get('new_title')
        dry_run = payload.get('dry_run', True)
        # All four identifiers are mandatory; dry_run alone may default.
        required = (playlist_index, song_position, new_artist, new_title)
        if any(value is None for value in required):
            return jsonify({'error': 'Missing required parameters'}), 400
        config = load_json_file(CONFIG_FILE)
        if not config:
            return jsonify({'error': 'Could not load configuration'}), 500
        validator = PlaylistValidator(config, DATA_DIR)
        updated = validator.update_playlist_song(
            playlist_index, song_position, new_artist, new_title, dry_run
        )
        if updated:
            return jsonify({'success': True, 'message': 'Playlist song updated successfully'})
        return jsonify({'error': 'Failed to update playlist song'}), 500
    except Exception as e:
        return jsonify({'error': f'Error updating playlist song: {str(e)}'}), 500
@app.route('/api/apply-all-updates', methods=['POST'])
def apply_all_updates():
    """Apply all pending playlist updates in batch.

    Expects JSON body: {'changes': [{'playlistIndex', 'songPosition',
    'newArtist', 'newTitle'}, ...]}. Each change is applied independently;
    a malformed entry is counted as failed instead of aborting the batch
    (previously a missing key raised KeyError mid-loop, returning a generic
    500 after some changes had already been written, with no counts).
    """
    try:
        data = request.get_json()
        changes = data.get('changes', [])
        if not changes:
            return jsonify({'error': 'No changes to apply'}), 400
        # Load configuration
        config = load_json_file(CONFIG_FILE)
        if not config:
            return jsonify({'error': 'Could not load configuration'}), 500
        # Create validator
        validator = PlaylistValidator(config, DATA_DIR)
        # Apply all changes, tallying successes and failures.
        success_count = 0
        failed_count = 0
        required_keys = ('playlistIndex', 'songPosition', 'newArtist', 'newTitle')
        for change in changes:
            # Malformed change entries fail individually rather than
            # crashing the whole batch part-way through.
            if not isinstance(change, dict) or any(k not in change for k in required_keys):
                failed_count += 1
                continue
            success = validator.update_playlist_song(
                change['playlistIndex'],
                change['songPosition'],
                change['newArtist'],
                change['newTitle'],
                False  # Always apply changes (not dry run)
            )
            if success:
                success_count += 1
            else:
                failed_count += 1
        if failed_count == 0:
            message = f'Successfully applied {success_count} changes to playlists.'
        else:
            message = f'Applied {success_count} changes, {failed_count} failed.'
        return jsonify({
            'success': True,
            'message': message,
            'applied': success_count,
            'failed': failed_count
        })
    except Exception as e:
        return jsonify({'error': f'Error applying updates: {str(e)}'}), 500
# Dev-server entry point.
# NOTE(review): debug=True combined with host 0.0.0.0 exposes the Werkzeug
# interactive debugger to every machine on the network -- confirm this app is
# only ever run this way on a trusted/local network, never in production.
if __name__ == '__main__':
    app.run(debug=True, host='0.0.0.0', port=5000)

1000
web/templates/favorites.html Normal file

File diff suppressed because it is too large Load Diff

1047
web/templates/history.html Normal file

File diff suppressed because it is too large Load Diff

View File

@ -245,9 +245,44 @@
margin-top: 4px;
word-break: break-all;
}
/* Navigation */
.nav-link {
color: #6c757d;
}
.nav-link.active {
color: #007bff;
font-weight: bold;
}
</style>
</head>
<body>
<!-- Navigation -->
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">
<i class="fas fa-music"></i> Karaoke Manager
</a>
<div class="navbar-nav">
<a class="nav-link active" href="/">
<i class="fas fa-copy"></i> Duplicates
</a>
<a class="nav-link" href="/favorites">
<i class="fas fa-heart"></i> Favorites
</a>
<a class="nav-link" href="/history">
<i class="fas fa-history"></i> History
</a>
<a class="nav-link" href="/remaining-songs">
<i class="fas fa-list"></i> Remaining Songs
</a>
<a class="nav-link" href="/playlist-validation">
<i class="fas fa-list-check"></i> Playlist Validation
</a>
</div>
</div>
</nav>
<div class="container-fluid">
<!-- Header -->
<div class="row bg-primary text-white p-3 mb-4">
@ -450,7 +485,7 @@
<option value="">All Types</option>
<option value="mp4">MP4</option>
<option value="mp3">MP3</option>
<option value="mp3-only">MP3 Only (No MP4 Alternative)</option>
</select>
</div>
<div class="col-md-2">

File diff suppressed because it is too large Load Diff

View File

@ -48,9 +48,41 @@
.back-button {
margin-bottom: 1rem;
}
/* Navigation */
.nav-link {
color: #6c757d;
}
.nav-link.active {
color: #28a745;
font-weight: bold;
}
</style>
</head>
<body>
<!-- Navigation -->
<nav class="navbar navbar-expand-lg navbar-dark bg-dark">
<div class="container-fluid">
<a class="navbar-brand" href="/">
<i class="fas fa-music"></i> Karaoke Manager
</a>
<div class="navbar-nav">
<a class="nav-link" href="/">
<i class="fas fa-copy"></i> Duplicates
</a>
<a class="nav-link" href="/favorites">
<i class="fas fa-heart"></i> Favorites
</a>
<a class="nav-link" href="/history">
<i class="fas fa-history"></i> History
</a>
<a class="nav-link active" href="/remaining-songs">
<i class="fas fa-list"></i> Remaining Songs
</a>
</div>
</div>
</nav>
<div class="container-fluid">
<!-- Header -->
<div class="row mt-3">