# KaraokeVideoDownloader/karaoke_downloader/downloader.py

import os
import sys
import subprocess
import json
import re
from pathlib import Path
from datetime import datetime, timedelta
from karaoke_downloader.tracking_manager import TrackingManager, SongStatus, FormatType
from karaoke_downloader.id3_utils import add_id3_tags, extract_artist_title
from karaoke_downloader.songlist_manager import (
load_songlist, load_songlist_tracking, save_songlist_tracking,
is_songlist_song_downloaded, mark_songlist_song_downloaded, normalize_title
)
from karaoke_downloader.server_manager import (
load_server_songs, is_song_on_server, load_server_duplicates_tracking,
check_and_mark_server_duplicate, is_song_marked_as_server_duplicate
)
from karaoke_downloader.youtube_utils import get_channel_info, get_playlist_info
from karaoke_downloader.fuzzy_matcher import get_similarity_function, is_fuzzy_match, is_exact_match, create_song_key, create_video_key
import logging
import hashlib
from karaoke_downloader.download_planner import build_download_plan
from karaoke_downloader.cache_manager import (
get_download_plan_cache_file, load_cached_plan, save_plan_cache, delete_plan_cache
)
from karaoke_downloader.video_downloader import download_video_and_track, is_valid_mp4, execute_download_plan
from karaoke_downloader.channel_manager import reset_channel_downloads, download_from_file
# Constants
DEFAULT_FUZZY_THRESHOLD = 85  # minimum fuzzy-match score (percent) to accept a songlist/video match
DEFAULT_CACHE_EXPIRATION_DAYS = 1  # download-plan caches older than this are rebuilt
DEFAULT_FILENAME_LENGTH_LIMIT = 100  # filenames longer than this are truncated (Windows path limits)
DEFAULT_ARTIST_LENGTH_LIMIT = 30  # max artist characters kept when truncating a filename
DEFAULT_TITLE_LENGTH_LIMIT = 60  # max title characters kept when truncating a filename
DEFAULT_DISPLAY_LIMIT = 10  # max unmatched songs listed on the console
DATA_DIR = Path("data")  # root directory for all tracking/config JSON files
class KaraokeDownloader:
    """Coordinates karaoke video downloads: configuration, download/failure tracking, and output layout."""

    def __init__(self):
        # Bundled yt-dlp binary (Windows .exe layout) and working directories.
        self.yt_dlp_path = Path("downloader/yt-dlp.exe")
        self.downloads_dir = Path("downloads")
        self.logs_dir = Path("logs")
        self.downloads_dir.mkdir(exist_ok=True)
        self.logs_dir.mkdir(exist_ok=True)
        # Persistent per-song download/failure records plus a channel video-list cache.
        self.tracker = TrackingManager(tracking_file=DATA_DIR / "karaoke_tracking.json", cache_file=DATA_DIR / "channel_cache.json")
        self.config = self._load_config()
        # Separate tracking file for songs downloaded via the songlist workflow.
        self.songlist_tracking_file = DATA_DIR / "songlist_tracking.json"
        self.songlist_tracking = load_songlist_tracking(str(self.songlist_tracking_file))
        # Load server songs for availability checking
        self.server_songs = load_server_songs()
def _load_config(self):
    """Load data/config.json if readable; otherwise return the built-in defaults.

    A parse failure or a file that vanishes between the existence check and
    the open is reported as a warning and falls back to the defaults.
    """
    # Built-in fallback configuration, used whenever the file cannot be read.
    defaults = {
        "download_settings": {
            "format": "best[height<=720][ext=mp4]/best[height<=720]/best[ext=mp4]/best",
            "preferred_resolution": "720p",
            "audio_format": "mp3",
            "audio_quality": "0",
            "subtitle_language": "en",
            "subtitle_format": "srt",
            "write_metadata": False,
            "write_thumbnail": False,
            "write_description": False,
            "write_annotations": False,
            "write_comments": False,
            "write_subtitles": False,
            "embed_metadata": False,
            "add_metadata": False,
            "continue_downloads": True,
            "no_overwrites": True,
            "ignore_errors": True,
            "no_warnings": False
        },
        "folder_structure": {
            "downloads_dir": "downloads",
            "logs_dir": "logs",
            "tracking_file": str(DATA_DIR / "karaoke_tracking.json")
        },
        "logging": {
            "level": "INFO",
            "format": "%(asctime)s - %(levelname)s - %(message)s",
            "include_console": True,
            "include_file": True
        },
        "yt_dlp_path": "downloader/yt-dlp.exe"
    }

    config_path = DATA_DIR / "config.json"
    try:
        if config_path.exists():
            with config_path.open('r', encoding='utf-8') as f:
                return json.load(f)
    except (json.JSONDecodeError, FileNotFoundError) as e:
        print(f"Warning: Could not load config.json: {e}")
    return defaults
def _should_skip_song(self, artist, title, channel_name, video_id, video_title, server_songs=None, server_duplicates_tracking=None):
"""
Centralized method to check if a song should be skipped.
Performs four checks in order:
1. Already downloaded (tracking)
2. File exists on filesystem
3. Already on server
4. Previously failed download (bad file)
Returns:
tuple: (should_skip, reason, total_filtered)
"""
total_filtered = 0
# Check 1: Already downloaded by this system
if self.tracker.is_song_downloaded(artist, title, channel_name, video_id):
return True, "already downloaded", total_filtered
# Check 2: File already exists on filesystem
# Generate the expected filename based on the download mode context
safe_title = title
invalid_chars = ['?', ':', '*', '"', '<', '>', '|', '/', '\\']
for char in invalid_chars:
safe_title = safe_title.replace(char, "")
safe_title = safe_title.replace("...", "").replace("..", "").replace(".", "").strip()
# Try different filename patterns that might exist
possible_filenames = [
f"{artist} - {safe_title}.mp4", # Songlist mode
f"{channel_name} - {safe_title}.mp4", # Latest-per-channel mode
f"{artist} - {safe_title} (Karaoke Version).mp4" # Channel videos mode
]
for filename in possible_filenames:
if len(filename) > DEFAULT_FILENAME_LENGTH_LIMIT:
# Apply length limits if needed
safe_artist = artist.replace("'", "").replace('"', "").strip()
filename = f"{safe_artist[:DEFAULT_ARTIST_LENGTH_LIMIT]} - {safe_title[:DEFAULT_TITLE_LENGTH_LIMIT]}.mp4"
output_path = self.downloads_dir / channel_name / filename
if output_path.exists() and output_path.stat().st_size > 0:
return True, "file exists", total_filtered
# Check 3: Already on server (if server data provided)
if server_songs is not None and server_duplicates_tracking is not None:
from karaoke_downloader.server_manager import check_and_mark_server_duplicate
if check_and_mark_server_duplicate(server_songs, server_duplicates_tracking, artist, title, video_title, channel_name):
total_filtered += 1
return True, "on server", total_filtered
# Check 4: Previously failed download (bad file)
if self.tracker.is_song_failed(artist, title, channel_name, video_id):
return True, "previously failed", total_filtered
return False, None, total_filtered
def _mark_song_failed(self, artist, title, video_id, channel_name, error_message):
"""
Centralized method to mark a song as failed in tracking.
"""
self.tracker.mark_song_failed(artist, title, video_id, channel_name, error_message)
print(f"🏷️ Marked song as failed: {artist} - {title}")
def _handle_download_failure(self, artist, title, video_id, channel_name, error_type, error_details=""):
"""
Centralized method to handle download failures.
Args:
artist: Song artist
title: Song title
video_id: YouTube video ID
channel_name: Channel name
error_type: Type of error (e.g., "yt-dlp failed", "file verification failed")
error_details: Additional error details
"""
error_msg = f"{error_type}"
if error_details:
error_msg += f": {error_details}"
self._mark_song_failed(artist, title, video_id, channel_name, error_msg)
def download_channel_videos(self, url, force_refresh=False, fuzzy_match=False, fuzzy_threshold=DEFAULT_FUZZY_THRESHOLD):
    """Download videos from a channel or playlist URL, respecting songlist-only and limit flags. Supports fuzzy matching.

    Lists the channel with yt-dlp (--flat-playlist), matches each video title
    against the songlist (exact or fuzzy), skips songs already downloaded or
    already on the server, then downloads up to `limit` matches.
    Returns False on a hard failure (no songlist, playlist fetch failed);
    True otherwise, even when individual downloads fail.
    NOTE(review): `force_refresh` is accepted but not used in this body — confirm intent.
    """
    channel_name, channel_id = get_channel_info(url)
    print(f"\n🎬 Downloading from channel: {channel_name} ({url})")
    songlist = load_songlist()
    if not songlist:
        print("⚠️ No songlist loaded. Skipping.")
        return False
    # Load server songs and duplicates tracking for availability checking
    server_songs = load_server_songs()
    server_duplicates_tracking = load_server_duplicates_tracking()
    # Per-run download cap; defaults to 1 when config has no 'limit' key.
    limit = self.config.get('limit', 1)
    # Enumerate the channel without downloading anything (flat playlist listing).
    cmd = [
        str(self.yt_dlp_path),
        '--flat-playlist',
        '--print', '%(title)s|%(id)s|%(url)s',
        url
    ]
    try:
        result = subprocess.run(cmd, capture_output=True, text=True, check=True)
        lines = result.stdout.strip().splitlines()
    except subprocess.CalledProcessError as e:
        print(f"❌ yt-dlp failed to fetch playlist: {e}")
        return False
    # Parse "title|id|url" lines; the trailing url field is ignored here.
    available_videos = []
    for line in lines:
        parts = line.split('|')
        if len(parts) >= 2:
            title, video_id = parts[0].strip(), parts[1].strip()
            available_videos.append({'title': title, 'id': video_id})
    # Normalize songlist for matching
    normalized_songlist = {
        create_song_key(s['artist'], s['title']): s for s in songlist
    }
    matches = []
    similarity = get_similarity_function()
    for video in available_videos:
        artist, title = extract_artist_title(video['title'])
        key = create_song_key(artist, title)
        if fuzzy_match:
            # Fuzzy match against all songlist keys
            best_score = 0
            best_song = None
            for song_key, song in normalized_songlist.items():
                score = similarity(key, song_key)
                if score > best_score:
                    best_score = score
                    best_song = song
            if best_score >= fuzzy_threshold and best_song:
                # Check if already downloaded or on server
                if not is_songlist_song_downloaded(self.songlist_tracking, best_song['artist'], best_song['title']):
                    # Check if already marked as server duplicate
                    if not is_song_marked_as_server_duplicate(server_duplicates_tracking, best_song['artist'], best_song['title']):
                        # Check if already on server and mark for future skipping
                        if not check_and_mark_server_duplicate(server_songs, server_duplicates_tracking, best_song['artist'], best_song['title'], video['title'], channel_name):
                            matches.append((video, best_song))
                            print(f" → Fuzzy match: {artist} - {title} <-> {best_song['artist']} - {best_song['title']} (score: {best_score})")
                            if len(matches) >= limit:
                                break
        else:
            if key in normalized_songlist:
                song = normalized_songlist[key]
                # Check if already downloaded or on server
                if not is_songlist_song_downloaded(self.songlist_tracking, song['artist'], song['title']):
                    # Check if already marked as server duplicate
                    if not is_song_marked_as_server_duplicate(server_duplicates_tracking, song['artist'], song['title']):
                        # Check if already on server and mark for future skipping
                        if not check_and_mark_server_duplicate(server_songs, server_duplicates_tracking, song['artist'], song['title'], video['title'], channel_name):
                            matches.append((video, song))
                            if len(matches) >= limit:
                                break
    if not matches:
        print("🎵 No new songlist matches found for this channel.")
        return True
    # Download only the first N matches
    for video, song in matches:
        artist, title = song['artist'], song['title']
        output_path = self.downloads_dir / channel_name / f"{artist} - {title} (Karaoke Version).mp4"
        output_path.parent.mkdir(parents=True, exist_ok=True)
        print(f"⬇️ Downloading: {artist} - {title} -> {output_path}")
        video_url = f"https://www.youtube.com/watch?v={video['id']}"
        cmd = [
            str(self.yt_dlp_path),
            "-o", str(output_path),
            "-f", self.config["download_settings"]["format"],
            video_url
        ]
        try:
            subprocess.run(cmd, check=True)
        except subprocess.CalledProcessError as e:
            print(f"❌ yt-dlp failed: {e}")
            # Mark song as failed in tracking immediately
            self._handle_download_failure(artist, title, video['id'], channel_name, "yt-dlp failed", str(e))
            continue
        # Post-download verification: file must exist, be non-empty, and be a real MP4.
        if not output_path.exists() or output_path.stat().st_size == 0:
            print(f"❌ Download failed or file is empty: {output_path}")
            # Mark song as failed in tracking immediately
            self._handle_download_failure(artist, title, video['id'], channel_name, "Download failed", "file does not exist or is empty")
            continue
        if not is_valid_mp4(output_path):
            print(f"❌ File is not a valid MP4: {output_path}")
            # Mark song as failed in tracking immediately
            self._handle_download_failure(artist, title, video['id'], channel_name, "Download failed", "file is not a valid MP4")
            continue
        # Tag and record the successful download in songlist tracking.
        add_id3_tags(output_path, f"{artist} - {title} (Karaoke Version)", channel_name)
        mark_songlist_song_downloaded(self.songlist_tracking, artist, title, channel_name, output_path)
        print(f"✅ Downloaded and tracked: {artist} - {title}")
        print(f"🎉 All post-processing complete for: {output_path}")
    return True
def download_songlist_across_channels(self, channel_urls, limit=None, force_refresh_download_plan=False, fuzzy_match=False, fuzzy_threshold=DEFAULT_FUZZY_THRESHOLD):
    """
    For each song in the songlist, try each channel in order and download from the first channel where it is found.
    Download up to 'limit' songs, skipping any that cannot be found, until the limit is reached or all possible matches are exhausted.

    Two modes:
      - Fast mode (limit is not None): scan channels lazily and stop as soon
        as `limit` successful downloads are reached.
      - Full-plan mode (limit is None): build (or load from cache) a complete
        download plan for all channels, then execute it.
    Returns False when no songlist is loaded; otherwise True / the plan
    executor's success flag.
    """
    songlist = load_songlist()
    if not songlist:
        print("⚠️ No songlist loaded. Skipping.")
        return False
    # Filter for songs not yet downloaded
    undownloaded = [s for s in songlist if not is_songlist_song_downloaded(self.songlist_tracking, s['artist'], s['title'])]
    print(f"\n🎯 {len(songlist)} total unique songs in songlist.")
    print(f"\n🎯 {len(undownloaded)} unique songlist songs to download.")
    # Load server songs and duplicates tracking for availability checking
    server_songs = load_server_songs()
    server_duplicates_tracking = load_server_duplicates_tracking()
    # Further filter out songs already on server or marked as duplicates
    not_on_server = []
    server_available = 0
    marked_duplicates = 0
    for song in undownloaded:
        artist, title = song['artist'], song['title']
        # Check if already marked as server duplicate
        if is_song_marked_as_server_duplicate(server_duplicates_tracking, artist, title):
            marked_duplicates += 1
            continue
        # Check if already on server and mark for future skipping
        if check_and_mark_server_duplicate(server_songs, server_duplicates_tracking, artist, title, f"{artist} - {title}", "songlist"):
            server_available += 1
            continue
        not_on_server.append(song)
    if server_available > 0:
        print(f"\n🎵 {server_available} songs already available on server, skipping.")
    if marked_duplicates > 0:
        print(f"\n🏷️ {marked_duplicates} songs previously marked as server duplicates, skipping.")
    undownloaded = not_on_server
    print(f"\n🎯 {len(undownloaded)} songs need to be downloaded.")
    if not undownloaded:
        print("🎵 All songlist songs already downloaded.")
        return True
    # --- FAST MODE: Early exit and deduplication if limit is set ---
    if limit is not None:
        print("\n⚡ Fast mode enabled: will stop as soon as limit is reached with successful downloads.")
        similarity = get_similarity_function()
        downloaded_count = 0
        # Keys of songs already downloaded this run, to avoid duplicates
        # when the same song appears on multiple channels.
        unique_keys = set()
        total_attempted = 0
        for channel_url in channel_urls:
            channel_name, channel_id = get_channel_info(channel_url)
            # Cached channel listing (refreshed by the tracker's own policy).
            available_videos = self.tracker.get_channel_video_list(
                channel_url,
                yt_dlp_path=str(self.yt_dlp_path),
                force_refresh=False
            )
            for song in undownloaded:
                artist, title = song['artist'], song['title']
                key = create_song_key(artist, title)
                if key in unique_keys:
                    continue  # Already downloaded or queued
                # Check if should skip this song during planning phase
                should_skip, reason, _ = self._should_skip_song(
                    artist, title, channel_name, None, f"{artist} - {title}",
                    server_songs, server_duplicates_tracking
                )
                if should_skip:
                    continue
                found = False
                for video in available_videos:
                    v_artist, v_title = extract_artist_title(video['title'])
                    video_key = create_song_key(v_artist, v_title)
                    if fuzzy_match:
                        score = similarity(key, video_key)
                        if score >= fuzzy_threshold:
                            found = True
                    else:
                        if is_exact_match(artist, title, video['title']):
                            found = True
                    if found:
                        print(f"\n⬇️ Downloading {downloaded_count+1} of {limit}:")
                        print(f" 📋 Songlist: {artist} - {title}")
                        print(f" 🎬 Video: {video['title']} ({channel_name})")
                        if fuzzy_match:
                            print(f" 🎯 Match Score: {score:.1f}%")
                        # --- Download logic (reuse from below) ---
                        # Filename sanitization: strip quoting/punctuation and
                        # characters invalid on Windows filesystems.
                        safe_title = title.replace("(From ", "").replace(")", "").replace(" - ", " ").replace(":", "").replace("'", "").replace('"', "")
                        safe_artist = artist.replace("'", "").replace('"', "")
                        invalid_chars = ['?', ':', '*', '"', '<', '>', '|', '/', '\\']
                        for char in invalid_chars:
                            safe_title = safe_title.replace(char, "")
                            safe_artist = safe_artist.replace(char, "")
                        safe_title = safe_title.replace("...", "").replace("..", "").replace(".", "").strip()
                        safe_artist = safe_artist.strip()
                        filename = f"{safe_artist} - {safe_title}.mp4"
                        # Call the actual download function (simulate the same as in the plan loop)
                        success = download_video_and_track(
                            self.yt_dlp_path,
                            self.config,
                            self.downloads_dir,
                            self.songlist_tracking,
                            channel_name,
                            channel_url,
                            video['id'],
                            video['title'],
                            artist,
                            title,
                            filename
                        )
                        total_attempted += 1
                        if success:
                            downloaded_count += 1
                            unique_keys.add(key)
                            print(f"✅ Downloaded and tracked: {artist} - {title}")
                        else:
                            print(f"❌ Download failed: {artist} - {title}")
                        if downloaded_count >= limit:
                            print(f"🎉 Reached download limit ({limit}). Stopping early.")
                            return True
                        break  # Don't try to match this song to other videos in this channel
        print(f"🎉 Downloaded {downloaded_count} unique songlist songs (limit was {limit}).")
        if downloaded_count < limit:
            print(f"⚠️ Only {downloaded_count} songs were downloaded. Some may not have been found or downloads failed.")
        return True
    # --- ORIGINAL FULL PLAN MODE (no limit) ---
    # --- Download plan cache logic ---
    plan_mode = "songlist"
    # Include all parameters that affect the plan generation
    plan_kwargs = {
        "limit": limit or "all",
        "channels": len(channel_urls),
        "fuzzy": fuzzy_match,
        "threshold": fuzzy_threshold
    }
    # Add channel URLs hash to ensure same channels = same cache
    channels_hash = hashlib.md5("|".join(sorted(channel_urls)).encode()).hexdigest()[:8]
    plan_kwargs["channels_hash"] = channels_hash
    cache_file = get_download_plan_cache_file(plan_mode, **plan_kwargs)
    use_cache = False
    download_plan, unmatched = load_cached_plan(cache_file)
    if not force_refresh_download_plan and download_plan is not None:
        use_cache = True
    if not use_cache:
        print("\n🔍 Pre-scanning channels for matches...")
        download_plan, unmatched = build_download_plan(
            channel_urls,
            undownloaded,
            self.tracker,
            self.yt_dlp_path,
            fuzzy_match=fuzzy_match,
            fuzzy_threshold=fuzzy_threshold
        )
        save_plan_cache(cache_file, download_plan, unmatched)
    print(f"\n📊 Download plan ready: {len(download_plan)} songs will be downloaded.")
    print(f"{len(unmatched)} songs could not be found in any channel.")
    if unmatched:
        print("Unmatched songs:")
        for song in unmatched[:DEFAULT_DISPLAY_LIMIT]:
            print(f" - {song['artist']} - {song['title']}")
        if len(unmatched) > DEFAULT_DISPLAY_LIMIT:
            print(f" ...and {len(unmatched)-DEFAULT_DISPLAY_LIMIT} more.")
    # --- Download phase ---
    downloaded_count, success = execute_download_plan(
        download_plan=download_plan,
        unmatched=unmatched,
        cache_file=cache_file,
        config=self.config,
        yt_dlp_path=self.yt_dlp_path,
        downloads_dir=self.downloads_dir,
        songlist_tracking=self.songlist_tracking,
        limit=limit
    )
    return success
def download_latest_per_channel(self, channel_urls, limit=5, force_refresh_download_plan=False, fuzzy_match=False, fuzzy_threshold=DEFAULT_FUZZY_THRESHOLD):
    """
    Download the latest N videos from each channel in channel_urls.
    - Pre-scan all channels for their latest N videos.
    - Check against local songs file to avoid duplicates.
    - Build a per-channel download plan and cache it.
    - Resume robustly if interrupted (removes each channel from the plan as it completes).
    - Deletes the plan cache when all channels are done.

    NOTE(review): `fuzzy_match`/`fuzzy_threshold` only affect the cache key in
    this body, not the matching itself — confirm intent.
    Returns True (individual download failures are logged, not fatal).
    """
    # Load server songs for availability checking
    server_songs = load_server_songs()
    server_duplicates_tracking = load_server_duplicates_tracking()
    plan_mode = "latest_per_channel"
    # Include all parameters that affect the plan generation
    plan_kwargs = {
        "limit": limit,
        "channels": len(channel_urls),
        "fuzzy": fuzzy_match,
        "threshold": fuzzy_threshold
    }
    # Add channel URLs hash to ensure same channels = same cache
    channels_hash = hashlib.md5("|".join(sorted(channel_urls)).encode()).hexdigest()[:8]
    plan_kwargs["channels_hash"] = channels_hash
    cache_file = get_download_plan_cache_file(plan_mode, **plan_kwargs)
    use_cache = False
    # Reuse a cached plan only when it is fresh enough; any load error falls
    # back to rebuilding the plan from scratch.
    if not force_refresh_download_plan and cache_file.exists():
        try:
            with open(cache_file, 'r', encoding='utf-8') as f:
                plan_data = json.load(f)
            cache_time = datetime.fromisoformat(plan_data.get('timestamp'))
            if datetime.now() - cache_time < timedelta(days=DEFAULT_CACHE_EXPIRATION_DAYS):
                print(f"🗂️ Using cached latest-per-channel plan from {cache_time} ({cache_file.name}).")
                channel_plans = plan_data['channel_plans']
                use_cache = True
        except Exception as e:
            print(f"⚠️ Could not load latest-per-channel plan cache: {e}")
    if not use_cache:
        print("\n🔎 Pre-scanning all channels for latest videos...")
        channel_plans = []
        total_found = 0
        total_filtered = 0
        total_marked = 0
        for channel_url in channel_urls:
            channel_name, channel_id = get_channel_info(channel_url)
            print(f"\n🚦 Starting channel: {channel_name} ({channel_url})")
            available_videos = self.tracker.get_channel_video_list(
                channel_url,
                yt_dlp_path=str(self.yt_dlp_path),
                force_refresh=False
            )
            print(f" → Found {len(available_videos)} total videos for this channel.")
            # Pre-filter: Create a set of known duplicate keys for O(1) lookup
            known_duplicate_keys = set()
            for song_key in server_duplicates_tracking.keys():
                known_duplicate_keys.add(song_key)
            # Pre-filter videos to exclude known duplicates before processing
            pre_filtered_videos = []
            for video in available_videos:
                artist, title = extract_artist_title(video['title'])
                song_key = create_song_key(artist, title)
                if song_key not in known_duplicate_keys:
                    pre_filtered_videos.append(video)
            print(f" → After pre-filtering: {len(pre_filtered_videos)} videos not previously marked as duplicates.")
            # Process videos until we reach the limit for this channel
            filtered_videos = []
            videos_checked = 0
            for video in pre_filtered_videos:
                if len(filtered_videos) >= limit:
                    break  # We have enough videos for this channel
                videos_checked += 1
                artist, title = extract_artist_title(video['title'])
                # Check if should skip this song during planning phase
                should_skip, reason, filtered_count = self._should_skip_song(
                    artist, title, channel_name, video['id'], video['title'],
                    server_songs, server_duplicates_tracking
                )
                if should_skip:
                    total_filtered += 1
                    if reason == "on server":
                        total_marked += filtered_count
                    continue
                filtered_videos.append(video)
            print(f" → After processing: {len(filtered_videos)} videos to download (checked {videos_checked} videos, filtered out {videos_checked - len(filtered_videos)} already on server).")
            total_found += len(filtered_videos)
            channel_plans.append({
                'channel_name': channel_name,
                'channel_url': channel_url,
                'videos': filtered_videos
            })
        print(f"\n📊 Summary: {total_found} videos to download across {len(channel_plans)} channels (filtered out {total_filtered} already on server, marked {total_marked} new duplicates for future skipping).")
        # Persist the freshly-built plan so an interrupted run can resume.
        plan_data = {
            'timestamp': datetime.now().isoformat(),
            'channel_plans': channel_plans
        }
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump(plan_data, f, indent=2, ensure_ascii=False)
        print(f"🗂️ Saved new latest-per-channel plan cache: {cache_file.name}")
    # --- Download phase ---
    total_channels = len(channel_plans)
    for idx, channel_plan in enumerate(channel_plans):
        channel_name = channel_plan['channel_name']
        channel_url = channel_plan['channel_url']
        videos = channel_plan['videos']
        print(f"\n⬇️ Downloading {len(videos)} videos from channel {idx+1} of {total_channels}: {channel_name}")
        for v_idx, video in enumerate(videos):
            title = video['title']
            video_id = video['id']
            # Sanitize filename
            safe_title = title
            invalid_chars = ['?', ':', '*', '"', '<', '>', '|', '/', '\\']
            for char in invalid_chars:
                safe_title = safe_title.replace(char, "")
            safe_title = safe_title.replace("...", "").replace("..", "").replace(".", "").strip()
            filename = f"{channel_name} - {safe_title}.mp4"
            # Limit filename length to avoid Windows path issues
            if len(filename) > DEFAULT_FILENAME_LENGTH_LIMIT:
                filename = f"{channel_name[:DEFAULT_ARTIST_LENGTH_LIMIT]} - {safe_title[:DEFAULT_TITLE_LENGTH_LIMIT]}.mp4"
            output_path = self.downloads_dir / channel_name / filename
            output_path.parent.mkdir(parents=True, exist_ok=True)
            print(f" ({v_idx+1}/{len(videos)}) Downloading: {title} -> {output_path}")
            video_url = f"https://www.youtube.com/watch?v={video_id}"
            dlp_cmd = [
                str(self.yt_dlp_path),
                "--no-check-certificates",
                "--ignore-errors",
                "--no-warnings",
                "-o", str(output_path),
                "-f", self.config["download_settings"]["format"],
                video_url
            ]
            try:
                result = subprocess.run(dlp_cmd, capture_output=True, text=True, check=True)
                print(f" ✅ yt-dlp completed successfully")
            except subprocess.CalledProcessError as e:
                print(f" ❌ yt-dlp failed with exit code {e.returncode}")
                print(f" ❌ yt-dlp stderr: {e.stderr}")
                # Mark song as failed in tracking immediately
                artist, title_clean = extract_artist_title(title)
                self._handle_download_failure(artist, title_clean, video_id, channel_name, "yt-dlp failed", f"exit code {e.returncode}: {e.stderr}")
                continue
            if not output_path.exists() or output_path.stat().st_size == 0:
                print(f" ❌ Download failed or file is empty: {output_path}")
                # Mark song as failed in tracking immediately
                artist, title_clean = extract_artist_title(title)
                self._handle_download_failure(artist, title_clean, video_id, channel_name, "Download failed", "file does not exist or is empty")
                continue
            # Extract artist and title for tracking
            artist, title_clean = extract_artist_title(title)
            # Add ID3 tags
            add_id3_tags(output_path, title, channel_name)
            # Mark as downloaded in tracking system
            file_size = output_path.stat().st_size if output_path.exists() else None
            self.tracker.mark_song_downloaded(artist, title_clean, video_id, channel_name, output_path, file_size)
            print(f" ✅ Downloaded and tagged: {title}")
        # After channel is done, remove it from the plan and update cache
        channel_plans[idx]['videos'] = []
        with open(cache_file, 'w', encoding='utf-8') as f:
            json.dump({'timestamp': datetime.now().isoformat(), 'channel_plans': channel_plans}, f, indent=2, ensure_ascii=False)
        print(f" 🗑️ Channel {channel_name} completed and removed from plan cache.")
    # After all channels are done, delete the cache
    if cache_file.exists():
        try:
            cache_file.unlink()
            print(f"🗑️ Deleted latest-per-channel plan cache after completion: {cache_file.name}")
        except Exception as e:
            print(f"⚠️ Could not delete latest-per-channel plan cache: {e}")
    print(f"🎉 All latest videos downloaded for all channels!")
    return True
def reset_songlist_all():
    """Delete all files tracked in songlist_tracking.json, clear songlist_tracking.json, and remove songlist songs from karaoke_tracking.json."""
    import json
    from pathlib import Path

    songlist_tracking_file = Path('data/songlist_tracking.json')
    karaoke_tracking_file = Path('data/karaoke_tracking.json')

    # Load the songlist tracking map (empty when the file is absent).
    tracking = {}
    if songlist_tracking_file.exists():
        with open(songlist_tracking_file, 'r', encoding='utf-8') as f:
            tracking = json.load(f)

    # Remove every downloaded file the tracking map points at; failures to
    # delete individual files are reported but do not abort the reset.
    for entry in tracking.values():
        file_path = entry.get('file_path')
        if not file_path:
            continue
        p = Path(file_path)
        try:
            if p.exists():
                p.unlink()
                print(f"🗑️ Deleted: {p}")
        except Exception as e:
            print(f"⚠️ Could not delete {p}: {e}")

    # Reset the songlist tracking file itself.
    songlist_tracking_file.write_text("{}", encoding="utf-8")
    print("🧹 Cleared songlist_tracking.json")

    # Drop the matching entries from the global karaoke tracking file. A song
    # matches when its "artist_normalizedtitle" key appears in the (pre-reset)
    # songlist tracking map.
    if karaoke_tracking_file.exists():
        with open(karaoke_tracking_file, 'r', encoding='utf-8') as f:
            karaoke_data = json.load(f)
        song_keys_to_remove = [
            song_id
            for song_id, song in karaoke_data.get('songs', {}).items()
            if f"{song.get('artist', '').lower()}_{normalize_title(song.get('title', song.get('name', '')))}" in tracking
        ]
        for song_id in song_keys_to_remove:
            del karaoke_data['songs'][song_id]
        with open(karaoke_tracking_file, 'w', encoding='utf-8') as f:
            json.dump(karaoke_data, f, indent=2, ensure_ascii=False)
        print(f"🧹 Removed {len(song_keys_to_remove)} songlist songs from karaoke_tracking.json")
    print("✅ Global songlist reset complete.")
# For brevity, the rest of the class methods should be copied here from the original download_karaoke.py,
# updating all references to use the new karaoke_downloader.* imports as needed.