import re
import os
import json
import shutil
from datetime import datetime


# --- Simple Configuration ---
CHANNELS_FILE = 'channels.txt'
PLAYLIST_FILE = 'playlist.m3u'
IMPORT_FILE = 'bulk_import.m3u'
LOG_FILE = 'playlist_update.log'

# Config files (optional)
SETTINGS_FILE = 'config/settings.json'
GROUP_OVERRIDES_FILE = 'config/group_overrides.json'
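
# Both config files are optional JSON. An illustrative settings.json —
# keys and defaults mirror load_settings() below:
#
#   {
#     "remove_duplicates": true,
#     "sort_channels": true,
#     "backup_before_import": true,
#     "auto_cleanup_import": true,
#     "auto_detect_country": true
#   }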


def log_message(message, level="INFO"):
    """Logs messages to file and prints them."""
    timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
    formatted_message = f"[{timestamp}] {level}: {message}"

    try:
        with open(LOG_FILE, 'a', encoding='utf-8') as f:
            f.write(formatted_message + "\n")
    except Exception as e:
        print(f"ERROR: Could not write to log: {e}")

    print(formatted_message)


def load_settings():
    """Load settings with defaults."""
    default_settings = {
        "remove_duplicates": True,
        "sort_channels": True,
        "backup_before_import": True,
        "auto_cleanup_import": True,
        "auto_detect_country": True
    }

    if os.path.exists(SETTINGS_FILE):
        try:
            with open(SETTINGS_FILE, 'r', encoding='utf-8') as f:
                settings = json.load(f)
                return {**default_settings, **settings}
        except Exception as e:
            log_message(f"Could not load settings, using defaults: {e}", "WARNING")

    return default_settings


def load_group_overrides():
    """Load group overrides."""
    if os.path.exists(GROUP_OVERRIDES_FILE):
        try:
            with open(GROUP_OVERRIDES_FILE, 'r', encoding='utf-8') as f:
                return json.load(f)
        except Exception as e:
            log_message(f"Could not load group overrides: {e}", "WARNING")

    return {}
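
# Illustrative group_overrides.json: each key is matched as a lowercase
# substring of the stream name, and its value becomes the channel's group
# (see apply_auto_country_detection below). The example entries are
# assumptions, not required values:
#
#   {
#     "sky sports": "🇬🇧 United Kingdom",
#     "espn": "🇺🇸 United States"
#   }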


def detect_country_from_channel(channel_name, epg_id="", logo_url=""):
    """
    Detect a channel's country using simple keyword heuristics on the
    channel name and EPG id.
    """
    # Convert to lowercase for easier matching
    name_lower = channel_name.lower()
    epg_lower = epg_id.lower()

    log_message(f"Detecting country for: '{channel_name}' (EPG: '{epg_id}')", "DEBUG")

    # UK Detection
    if "sky" in name_lower or ".uk" in epg_lower or "british" in name_lower or "bbc" in name_lower or "itv" in name_lower:
        log_message(f"Detected UK for: {channel_name}", "INFO")
        return "🇬🇧 United Kingdom"

    # US Detection
    if "usa" in name_lower or "us " in name_lower or ".us" in epg_lower or "america" in name_lower or "cnn" in name_lower or "espn" in name_lower or "fox" in name_lower:
        log_message(f"Detected US for: {channel_name}", "INFO")
        return "🇺🇸 United States"

    # Canada Detection
    if "canada" in name_lower or "cbc" in name_lower or ".ca" in epg_lower or "ctv" in name_lower:
        log_message(f"Detected Canada for: {channel_name}", "INFO")
        return "🇨🇦 Canada"

    # Germany Detection
    if "german" in name_lower or ".de" in epg_lower or "ard" in name_lower or "zdf" in name_lower:
        log_message(f"Detected Germany for: {channel_name}", "INFO")
        return "🇩🇪 Germany"

    # France Detection
    if "france" in name_lower or ".fr" in epg_lower or "tf1" in name_lower:
        log_message(f"Detected France for: {channel_name}", "INFO")
        return "🇫🇷 France"

    # No match found
    log_message(f"No country detected for: {channel_name}", "DEBUG")
    return "Uncategorized"


def apply_auto_country_detection(channel, group_overrides, settings):
    """
    Enhanced version of apply_group_overrides that includes auto-detection.
    """
    stream_name = channel.get('Stream name', '')
    epg_id = channel.get('EPG id', '')
    logo_url = channel.get('Logo', '')
    current_group = channel.get('Group', 'Uncategorized')

    # First try manual overrides (highest priority)
    stream_name_lower = stream_name.lower()
    for key, new_group in group_overrides.items():
        if key.lower() in stream_name_lower:
            channel['Group'] = new_group
            log_message(f"Manual override: '{stream_name}' → {new_group}", "DEBUG")
            return channel

    # If auto-detection is enabled, try it
    if settings.get('auto_detect_country', True):
        detected_country = detect_country_from_channel(stream_name, epg_id, logo_url)

        # Only override if we detected something specific (not "Uncategorized")
        if detected_country != "Uncategorized":
            channel['Group'] = detected_country
            log_message(f"Auto-detected: '{stream_name}' → {detected_country}", "INFO")
        else:
            # Keep existing group or set to Uncategorized
            if current_group in ['', 'Unknown', 'Other']:
                channel['Group'] = "Uncategorized"
    else:
        # Auto-detection disabled, use manual overrides only
        if current_group in ['', 'Unknown', 'Other']:
            channel['Group'] = "Uncategorized"

    return channel


def parse_channel_block(block):
    """Parse a channel block from channels.txt."""
    channel_data = {}
    lines = block.strip().split('\n')

    for line in lines:
        if '=' in line:
            key, value = line.split('=', 1)
            key = key.strip()
            value = value.strip()
            channel_data[key] = value

    return channel_data
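
# A channels.txt block is a run of "Key = value" lines, one block per channel,
# blocks separated by blank lines. Illustrative example (values assumed):
#
#   Group = 🇬🇧 United Kingdom
#   Stream name = BBC One
#   Logo = https://example.com/bbc-one.png
#   EPG id = bbc.one.uk
#   Stream URL = http://example.com/stream.m3u8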


def parse_m3u_entry(extinf_line, url_line):
    """Parse M3U entry."""
    channel = {}

    # Extract attributes
    tvg_id_match = re.search(r'tvg-id="([^"]*)"', extinf_line)
    tvg_logo_match = re.search(r'tvg-logo="([^"]*)"', extinf_line)
    group_title_match = re.search(r'group-title="([^"]*)"', extinf_line)
    tvg_name_match = re.search(r'tvg-name="([^"]*)"', extinf_line)

    channel['EPG id'] = tvg_id_match.group(1) if tvg_id_match else ''
    channel['Logo'] = tvg_logo_match.group(1) if tvg_logo_match else ''
    channel['Group'] = group_title_match.group(1) if group_title_match else 'Uncategorized'
    channel['TVG Name'] = tvg_name_match.group(1) if tvg_name_match else ''

    # Stream name: everything after the first comma (the EXTINF display name)
    stream_name_match = re.search(r',(.+)$', extinf_line)
    channel['Stream name'] = stream_name_match.group(1).strip() if stream_name_match else 'Unknown Channel'
    channel['Stream URL'] = url_line.strip()

    return channel
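
# Illustrative input pair (values are assumptions, not real channels):
#   #EXTINF:-1 tvg-id="bbc.one.uk" tvg-logo="https://example.com/logo.png" group-title="News" tvg-name="BBC One",BBC One
#   http://example.com/stream.m3u8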


def convert_to_channels_txt_block(channel_data):
    """Convert to channels.txt format."""
    block = []
    block.append(f"Group = {channel_data.get('Group', 'Uncategorized')}")
    block.append(f"Stream name = {channel_data.get('Stream name', 'Unknown Channel')}")
    block.append(f"Logo = {channel_data.get('Logo', '')}")
    block.append(f"EPG id = {channel_data.get('EPG id', '')}")
    block.append(f"Stream URL = {channel_data.get('Stream URL', '')}")
    return "\n".join(block)


def get_channel_signature(channel):
    """Create unique signature for duplicate detection."""
    stream_name = channel.get('Stream name', '').strip().lower()
    stream_url = channel.get('Stream URL', '').strip().lower()

    # Normalize the name: collapse whitespace, drop punctuation
    stream_name_clean = re.sub(r'\s+', ' ', stream_name)
    stream_name_clean = re.sub(r'[^\w\s]', '', stream_name_clean)

    return f"{stream_name_clean}|{stream_url}"


def remove_duplicates(channels, settings):
    """Remove duplicate channels."""
    if not settings.get('remove_duplicates', True):
        log_message("Duplicate removal disabled", "INFO")
        return channels

    seen_signatures = set()
    unique_channels = []
    duplicate_count = 0

    for channel in channels:
        signature = get_channel_signature(channel)

        if signature not in seen_signatures:
            seen_signatures.add(signature)
            unique_channels.append(channel)
        else:
            duplicate_count += 1

    if duplicate_count > 0:
        log_message(f"Removed {duplicate_count} duplicate channels", "INFO")
    else:
        log_message("No duplicates found", "INFO")

    return unique_channels


def update_existing_channels_with_country_detection():
    """Re-process existing channels.txt to apply country detection to old channels."""
    if not os.path.exists(CHANNELS_FILE):
        log_message("No channels.txt file found", "WARNING")
        return

    settings = load_settings()
    group_overrides = load_group_overrides()

    log_message("Starting to re-detect countries for ALL existing channels...", "INFO")

    # Read existing channels
    with open(CHANNELS_FILE, 'r', encoding='utf-8') as f:
        content = f.read()

    log_message(f"Read {len(content)} characters from channels.txt", "DEBUG")

    channel_blocks = re.split(r'\n\s*\n+', content.strip())
    log_message(f"Found {len(channel_blocks)} channel blocks", "INFO")

    updated_channels = []
    changes_made = 0

    for i, block in enumerate(channel_blocks):
        if block.strip():
            channel = parse_channel_block(block)
            if channel:
                old_group = channel.get('Group', 'Uncategorized')
                stream_name = channel.get('Stream name', 'Unknown')
                epg_id = channel.get('EPG id', '')

                log_message(f"Processing channel {i+1}: '{stream_name}' (currently in '{old_group}')", "DEBUG")

                # Force auto-detection regardless of the current group
                detected_country = detect_country_from_channel(stream_name, epg_id, "")

                # Always update if we detected something specific
                if detected_country != "Uncategorized":
                    channel['Group'] = detected_country
                    changes_made += 1
                    log_message(f"CHANGED: '{stream_name}' from '{old_group}' to '{detected_country}'", "INFO")
                else:
                    log_message(f"NO CHANGE: '{stream_name}' stays as '{old_group}'", "DEBUG")

                updated_channels.append(channel)

    # Always rewrite the file if we have channels
    if updated_channels:
        log_message(f"Rewriting channels.txt with {len(updated_channels)} channels ({changes_made} changes made)", "INFO")

        # Create backup
        backup_name = f"{CHANNELS_FILE}.backup.{datetime.now().strftime('%Y%m%d_%H%M%S')}"
        try:
            shutil.copy2(CHANNELS_FILE, backup_name)
            log_message(f"Created backup: {backup_name}", "INFO")
        except Exception as e:
            log_message(f"Could not create backup: {e}", "WARNING")

        # Write updated channels
        try:
            with open(CHANNELS_FILE, 'w', encoding='utf-8') as f:
                for i, channel in enumerate(updated_channels):
                    if i > 0:
                        f.write("\n\n")

                    block_content = convert_to_channels_txt_block(channel)
                    f.write(block_content)

            log_message("Successfully rewrote channels.txt with country detection", "INFO")
        except Exception as e:
            log_message(f"ERROR writing channels.txt: {e}", "ERROR")
    else:
        log_message("No channels found to update", "WARNING")


def process_import():
    """Process bulk import file."""
    settings = load_settings()
    group_overrides = load_group_overrides()

    if not os.path.exists(IMPORT_FILE):
        log_message(f"No {IMPORT_FILE} found, skipping import", "INFO")
        return []

    log_message(f"Processing {IMPORT_FILE}...", "INFO")

    imported_channels = []

    try:
        with open(IMPORT_FILE, 'r', encoding='utf-8') as f:
            lines = f.readlines()

        log_message(f"Found {len(lines)} lines in import file", "INFO")

        i = 0
        while i < len(lines):
            line = lines[i].strip()
            if line.startswith('#EXTINF:'):
                if i + 1 < len(lines):
                    extinf_line = line
                    url_line = lines[i+1].strip()

                    # Skip entries whose next line is empty or another directive
                    if not url_line or url_line.startswith('#'):
                        i += 1
                        continue

                    channel_data = parse_m3u_entry(extinf_line, url_line)
                    channel_data = apply_auto_country_detection(channel_data, group_overrides, settings)

                    if channel_data.get('Stream name') and channel_data.get('Stream URL'):
                        imported_channels.append(channel_data)

                    i += 2
                else:
                    i += 1
            else:
                i += 1

        log_message(f"Parsed {len(imported_channels)} channels from import", "INFO")

        # Remove duplicates from import
        if imported_channels:
            imported_channels = remove_duplicates(imported_channels, settings)

        # Check existing channels
        existing_channels = []
        if os.path.exists(CHANNELS_FILE):
            with open(CHANNELS_FILE, 'r', encoding='utf-8') as f:
                content = f.read()
            channel_blocks = re.split(r'\n\s*\n+', content.strip())
            for block in channel_blocks:
                if block.strip():
                    existing_channels.append(parse_channel_block(block))

        existing_signatures = {get_channel_signature(ch) for ch in existing_channels}
        new_channels = []

        for channel in imported_channels:
            if get_channel_signature(channel) not in existing_signatures:
                new_channels.append(channel)

        imported_channels = new_channels
        log_message(f"Final import: {len(imported_channels)} new channels", "INFO")

        # Write to channels.txt
        if imported_channels:
            lines_before = 0
            if os.path.exists(CHANNELS_FILE):
                with open(CHANNELS_FILE, 'r', encoding='utf-8') as f:
                    lines_before = len(f.readlines())

            with open(CHANNELS_FILE, 'a', encoding='utf-8') as f:
                for i, channel in enumerate(imported_channels):
                    if i > 0 or lines_before > 0:
                        f.write("\n\n")

                    block_content = convert_to_channels_txt_block(channel)
                    f.write(block_content)

            log_message(f"Successfully imported {len(imported_channels)} channels", "INFO")
        else:
            log_message("No new channels to import", "INFO")

    except Exception as e:
        log_message(f"Error processing import: {e}", "ERROR")
        return imported_channels

    # Clean up import file
    if settings.get('auto_cleanup_import', True):
        try:
            os.remove(IMPORT_FILE)
            log_message(f"Cleaned up {IMPORT_FILE}", "INFO")
        except Exception as e:
            log_message(f"Could not remove {IMPORT_FILE}: {e}", "WARNING")

    return imported_channels


def generate_playlist():
    """Main entry point: refresh groups, process imports, and write the M3U."""
    # Clear log
    if os.path.exists(LOG_FILE):
        open(LOG_FILE, 'w').close()

    log_message("Starting playlist generation...", "INFO")

    settings = load_settings()
    group_overrides = load_group_overrides()

    log_message(f"Settings loaded: {settings}", "INFO")
    log_message(f"Group overrides loaded: {group_overrides}", "INFO")

    # FIRST: update existing channels with country detection
    update_existing_channels_with_country_detection()

    # Process import
    imported_channels = process_import()
    log_message(f"Import returned {len(imported_channels)} channels", "INFO")

    # Read channels.txt (now with updated countries)
    if not os.path.exists(CHANNELS_FILE):
        log_message(f"Error: {CHANNELS_FILE} not found", "ERROR")
        return

    with open(CHANNELS_FILE, 'r', encoding='utf-8') as f:
        content = f.read()

    # Parse channels
    channel_blocks = re.split(r'\n\s*\n+', content.strip())
    parsed_channels = []

    for block in channel_blocks:
        if block.strip():
            channel = parse_channel_block(block)
            if channel:
                # Country detection already applied above, just load the channels
                parsed_channels.append(channel)

    log_message(f"Parsed {len(parsed_channels)} channels", "INFO")

    # Remove duplicates
    parsed_channels = remove_duplicates(parsed_channels, settings)

    # Sort channels
    if settings.get('sort_channels', True):
        parsed_channels.sort(key=lambda x: (x.get('Group', '').lower(), x.get('Stream name', '').lower()))
        log_message("Channels sorted by country and name", "INFO")

    # Build M3U
    m3u_lines = ["#EXTM3U"]
    valid_channels = 0

    # Count channels by country for stats
    country_stats = {}

    for channel in parsed_channels:
        stream_name = channel.get('Stream name', '')
        group_name = channel.get('Group', 'Uncategorized')
        logo_url = channel.get('Logo', '')
        epg_id = channel.get('EPG id', '')
        stream_url = channel.get('Stream URL', '')

        if not stream_name or not stream_url:
            continue

        extinf_attrs = [
            f'tvg-id="{epg_id}"',
            f'tvg-logo="{logo_url}"',
            f'group-title="{group_name}"',
            f'tvg-name="{stream_name}"'
        ]

        extinf_line = f"#EXTINF:-1 {' '.join(extinf_attrs)},{stream_name}"
        m3u_lines.append(extinf_line)
        m3u_lines.append(stream_url)
        valid_channels += 1

        # Count by country
        country_stats[group_name] = country_stats.get(group_name, 0) + 1

    # Write M3U
    try:
        with open(PLAYLIST_FILE, 'w', encoding='utf-8') as f:
            for line in m3u_lines:
                f.write(line + '\n')
        log_message(f"Generated {PLAYLIST_FILE} with {valid_channels} channels", "INFO")

        # Log country statistics
        log_message(f"Channels by country: {dict(sorted(country_stats.items(), key=lambda x: x[1], reverse=True))}", "INFO")

    except Exception as e:
        log_message(f"Error writing playlist: {e}", "ERROR")

    log_message("Playlist generation complete", "INFO")


if __name__ == "__main__":
    generate_playlist()