#!/usr/bin/env python3
"""
IPTV Enhanced Country Detection - Updated Version

Uses 3-point analysis: Channel Name + EPG ID + Logo URL.
Then filters to keep only legitimate countries.
"""

import os
import re
import shutil
from datetime import datetime
from pathlib import Path

# Ensure correct directory
script_dir = Path(__file__).parent
root_dir = script_dir.parent
# The following line is removed to ensure stable execution within the action
# os.chdir(root_dir)


def detect_country_from_channel_content(channel_name, epg_id="", logo_url="", stream_url=""):
    """
    Enhanced country detection using 3-point analysis.

    Priority: EPG ID > Logo URL > Channel Name > Stream URL.

    Returns a flag-prefixed country name (e.g. "🇨🇦 Canada"), or
    "Uncategorized" for streaming-platform channels and anything that
    cannot be identified.
    """
    # Combine all text for analysis
    all_text = f"{channel_name.lower()} {epg_id.lower()} {logo_url.lower()} {stream_url.lower()}"

    # STEP 1: Check for streaming services first (these go to Uncategorized)
    streaming_services = [
        "plex", "pluto", "tubi", "samsung", "xumo", "stirr", "crackle",
        "imdb tv", "daddylive", "drew247", "aixmedia", "moveonjoy",
        "drewlive24", "udptv", "a1xs.vip", "zekonew", "forcedtoplay",
        "cdn1host", "tvpass.org", "jmp2.uk/plu-", "provider-static.plex.tv",
        "images.pluto.tv"
    ]
    for service in streaming_services:
        if service in all_text:
            return "Uncategorized"

    # STEP 2: EPG ID detection (most reliable) - Enhanced
    epg_patterns = {
        ".ca": "🇨🇦 Canada",
        ".us": "🇺🇸 United States",
        ".uk": "🇬🇧 United Kingdom",
        ".ph": "🇵🇭 Philippines",
        ".au": "🇦🇺 Australia",
        ".jp": "🇯🇵 Japan",
        ".my": "🇲🇾 Malaysia",
        ".de": "🇩🇪 Germany",
        ".fr": "🇫🇷 France",
        ".es": "🇪🇸 Spain",
        ".it": "🇮🇹 Italy",
        ".br": "🇧🇷 Brazil",
        ".nl": "🇳🇱 Netherlands"
    }
    for domain, country in epg_patterns.items():
        if domain in epg_id.lower():
            return country

    # Enhanced Canadian EPG detection (provincial domains + station call signs)
    canadian_epg_patterns = [
        "cbc.", "ctv.", "global.", "tsn.", "sportsnet.", "citytv.", "aptn.",
        ".ab.ca", ".bc.ca", ".mb.ca", ".nb.ca", ".nl.ca", ".ns.ca", ".nt.ca",
        ".nu.ca", ".on.ca", ".pe.ca", ".qc.ca", ".sk.ca", ".yt.ca",
        "cfcn", "cky", "ctfo", "cjoh", "ckws"
    ]
    for pattern in canadian_epg_patterns:
        if pattern in epg_id.lower() or pattern in all_text:
            return "🇨🇦 Canada"

    # STEP 3: Enhanced specific channel fixes
    channel_lower = channel_name.lower()

    # Enhanced Canadian channels detection
    canadian_indicators = [
        # TSN variations
        "tsn 1", "tsn 2", "tsn 3", "tsn 4", "tsn 5",
        "tsn1", "tsn2", "tsn3", "tsn4", "tsn5",
        # CBC variations
        "cbc news", "cbc toronto", "cbc calgary", "cbc vancouver",
        "cbc winnipeg", "cbc montreal",
        # CTV variations
        "ctv calgary", "ctv vancouver", "ctv toronto", "ctv winnipeg",
        "ctv ottawa", "ctv montreal", "ctv atlantic", "ctv edmonton",
        "ctv saskatoon", "ctv regina", "ctv kitchener",
        # Regional station calls
        "cfcn", "cky", "ctfo", "cjoh", "ckws", "cfrn", "cfqc", "ckck", "chch",
        # Other Canadian broadcasters
        "sportsnet", "global tv", "citytv", "aptn", "omni", "tvo", "télé-québec"
    ]
    for indicator in canadian_indicators:
        if indicator in channel_lower:
            return "🇨🇦 Canada"

    # Enhanced BBC handling (distinguish US vs UK)
    if "bbc" in channel_lower:
        # BBC America is US
        if "bbc america" in channel_lower:
            return "🇺🇸 United States"
        # Most other BBC channels are UK
        elif any(x in channel_lower for x in ["bbc one", "bbc two", "bbc three",
                                              "bbc four", "bbc news", "bbc iplayer",
                                              "bbc scotland", "bbc wales",
                                              "bbc comedy", "bbc drama", "bbc earth"]):
            # Check if it's specifically UK version
            if not any(x in all_text for x in ["america", ".us", "usa"]):
                return "🇬🇧 United Kingdom"

    # US channels that were misclassified
    if any(x in channel_lower for x in ["tv land", "tvland", "we tv", "wetv",
                                        "all weddings we tv", "cheaters",
                                        "cheers", "christmas 365"]):
        return "🇺🇸 United States"

    # Enhanced US network detection
    us_networks = [
        "cbs", "nbc", "abc", "fox", "cnn", "espn", "hbo", "showtime", "starz",
        "cinemax", "mtv", "vh1", "comedy central", "cartoon network",
        "nickelodeon", "disney channel", "discovery", "history", "tlc", "hgtv",
        "food network", "travel channel", "lifetime", "hallmark", "e!", "bravo",
        "oxygen", "syfy", "usa network", "tnt", "tbs", "fx", "fxx", "amc",
        "ifc", "tcm", "turner classic"
    ]
    for network in us_networks:
        if network in channel_lower and not any(x in all_text for x in ["canada", ".ca", "uk", ".uk"]):
            return "🇺🇸 United States"

    # UK channels (but not BBC America)
    if "come dine with me" in channel_lower or "itv" in channel_lower:
        return "🇬🇧 United Kingdom"

    # Philippines news channels
    if any(x in channel_lower for x in ["anc global", "anc ph"]):
        return "🇵🇭 Philippines"

    # Japan anime channels
    if "animax" in channel_lower:
        return "🇯🇵 Japan"

    # STEP 4: Logo URL analysis
    logo_patterns = {
        "🇨🇦 Canada": ["/canada/", "/ca/", "canada.", "canadian"],
        "🇺🇸 United States": ["/usa/", "/us/", "united-states", "american"],
        "🇬🇧 United Kingdom": ["/uk/", "/united-kingdom/", "british", "england"],
        "🇩🇪 Germany": ["/germany/", "/de/", "german", "deutschland"],
        "🇫🇷 France": ["/france/", "/fr/", "french", "français"],
        "🇮🇹 Italy": ["/italy/", "/it/", "italian", "italiano"],
        "🇪🇸 Spain": ["/spain/", "/es/", "spanish", "español"],
        "🇳🇱 Netherlands": ["/netherlands/", "/nl/", "dutch", "nederland"],
        "🇦🇺 Australia": ["/australia/", "/au/", "australian", "aussie"],
        "🇯🇵 Japan": ["/japan/", "/jp/", "japanese", "日本"],
        "🇰🇷 South Korea": ["/korea/", "/kr/", "korean", "한국"],
        "🇮🇳 India": ["/india/", "/in/", "indian", "भारत"],
        "🇧🇷 Brazil": ["/brazil/", "/br/", "brazilian", "brasil"],
        "🇲🇽 Mexico": ["/mexico/", "/mx/", "mexican", "méxico"],
        "🇦🇷 Argentina": ["/argentina/", "/ar/", "argentinian", "argentina"],
        "🇵🇭 Philippines": ["/philippines/", "/ph/", "filipino", "pilipinas"]
    }
    for country, patterns in logo_patterns.items():
        for pattern in patterns:
            if pattern in logo_url.lower():
                return country

    # STEP 5: Enhanced broadcaster patterns (searched in the combined text)
    broadcaster_patterns = {
        "🇨🇦 Canada": [
            "cbc", "tsn", "ctv", "global", "sportsnet", "citytv", "aptn",
            "teletoon", "ytv", "discovery canada", "history canada", "slice",
            "w network", "oln", "hgtv canada", "food network canada",
            "showcase", "crave", "super channel", "hollywood suite"
        ],
        "🇺🇸 United States": [
            "cbs", "nbc", "abc", "fox", "cnn", "espn", "amc", "mtv",
            "comedy central", "discovery usa", "history usa", "tlc usa",
            "hgtv usa", "food network usa", "paramount", "nickelodeon usa",
            "cartoon network usa", "disney usa", "lifetime", "e!", "bravo usa"
        ],
        "🇬🇧 United Kingdom": [
            "bbc", "itv", "channel 4", "channel 5", "sky", "dave", "really",
            "yesterday", "discovery uk", "history uk", "tlc uk", "living",
            "alibi", "gold", "drama"
        ],
        "🇩🇪 Germany": [
            "ard", "zdf", "rtl", "pro7", "sat.1", "vox", "kabel eins",
            "super rtl", "rtl2", "discovery germany", "history germany",
            "tlc germany", "dmax", "sixx", "tele 5"
        ],
        "🇫🇷 France": [
            "tf1", "france 2", "france 3", "france 5", "m6", "canal+", "arte",
            "w9", "tmc", "discovery france", "history france", "tlc france",
            "planete+", "ushuaia tv"
        ],
        "🇮🇹 Italy": [
            "rai", "canale 5", "italia 1", "rete 4", "la7", "tv8", "nove",
            "20 mediaset", "discovery italia", "history italia", "dmax italia",
            "real time", "giallo"
        ],
        "🇪🇸 Spain": [
            "tve", "la 1", "la 2", "antena 3", "cuatro", "telecinco",
            "la sexta", "nova", "discovery spain", "history spain",
            "dmax spain", "mega", "neox", "clan"
        ],
        "🇳🇱 Netherlands": [
            "npo", "rtl 4", "rtl 5", "rtl 7", "sbs6", "veronica", "net5",
            "rtl z", "discovery netherlands", "history netherlands",
            "tlc netherlands"
        ],
        "🇦🇺 Australia": [
            "abc australia", "nine network", "seven network", "ten", "foxtel",
            "discovery australia", "history australia", "lifestyle"
        ],
        "🇯🇵 Japan": [
            "nhk", "fuji tv", "tbs", "tv asahi", "tv tokyo", "nippon tv",
            "animax"
        ],
        "🇰🇷 South Korea": [
            "kbs", "mbc", "sbs", "jtbc", "tvn", "ocn"
        ],
        "🇮🇳 India": [
            "zee", "star plus", "colors", "sony tv", "& tv", "discovery india"
        ],
        "🇧🇷 Brazil": [
            "globo", "sbt", "record", "band", "discovery brasil"
        ],
        "🇲🇽 Mexico": [
            "televisa", "tv azteca", "once tv", "discovery mexico"
        ],
        "🇦🇷 Argentina": [
            "telefe", "canal 13", "america tv", "discovery argentina"
        ],
        "🇵🇭 Philippines": [
            "abs-cbn", "gma", "anc", "tv5", "pba rush"
        ]
    }
    for country, keywords in broadcaster_patterns.items():
        for keyword in keywords:
            if keyword in all_text:
                return country

    return "Uncategorized"


def is_valid_country_group(group_name):
    """Check if group name is a valid country (not a streaming service)."""
    valid_countries = [
        "🇺🇸 United States", "🇨🇦 Canada", "🇬🇧 United Kingdom", "🇩🇪 Germany",
        "🇫🇷 France", "🇮🇹 Italy", "🇪🇸 Spain", "🇳🇱 Netherlands", "🇧🇪 Belgium",
        "🇦🇹 Austria", "🇨🇭 Switzerland", "🇸🇪 Sweden", "🇳🇴 Norway", "🇩🇰 Denmark",
        "🇫🇮 Finland", "🇵🇱 Poland", "🇨🇿 Czech Republic", "🇭🇺 Hungary", "🇵🇹 Portugal",
        "🇬🇷 Greece", "🇷🇴 Romania", "🇧🇬 Bulgaria", "🇭🇷 Croatia", "🇷🇸 Serbia",
        "🇦🇺 Australia", "🇯🇵 Japan", "🇰🇷 South Korea", "🇮🇳 India", "🇨🇳 China",
        "🇧🇷 Brazil", "🇲🇽 Mexico", "🇦🇷 Argentina", "🇨🇱 Chile", "🇨🇴 Colombia",
        "🇷🇺 Russia", "🇹🇷 Turkey", "🇸🇦 Saudi Arabia", "🇦🇪 UAE", "🇪🇬 Egypt",
        "🇿🇦 South Africa", "🇳🇬 Nigeria", "🇰🇪 Kenya", "🇮🇱 Israel", "🇹🇭 Thailand",
        "🇻🇳 Vietnam", "🇵🇭 Philippines", "🇮🇩 Indonesia", "🇲🇾 Malaysia", "🇸🇬 Singapore"
    ]
    return group_name in valid_countries


def clean_malformed_channel_name(raw_name):
    """Extract a clean channel name from malformed EXTINF data."""
    if not raw_name or len(raw_name) < 2:
        return "Unknown Channel"

    # Handle completely malformed entries like:
    # ".AB.ca",.AB.ca" tvg-logo="..." group-title="DaddyLive CA",CTV Canada [HD]"
    if raw_name.startswith('".') and 'tvg-logo=' in raw_name:
        # Extract the actual channel name after the last comma
        parts = raw_name.split(',')
        if len(parts) > 1:
            clean_name = parts[-1].strip().strip('"').strip()
            if clean_name:
                return clean_name

    # If it contains EXTINF data, extract the name.
    # BUGFIX: the original call omitted the string argument to re.search,
    # which raises TypeError at runtime.
    if 'group-title=' in raw_name and ',' in raw_name:
        extinf_match = re.search(r'group-title="[^"]*",(.+)', raw_name)
        if extinf_match:
            return extinf_match.group(1).strip().strip('"')

    # If it has extra quotes and domains, clean them
    if raw_name.startswith('.') and raw_name.count('"') > 2:
        parts = raw_name.split(',')
        for part in reversed(parts):
            cleaned = part.strip().strip('"').strip()
            if cleaned and not cleaned.startswith('.') and len(cleaned) > 2:
                if not any(x in cleaned.lower() for x in ['http', 'tvg-', 'group-title', '.com', '.ca', '.us']):
                    return cleaned

    # Basic cleaning
    cleaned = raw_name.strip().strip('"').strip()
    # Remove leading dots and domains
    if cleaned.startswith('.'):
        cleaned = re.sub(r'^\.[\w.]+["\']*,?\s*', '', cleaned)
    # Remove trailing EXTINF attributes
    cleaned = re.sub(r'\s+tvg-.*', '', cleaned)
    return cleaned if cleaned and len(cleaned) > 1 else "Unknown Channel"


def extract_epg_from_malformed(raw_name):
    """Extract an EPG ID from malformed data."""
    # Look for domain patterns like .AB.ca, .ON.ca, etc.
    domain_match = re.search(r'\.([A-Z]{2})\.ca', raw_name)
    if domain_match:
        province = domain_match.group(1)
        return f"generic.{province}.ca"

    # Look for .us domains
    domain_match = re.search(r'\.([A-Z]{2})\.us', raw_name)
    if domain_match:
        state = domain_match.group(1)
        return f"generic.{state}.us"

    return ""


def load_channels():
    """Load channels from channels.txt with integrated data cleanup."""
    if not os.path.exists('channels.txt'):
        print("❌ No channels.txt found")
        return []

    try:
        with open('channels.txt', 'r', encoding='utf-8') as f:
            content = f.read()

        channels = []
        cleaned_count = 0

        print("🧹 Step 1: Data Cleanup (fixing malformed entries)")
        print("-" * 50)

        # Channel records are blank-line separated "Key = Value" blocks.
        for block in content.split('\n\n'):
            if not block.strip():
                continue

            channel_data = {}
            for line in block.strip().split('\n'):
                if '=' in line:
                    key, value = line.split('=', 1)
                    key = key.strip()
                    value = value.strip()

                    if key == "Stream name":
                        # Check if this is malformed
                        if (value.startswith('".') or 'tvg-logo=' in value or
                                'group-title=' in value or value.count('"') > 2):
                            # Clean the malformed name
                            clean_name = clean_malformed_channel_name(value)
                            channel_data["Stream name"] = clean_name

                            # Extract EPG ID if missing
                            if not channel_data.get("EPG id"):
                                extracted_epg = extract_epg_from_malformed(value)
                                if extracted_epg:
                                    channel_data["EPG id"] = extracted_epg

                            cleaned_count += 1
                            if cleaned_count <= 10:  # Show first 10 examples
                                print(f"🔧 Fixed: '{value[:40]}...' → '{clean_name}'")
                        else:
                            channel_data[key] = value
                    else:
                        channel_data[key] = value

            # Only add channels with valid names
            if (channel_data.get('Stream name') and
                    len(channel_data.get('Stream name', '')) > 1 and
                    channel_data.get('Stream name') != "Unknown Channel"):
                channels.append(channel_data)

        print(f"✅ Data cleanup complete: {cleaned_count} entries fixed")
        print(f"📊 Loaded {len(channels)} channels (after cleanup)")
        return channels
    except Exception as e:
        print(f"❌ Error loading channels: {e}")
        return []


def reorganize_channels(channels):
    """Enhanced reorganization with 3-point analysis."""
    print("\n🔍 Step 2: Enhanced Country Detection with 3-Point Analysis")
    print("📊 Analyzing: Channel Name + EPG ID + Logo URL")
    print("-" * 60)

    changes = 0
    stats = {
        'country_detected': 0,
        'sent_to_uncategorized': 0,
        'kept_existing_country': 0,
        'streaming_filtered': 0
    }
    country_counts = {}

    for channel in channels:
        old_group = channel.get('Group', 'Uncategorized')
        stream_name = channel.get('Stream name', '')
        epg_id = channel.get('EPG id', '')
        logo = channel.get('Logo', '')
        stream_url = channel.get('Stream URL', '')

        # Detect country using enhanced 3-point analysis
        detected_country = detect_country_from_channel_content(stream_name, epg_id, logo, stream_url)

        # Debug output for first few channels to see what's happening
        if changes < 5:
            print(f"🔍 Debug: '{stream_name}' | EPG: '{epg_id}' | Detected: {detected_country}")

        # Decide final group
        if is_valid_country_group(old_group) and detected_country != "Uncategorized":
            # Keep existing valid country
            final_group = old_group
            stats['kept_existing_country'] += 1
        elif detected_country != "Uncategorized":
            # Use detected country
            final_group = detected_country
            stats['country_detected'] += 1
            if old_group != detected_country:
                print(f"🔍 Fixed: '{stream_name}' {old_group} → {detected_country}")
                changes += 1
        else:
            # Send to Uncategorized
            final_group = "Uncategorized"
            stats['sent_to_uncategorized'] += 1
            if old_group != "Uncategorized":
                # Check if it's a streaming service
                if any(service in stream_name.lower() for service in ['samsung', 'pluto', 'plex', 'tubi']):
                    stats['streaming_filtered'] += 1
                    print(f"📱 Platform: '{stream_name}' → Uncategorized")
                else:
                    print(f"❓ Undetected: '{stream_name}' → Uncategorized")
                changes += 1

        channel['Group'] = final_group
        country_counts[final_group] = country_counts.get(final_group, 0) + 1

    print(f"\n📊 PROCESSING RESULTS:")
    print(f"✅ Changes made: {changes}")
    print(f"🔍 Country detected: {stats['country_detected']}")
    print(f"✅ Kept existing countries: {stats['kept_existing_country']}")
    print(f"📱 Streaming services filtered: {stats['streaming_filtered']}")
    print(f"❓ Sent to Uncategorized: {stats['sent_to_uncategorized']}")

    print(f"\n🌍 FINAL GROUP DISTRIBUTION:")
    # Countries first (largest first), "Uncategorized" always last.
    sorted_countries = sorted(country_counts.items(), key=lambda x: (x[0] == "Uncategorized", -x[1]))
    for country, count in sorted_countries:
        percentage = (count / len(channels) * 100) if len(channels) > 0 else 0
        print(f"   {country}: {count} channels ({percentage:.1f}%)")

    return channels


def save_channels(channels):
    """Save channels to file."""
    # Backup
    if os.path.exists('channels.txt'):
        backup = f"channels_backup_{datetime.now().strftime('%Y%m%d_%H%M%S')}.txt"
        shutil.copy2('channels.txt', backup)
        print(f"📋 Backup: {backup}")

    try:
        with open('channels.txt', 'w', encoding='utf-8') as f:
            for i, channel in enumerate(channels):
                if i > 0:
                    f.write("\n\n")
                f.write(f"Group = {channel.get('Group', 'Uncategorized')}\n")
                f.write(f"Stream name = {channel.get('Stream name', 'Unknown')}\n")
                f.write(f"Logo = {channel.get('Logo', '')}\n")
                f.write(f"EPG id = {channel.get('EPG id', '')}\n")
                f.write(f"Stream URL = {channel.get('Stream URL', '')}\n")
        print(f"✅ Saved {len(channels)} channels")
        return True
    except Exception as e:
        print(f"❌ Save error: {e}")
        return False


def generate_m3u(channels):
    """Generate M3U playlist."""
    try:
        with open('playlist.m3u', 'w', encoding='utf-8') as f:
            f.write('#EXTM3U\n')
            for channel in channels:
                name = channel.get('Stream name', '')
                group = channel.get('Group', 'Uncategorized')
                logo = channel.get('Logo', '')
                epg_id = channel.get('EPG id', '')
                url = channel.get('Stream URL', '')
                if name and url:
                    f.write(f'#EXTINF:-1 group-title="{group}"')
                    if logo:
                        f.write(f' tvg-logo="{logo}"')
                    if epg_id:
                        f.write(f' tvg-id="{epg_id}"')
                    f.write(f',{name}\n{url}\n')
        print("✅ Generated playlist.m3u")
        return True
    except Exception as e:
        print(f"❌ M3U error: {e}")
        return False


def main():
    """Main function with integrated data cleanup and country detection."""
    print("🎯 Enhanced IPTV Processing - Data Cleanup + Country Detection")
    print("=" * 80)
    print("🧹 Step 1: Fix malformed channel data")
    print("🔍 Step 2: 3-point country analysis (Channel Name + EPG ID + Logo URL)")
    print("🎯 Step 3: Filter streaming services to Uncategorized")
    print("=" * 80)

    channels = load_channels()
    if not channels:
        return False

    # Enhanced reorganization with cleanup
    channels = reorganize_channels(channels)

    # Sort: Countries first (alphabetically), then Uncategorized last
    channels.sort(key=lambda x: (
        "zzz" if x.get('Group') == "Uncategorized" else x.get('Group', ''),
        x.get('Stream name', '')
    ))

    # Save and generate
    if not save_channels(channels):
        return False
    if not generate_m3u(channels):
        return False

    # Clear import file (best effort — ignore failures)
    try:
        with open('bulk_import.m3u', 'w', encoding='utf-8') as f:
            f.write('#EXTM3U\n')
        print("🧹 Cleared import file")
    except Exception:
        pass

    print("\n🎉 ENHANCED PROCESSING COMPLETE!")
    print("✅ Malformed data cleaned and fixed")
    print("✅ 3-point analysis applied to all channels")
    print("✅ Countries detected from EPG ID, Logo URL, and Channel Names")
    print("✅ Streaming services filtered to Uncategorized")
    print("✅ Clean country-organized playlist generated")

    # Final statistics
    uncategorized_count = sum(1 for ch in channels if ch.get('Group') == 'Uncategorized')
    success_rate = ((len(channels) - uncategorized_count) / len(channels) * 100) if len(channels) > 0 else 0
    print(f"\n📊 FINAL STATISTICS:")
    print(f"   Total channels: {len(channels)}")
    print(f"   Properly categorized: {len(channels) - uncategorized_count} ({success_rate:.1f}%)")
    print(f"   In Uncategorized: {uncategorized_count} ({100 - success_rate:.1f}%)")
    return True


if __name__ == "__main__":
    success = main()
    exit(0 if success else 1)