Compare commits
20 Commits
main
...
adef-patch
| Author | SHA1 | Date | |
|---|---|---|---|
| b796a820b0 | |||
| 7709ebbf87 | |||
| 5724373550 | |||
| 0a82a3571f | |||
| 56ed4b89dd | |||
| db1211c9bf | |||
| 6eef894fbb | |||
| 858e14683e | |||
| db76afe5b5 | |||
| 1f28a304eb | |||
| b6d0c20db1 | |||
| 758eb0ba49 | |||
| 5a33a2a3d9 | |||
| ff203974e1 | |||
| 56759f06ec | |||
| 0d1286dd41 | |||
| d6798f6eed | |||
| 1847d6a659 | |||
| 13bf29cd07 | |||
| 8034bc1556 |
334
HIDI/__init__.py
334
HIDI/__init__.py
@ -1,334 +0,0 @@
|
|||||||
import json
|
|
||||||
import re
|
|
||||||
from http.cookiejar import CookieJar
|
|
||||||
from typing import Optional, Iterable
|
|
||||||
from langcodes import Language
|
|
||||||
import base64
|
|
||||||
|
|
||||||
import click
|
|
||||||
|
|
||||||
from unshackle.core.constants import AnyTrack
|
|
||||||
from unshackle.core.credential import Credential
|
|
||||||
from unshackle.core.manifests import DASH
|
|
||||||
from unshackle.core.service import Service
|
|
||||||
from unshackle.core.titles import Episode, Series, Movie, Movies, Title_T, Titles_T
|
|
||||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle, Audio
|
|
||||||
|
|
||||||
|
|
||||||
class HIDI(Service):
|
|
||||||
"""
|
|
||||||
Service code for HiDive (hidive.com)
|
|
||||||
Version: 1.2.0
|
|
||||||
Authorization: Email + password login, with automatic token refresh.
|
|
||||||
Security: FHD@L3
|
|
||||||
"""
|
|
||||||
|
|
||||||
TITLE_RE = r"^https?://(?:www\.)?hidive\.com/(?:season/(?P<season_id>\d+)|playlist/(?P<playlist_id>\d+))$"
|
|
||||||
GEOFENCE = ()
|
|
||||||
NO_SUBTITLES = False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
@click.command(name="HIDI", short_help="https://hidive.com")
|
|
||||||
@click.argument("title", type=str)
|
|
||||||
@click.pass_context
|
|
||||||
def cli(ctx, **kwargs):
|
|
||||||
return HIDI(ctx, **kwargs)
|
|
||||||
|
|
||||||
def __init__(self, ctx, title: str):
|
|
||||||
super().__init__(ctx)
|
|
||||||
m = re.match(self.TITLE_RE, title)
|
|
||||||
if not m:
|
|
||||||
raise ValueError("Unsupported HiDive URL. Use /season/<id> or /playlist/<id>")
|
|
||||||
|
|
||||||
self.season_id = m.group("season_id")
|
|
||||||
self.playlist_id = m.group("playlist_id")
|
|
||||||
self.kind = "serie" if self.season_id else "movie"
|
|
||||||
self.content_id = int(self.season_id or self.playlist_id)
|
|
||||||
|
|
||||||
if not self.config:
|
|
||||||
raise EnvironmentError("Missing HIDI service config.")
|
|
||||||
self.cdm = ctx.obj.cdm
|
|
||||||
self._auth_token = None
|
|
||||||
self._refresh_token = None
|
|
||||||
self._drm_cache = {}
|
|
||||||
|
|
||||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
|
||||||
base_headers = {
|
|
||||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
|
|
||||||
"Accept": "application/json, text/plain, */*",
|
|
||||||
"Accept-Language": "en-US",
|
|
||||||
"Referer": "https://www.hidive.com/",
|
|
||||||
"Origin": "https://www.hidive.com",
|
|
||||||
"x-api-key": self.config["x_api_key"],
|
|
||||||
"app": "dice",
|
|
||||||
"Realm": "dce.hidive",
|
|
||||||
"x-app-var": self.config["x_app_var"],
|
|
||||||
}
|
|
||||||
self.session.headers.update(base_headers)
|
|
||||||
|
|
||||||
if not credential or not credential.username or not credential.password:
|
|
||||||
raise ValueError("HiDive requires email + password")
|
|
||||||
|
|
||||||
r_login = self.session.post(
|
|
||||||
self.config["endpoints"]["login"],
|
|
||||||
json={"id": credential.username, "secret": credential.password}
|
|
||||||
)
|
|
||||||
if r_login.status_code == 401:
|
|
||||||
raise PermissionError("Invalid email or password.")
|
|
||||||
r_login.raise_for_status()
|
|
||||||
|
|
||||||
login_data = r_login.json()
|
|
||||||
self._auth_token = login_data["authorisationToken"]
|
|
||||||
self._refresh_token = login_data["refreshToken"]
|
|
||||||
|
|
||||||
self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
|
|
||||||
self.log.info("HiDive login successful.")
|
|
||||||
|
|
||||||
def _refresh_auth(self):
|
|
||||||
if not self._refresh_token:
|
|
||||||
raise PermissionError("No refresh token available to renew session.")
|
|
||||||
|
|
||||||
self.log.warning("Auth token expired, refreshing...")
|
|
||||||
r = self.session.post(
|
|
||||||
self.config["endpoints"]["refresh"],
|
|
||||||
json={"refreshToken": self._refresh_token}
|
|
||||||
)
|
|
||||||
if r.status_code == 401:
|
|
||||||
raise PermissionError("Refresh token is invalid. Please log in again.")
|
|
||||||
r.raise_for_status()
|
|
||||||
|
|
||||||
data = r.json()
|
|
||||||
self._auth_token = data["authorisationToken"]
|
|
||||||
self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
|
|
||||||
self.log.info("Auth token refreshed successfully.")
|
|
||||||
|
|
||||||
def _api_get(self, url, **kwargs):
|
|
||||||
resp = self.session.get(url, **kwargs)
|
|
||||||
if resp.status_code == 401:
|
|
||||||
self._refresh_auth()
|
|
||||||
resp = self.session.get(url, **kwargs)
|
|
||||||
resp.raise_for_status()
|
|
||||||
return resp
|
|
||||||
|
|
||||||
def get_titles(self) -> Titles_T:
|
|
||||||
# One endpoint for both season and playlist
|
|
||||||
resp = self._api_get(
|
|
||||||
self.config["endpoints"]["view"],
|
|
||||||
params={"type": ("playlist" if self.kind == "movie" else "season"),
|
|
||||||
"id": self.content_id,
|
|
||||||
"timezone": "Europe/Amsterdam"}
|
|
||||||
)
|
|
||||||
data = resp.json()
|
|
||||||
|
|
||||||
if self.kind == "movie":
|
|
||||||
# Find the playlist bucket, then the single VOD
|
|
||||||
vod_id = None
|
|
||||||
movie_title = None
|
|
||||||
description = ""
|
|
||||||
for elem in data.get("elements", []):
|
|
||||||
if elem.get("$type") == "hero":
|
|
||||||
hdr = (elem.get("attributes", {}).get("header", {}) or {}).get("attributes", {})
|
|
||||||
movie_title = hdr.get("text", movie_title)
|
|
||||||
for c in elem.get("attributes", {}).get("content", []):
|
|
||||||
if c.get("$type") == "textblock":
|
|
||||||
description = c.get("attributes", {}).get("text", description)
|
|
||||||
if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "playlist":
|
|
||||||
items = elem.get("attributes", {}).get("items", [])
|
|
||||||
if items:
|
|
||||||
vod_id = items[0]["id"]
|
|
||||||
if not movie_title:
|
|
||||||
movie_title = items[0].get("title")
|
|
||||||
if not description:
|
|
||||||
description = items[0].get("description", "")
|
|
||||||
break
|
|
||||||
|
|
||||||
if not vod_id:
|
|
||||||
raise ValueError("No VOD found in playlist data.")
|
|
||||||
|
|
||||||
return Movies([
|
|
||||||
Movie(
|
|
||||||
id_=vod_id,
|
|
||||||
service=self.__class__,
|
|
||||||
name=movie_title or "Unknown Title",
|
|
||||||
description=description or "",
|
|
||||||
year=None,
|
|
||||||
language=Language.get("en"),
|
|
||||||
data={"playlistId": self.content_id}
|
|
||||||
)
|
|
||||||
])
|
|
||||||
|
|
||||||
# Series
|
|
||||||
episodes = []
|
|
||||||
series_title = None
|
|
||||||
for elem in data.get("elements", []):
|
|
||||||
if elem.get("$type") == "bucket" and elem["attributes"].get("type") == "season":
|
|
||||||
for item in elem["attributes"].get("items", []):
|
|
||||||
if item.get("type") != "SEASON_VOD":
|
|
||||||
continue
|
|
||||||
ep_title = item["title"]
|
|
||||||
ep_num = 1
|
|
||||||
if ep_title.startswith("E") and " - " in ep_title:
|
|
||||||
try:
|
|
||||||
ep_num = int(ep_title.split(" - ")[0][1:])
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
episodes.append(Episode(
|
|
||||||
id_=item["id"],
|
|
||||||
service=self.__class__,
|
|
||||||
title=data.get("metadata", {}).get("series", {}).get("title", "") or "HiDive",
|
|
||||||
season=1,
|
|
||||||
number=ep_num,
|
|
||||||
name=item["title"],
|
|
||||||
description=item.get("description", ""),
|
|
||||||
language=Language.get("en"),
|
|
||||||
data=item,
|
|
||||||
))
|
|
||||||
break
|
|
||||||
|
|
||||||
if not episodes:
|
|
||||||
raise ValueError("No episodes found in season data.")
|
|
||||||
return Series(sorted(episodes, key=lambda x: x.number))
|
|
||||||
|
|
||||||
def _get_audio_for_langs(self, mpd_url: str, langs: Iterable[Language]) -> list[Audio]:
|
|
||||||
merged: list[Audio] = []
|
|
||||||
seen = set()
|
|
||||||
|
|
||||||
# Use first available language as fallback, or "en" as ultimate fallback
|
|
||||||
fallback_lang = langs[0] if langs else Language.get("en")
|
|
||||||
|
|
||||||
dash = DASH.from_url(mpd_url, session=self.session)
|
|
||||||
try:
|
|
||||||
# Parse with a valid fallback language
|
|
||||||
base_tracks = dash.to_tracks(language=fallback_lang)
|
|
||||||
except Exception:
|
|
||||||
# Try with English as ultimate fallback
|
|
||||||
base_tracks = dash.to_tracks(language=Language.get("en"))
|
|
||||||
|
|
||||||
all_audio = base_tracks.audio or []
|
|
||||||
|
|
||||||
for lang in langs:
|
|
||||||
# Match by language prefix (e.g. en, ja)
|
|
||||||
for audio in all_audio:
|
|
||||||
lang_code = getattr(audio.language, "language", "en")
|
|
||||||
if lang_code.startswith(lang.language[:2]):
|
|
||||||
key = (lang_code, getattr(audio, "codec", None), getattr(audio, "bitrate", None))
|
|
||||||
if key in seen:
|
|
||||||
continue
|
|
||||||
merged.append(audio)
|
|
||||||
seen.add(key)
|
|
||||||
|
|
||||||
# If nothing matched, just return all available audio tracks
|
|
||||||
if not merged and all_audio:
|
|
||||||
merged = all_audio
|
|
||||||
|
|
||||||
return merged
|
|
||||||
|
|
||||||
|
|
||||||
def get_tracks(self, title: Title_T) -> Tracks:
|
|
||||||
vod_resp = self._api_get(
|
|
||||||
self.config["endpoints"]["vod"].format(vod_id=title.id),
|
|
||||||
params={"includePlaybackDetails": "URL"},
|
|
||||||
)
|
|
||||||
vod = vod_resp.json()
|
|
||||||
|
|
||||||
playback_url = vod.get("playerUrlCallback")
|
|
||||||
if not playback_url:
|
|
||||||
raise ValueError("No playback URL found.")
|
|
||||||
|
|
||||||
stream_data = self._api_get(playback_url).json()
|
|
||||||
dash_list = stream_data.get("dash", [])
|
|
||||||
if not dash_list:
|
|
||||||
raise ValueError("No DASH streams available.")
|
|
||||||
|
|
||||||
entry = dash_list[0]
|
|
||||||
mpd_url = entry["url"]
|
|
||||||
|
|
||||||
# Collect available HiDive metadata languages
|
|
||||||
meta_audio_tracks = vod.get("onlinePlaybackMetadata", {}).get("audioTracks", [])
|
|
||||||
available_langs = []
|
|
||||||
for m in meta_audio_tracks:
|
|
||||||
lang_code = (m.get("languageCode") or "").split("-")[0]
|
|
||||||
if not lang_code:
|
|
||||||
continue
|
|
||||||
try:
|
|
||||||
available_langs.append(Language.get(lang_code))
|
|
||||||
except Exception:
|
|
||||||
continue
|
|
||||||
|
|
||||||
# Use first available language as fallback, or English as ultimate fallback
|
|
||||||
fallback_lang = available_langs[0] if available_langs else Language.get("en")
|
|
||||||
|
|
||||||
# Parse DASH manifest with a valid fallback language
|
|
||||||
base_tracks = DASH.from_url(mpd_url, session=self.session).to_tracks(language=fallback_lang)
|
|
||||||
|
|
||||||
audio_tracks = self._get_audio_for_langs(mpd_url, available_langs)
|
|
||||||
|
|
||||||
# Map metadata labels
|
|
||||||
meta_audio_map = {m.get("languageCode", "").split("-")[0]: m.get("label") for m in meta_audio_tracks}
|
|
||||||
for a in audio_tracks:
|
|
||||||
lang_code = getattr(a.language, "language", "en")
|
|
||||||
a.name = meta_audio_map.get(lang_code, lang_code)
|
|
||||||
a.is_original_lang = (lang_code == title.language.language)
|
|
||||||
|
|
||||||
base_tracks.audio = audio_tracks
|
|
||||||
|
|
||||||
# Subtitles
|
|
||||||
subtitles = []
|
|
||||||
for sub in entry.get("subtitles", []):
|
|
||||||
if sub.get("format", "").lower() != "vtt":
|
|
||||||
continue
|
|
||||||
lang_code = sub.get("language", "en").replace("-", "_")
|
|
||||||
try:
|
|
||||||
lang = Language.get(lang_code)
|
|
||||||
except Exception:
|
|
||||||
lang = Language.get("en")
|
|
||||||
subtitles.append(Subtitle(
|
|
||||||
id_=f"{lang_code}:vtt",
|
|
||||||
url=sub.get("url"),
|
|
||||||
language=lang,
|
|
||||||
codec=Subtitle.Codec.WebVTT,
|
|
||||||
name=lang.language_name(),
|
|
||||||
))
|
|
||||||
base_tracks.subtitles = subtitles
|
|
||||||
|
|
||||||
# DRM info
|
|
||||||
drm = entry.get("drm", {}) or {}
|
|
||||||
jwt = drm.get("jwtToken")
|
|
||||||
lic_url = (drm.get("url") or "").strip()
|
|
||||||
if jwt and lic_url:
|
|
||||||
self._drm_cache[title.id] = (jwt, lic_url)
|
|
||||||
|
|
||||||
return base_tracks
|
|
||||||
|
|
||||||
|
|
||||||
def _hidive_get_drm_info(self, title: Title_T) -> tuple[str, str]:
|
|
||||||
if title.id in self._drm_cache:
|
|
||||||
return self._drm_cache[title.id]
|
|
||||||
self.get_tracks(title)
|
|
||||||
return self._drm_cache[title.id]
|
|
||||||
|
|
||||||
def _decode_hidive_license_payload(self, payload: bytes) -> bytes:
|
|
||||||
text = payload.decode("utf-8", errors="ignore")
|
|
||||||
prefix = "data:application/octet-stream;base64,"
|
|
||||||
if text.startswith(prefix):
|
|
||||||
b64 = text.split(",", 1)[1]
|
|
||||||
return base64.b64decode(b64)
|
|
||||||
return payload
|
|
||||||
|
|
||||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes | str | None:
|
|
||||||
jwt_token, license_url = self._hidive_get_drm_info(title)
|
|
||||||
headers = {
|
|
||||||
"Authorization": f"Bearer {jwt_token}",
|
|
||||||
"Content-Type": "application/octet-stream",
|
|
||||||
"Accept": "*/*",
|
|
||||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
|
|
||||||
"Origin": "https://www.hidive.com",
|
|
||||||
"Referer": "https://www.hidive.com/",
|
|
||||||
"X-DRM-INFO": "eyJzeXN0ZW0iOiJjb20ud2lkZXZpbmUuYWxwaGEifQ==",
|
|
||||||
}
|
|
||||||
r = self.session.post(license_url, data=challenge, headers=headers, timeout=30)
|
|
||||||
r.raise_for_status()
|
|
||||||
return self._decode_hidive_license_payload(r.content)
|
|
||||||
|
|
||||||
def get_chapters(self, title: Title_T) -> list[Chapter]:
|
|
||||||
return []
|
|
||||||
@ -1,10 +0,0 @@
|
|||||||
x_api_key: "857a1e5d-e35e-4fdf-805b-a87b6f8364bf"
|
|
||||||
x_app_var: "6.59.1.e16cdfd"
|
|
||||||
|
|
||||||
endpoints:
|
|
||||||
init: "https://dce-frontoffice.imggaming.com/api/v1/init/"
|
|
||||||
login: "https://dce-frontoffice.imggaming.com/api/v2/login"
|
|
||||||
vod: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}?includePlaybackDetails=URL"
|
|
||||||
adjacent: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}/adjacent"
|
|
||||||
view: "https://dce-frontoffice.imggaming.com/api/v1/view" # Changed from season_view
|
|
||||||
refresh: "https://dce-frontoffice.imggaming.com/api/v2/token/refresh"
|
|
||||||
407
KNPY/__init__.py
407
KNPY/__init__.py
@ -1,407 +0,0 @@
|
|||||||
import base64
|
|
||||||
import json
|
|
||||||
import re
|
|
||||||
from datetime import datetime, timezone
|
|
||||||
from http.cookiejar import CookieJar
|
|
||||||
from typing import List, Optional
|
|
||||||
|
|
||||||
import click
|
|
||||||
import jwt
|
|
||||||
from langcodes import Language
|
|
||||||
|
|
||||||
from unshackle.core.constants import AnyTrack
|
|
||||||
from unshackle.core.credential import Credential
|
|
||||||
from unshackle.core.manifests import DASH
|
|
||||||
from unshackle.core.search_result import SearchResult
|
|
||||||
from unshackle.core.service import Service
|
|
||||||
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
|
||||||
from unshackle.core.tracks import Subtitle, Tracks
|
|
||||||
|
|
||||||
|
|
||||||
class KNPY(Service):
|
|
||||||
"""
|
|
||||||
Service code for Kanopy (kanopy.com).
|
|
||||||
Version: 1.0.0
|
|
||||||
|
|
||||||
Auth: Credential (username + password)
|
|
||||||
Security: FHD@L3
|
|
||||||
|
|
||||||
Handles both Movies and Series (Playlists).
|
|
||||||
Detects and stops for movies that require tickets.
|
|
||||||
Caching included
|
|
||||||
"""
|
|
||||||
|
|
||||||
# Updated regex to match the new URL structure with library subdomain and path
|
|
||||||
TITLE_RE = r"^https?://(?:www\.)?kanopy\.com/.+/(?P<id>\d+)$"
|
|
||||||
GEOFENCE = ()
|
|
||||||
NO_SUBTITLES = False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
@click.command(name="KNPY", short_help="https://kanopy.com")
|
|
||||||
@click.argument("title", type=str)
|
|
||||||
@click.pass_context
|
|
||||||
def cli(ctx, **kwargs):
|
|
||||||
return KNPY(ctx, **kwargs)
|
|
||||||
|
|
||||||
def __init__(self, ctx, title: str):
|
|
||||||
super().__init__(ctx)
|
|
||||||
if not self.config:
|
|
||||||
raise ValueError("KNPY configuration not found. Ensure config.yaml exists.")
|
|
||||||
|
|
||||||
self.cdm = ctx.obj.cdm
|
|
||||||
|
|
||||||
match = re.match(self.TITLE_RE, title)
|
|
||||||
if match:
|
|
||||||
self.content_id = match.group("id")
|
|
||||||
else:
|
|
||||||
self.content_id = None
|
|
||||||
self.search_query = title
|
|
||||||
|
|
||||||
self.API_VERSION = self.config["client"]["api_version"]
|
|
||||||
self.USER_AGENT = self.config["client"]["user_agent"]
|
|
||||||
self.WIDEVINE_UA = self.config["client"]["widevine_ua"]
|
|
||||||
|
|
||||||
self.session.headers.update({
|
|
||||||
"x-version": self.API_VERSION,
|
|
||||||
"user-agent": self.USER_AGENT
|
|
||||||
})
|
|
||||||
|
|
||||||
self._jwt = None
|
|
||||||
self._visitor_id = None
|
|
||||||
self._user_id = None
|
|
||||||
self._domain_id = None
|
|
||||||
self.widevine_license_url = None
|
|
||||||
|
|
||||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
|
||||||
if not credential or not credential.username or not credential.password:
|
|
||||||
raise ValueError("Kanopy requires email and password for authentication.")
|
|
||||||
|
|
||||||
cache = self.cache.get("auth_token")
|
|
||||||
|
|
||||||
if cache and not cache.expired:
|
|
||||||
cached_data = cache.data
|
|
||||||
valid_token = None
|
|
||||||
|
|
||||||
if isinstance(cached_data, dict) and "token" in cached_data:
|
|
||||||
if cached_data.get("username") == credential.username:
|
|
||||||
valid_token = cached_data["token"]
|
|
||||||
self.log.info("Using cached authentication token")
|
|
||||||
else:
|
|
||||||
self.log.info(f"Cached token belongs to '{cached_data.get('username')}', but logging in as '{credential.username}'. Re-authenticating.")
|
|
||||||
|
|
||||||
elif isinstance(cached_data, str):
|
|
||||||
self.log.info("Found legacy cached token format. Re-authenticating to ensure correct user.")
|
|
||||||
|
|
||||||
if valid_token:
|
|
||||||
self._jwt = valid_token
|
|
||||||
self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
|
|
||||||
|
|
||||||
if not self._user_id or not self._domain_id or not self._visitor_id:
|
|
||||||
try:
|
|
||||||
decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
|
|
||||||
self._user_id = decoded_jwt["data"]["uid"]
|
|
||||||
self._visitor_id = decoded_jwt["data"]["visitor_id"]
|
|
||||||
self.log.info(f"Extracted user_id and visitor_id from cached token.")
|
|
||||||
self._fetch_user_details()
|
|
||||||
return
|
|
||||||
except (KeyError, jwt.DecodeError) as e:
|
|
||||||
self.log.error(f"Could not decode cached token: {e}. Re-authenticating.")
|
|
||||||
|
|
||||||
self.log.info("Performing handshake to get visitor token...")
|
|
||||||
r = self.session.get(self.config["endpoints"]["handshake"])
|
|
||||||
r.raise_for_status()
|
|
||||||
handshake_data = r.json()
|
|
||||||
self._visitor_id = handshake_data["visitorId"]
|
|
||||||
initial_jwt = handshake_data["jwt"]
|
|
||||||
|
|
||||||
self.log.info(f"Logging in as {credential.username}...")
|
|
||||||
login_payload = {
|
|
||||||
"credentialType": "email",
|
|
||||||
"emailUser": {
|
|
||||||
"email": credential.username,
|
|
||||||
"password": credential.password
|
|
||||||
}
|
|
||||||
}
|
|
||||||
r = self.session.post(
|
|
||||||
self.config["endpoints"]["login"],
|
|
||||||
json=login_payload,
|
|
||||||
headers={"authorization": f"Bearer {initial_jwt}"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
login_data = r.json()
|
|
||||||
self._jwt = login_data["jwt"]
|
|
||||||
self._user_id = login_data["userId"]
|
|
||||||
|
|
||||||
self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
|
|
||||||
self.log.info(f"Successfully authenticated as {credential.username}")
|
|
||||||
|
|
||||||
self._fetch_user_details()
|
|
||||||
|
|
||||||
try:
|
|
||||||
decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
|
|
||||||
exp_timestamp = decoded_jwt.get("exp")
|
|
||||||
|
|
||||||
cache_payload = {
|
|
||||||
"token": self._jwt,
|
|
||||||
"username": credential.username
|
|
||||||
}
|
|
||||||
|
|
||||||
if exp_timestamp:
|
|
||||||
expiration_in_seconds = int(exp_timestamp - datetime.now(timezone.utc).timestamp())
|
|
||||||
self.log.info(f"Caching token for {expiration_in_seconds / 60:.2f} minutes.")
|
|
||||||
cache.set(data=cache_payload, expiration=expiration_in_seconds)
|
|
||||||
else:
|
|
||||||
self.log.warning("JWT has no 'exp' claim, caching for 1 hour as a fallback.")
|
|
||||||
cache.set(data=cache_payload, expiration=3600)
|
|
||||||
except Exception as e:
|
|
||||||
self.log.error(f"Failed to decode JWT for caching: {e}. Caching for 1 hour as a fallback.")
|
|
||||||
cache.set(
|
|
||||||
data={"token": self._jwt, "username": credential.username},
|
|
||||||
expiration=3600
|
|
||||||
)
|
|
||||||
|
|
||||||
def _fetch_user_details(self):
|
|
||||||
self.log.info("Fetching user library memberships...")
|
|
||||||
r = self.session.get(self.config["endpoints"]["memberships"].format(user_id=self._user_id))
|
|
||||||
r.raise_for_status()
|
|
||||||
memberships = r.json()
|
|
||||||
|
|
||||||
for membership in memberships.get("list", []):
|
|
||||||
if membership.get("status") == "active" and membership.get("isDefault", False):
|
|
||||||
self._domain_id = str(membership["domainId"])
|
|
||||||
self.log.info(f"Using default library domain: {membership.get('sitename', 'Unknown')} (ID: {self._domain_id})")
|
|
||||||
return
|
|
||||||
|
|
||||||
if memberships.get("list"):
|
|
||||||
self._domain_id = str(memberships["list"][0]["domainId"])
|
|
||||||
self.log.warning(f"No default library found. Using first active domain: {self._domain_id}")
|
|
||||||
else:
|
|
||||||
raise ValueError("No active library memberships found for this user.")
|
|
||||||
|
|
||||||
def get_titles(self) -> Titles_T:
|
|
||||||
if not self.content_id:
|
|
||||||
raise ValueError("A content ID is required to get titles. Use a URL or run a search first.")
|
|
||||||
if not self._domain_id:
|
|
||||||
raise ValueError("Domain ID not set. Authentication may have failed.")
|
|
||||||
|
|
||||||
r = self.session.get(self.config["endpoints"]["video_info"].format(video_id=self.content_id, domain_id=self._domain_id))
|
|
||||||
r.raise_for_status()
|
|
||||||
content_data = r.json()
|
|
||||||
|
|
||||||
content_type = content_data.get("type")
|
|
||||||
|
|
||||||
def parse_lang(data):
|
|
||||||
try:
|
|
||||||
langs = data.get("languages", [])
|
|
||||||
if langs and isinstance(langs, list) and len(langs) > 0:
|
|
||||||
return Language.find(langs[0])
|
|
||||||
except:
|
|
||||||
pass
|
|
||||||
return Language.get("en")
|
|
||||||
|
|
||||||
if content_type == "video":
|
|
||||||
video_data = content_data["video"]
|
|
||||||
movie = Movie(
|
|
||||||
id_=str(video_data["videoId"]),
|
|
||||||
service=self.__class__,
|
|
||||||
name=video_data["title"],
|
|
||||||
year=video_data.get("productionYear"),
|
|
||||||
description=video_data.get("descriptionHtml", ""),
|
|
||||||
language=parse_lang(video_data),
|
|
||||||
data=video_data,
|
|
||||||
)
|
|
||||||
return Movies([movie])
|
|
||||||
|
|
||||||
elif content_type == "playlist":
|
|
||||||
playlist_data = content_data["playlist"]
|
|
||||||
series_title = playlist_data["title"]
|
|
||||||
series_year = playlist_data.get("productionYear")
|
|
||||||
|
|
||||||
season_match = re.search(r'(?:Season|S)\s*(\d+)', series_title, re.IGNORECASE)
|
|
||||||
season_num = int(season_match.group(1)) if season_match else 1
|
|
||||||
|
|
||||||
r = self.session.get(self.config["endpoints"]["video_items"].format(video_id=self.content_id, domain_id=self._domain_id))
|
|
||||||
r.raise_for_status()
|
|
||||||
items_data = r.json()
|
|
||||||
|
|
||||||
episodes = []
|
|
||||||
for i, item in enumerate(items_data.get("list", [])):
|
|
||||||
if item.get("type") != "video":
|
|
||||||
continue
|
|
||||||
|
|
||||||
video_data = item["video"]
|
|
||||||
ep_num = i + 1
|
|
||||||
|
|
||||||
ep_title = video_data.get("title", "")
|
|
||||||
ep_match = re.search(r'Ep(?:isode)?\.?\s*(\d+)', ep_title, re.IGNORECASE)
|
|
||||||
if ep_match:
|
|
||||||
ep_num = int(ep_match.group(1))
|
|
||||||
|
|
||||||
episodes.append(
|
|
||||||
Episode(
|
|
||||||
id_=str(video_data["videoId"]),
|
|
||||||
service=self.__class__,
|
|
||||||
title=series_title,
|
|
||||||
season=season_num,
|
|
||||||
number=ep_num,
|
|
||||||
name=video_data["title"],
|
|
||||||
description=video_data.get("descriptionHtml", ""),
|
|
||||||
year=video_data.get("productionYear", series_year),
|
|
||||||
language=parse_lang(video_data),
|
|
||||||
data=video_data,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
series = Series(episodes)
|
|
||||||
series.name = series_title
|
|
||||||
series.description = playlist_data.get("descriptionHtml", "")
|
|
||||||
series.year = series_year
|
|
||||||
return series
|
|
||||||
|
|
||||||
else:
|
|
||||||
raise ValueError(f"Unsupported content type: {content_type}")
|
|
||||||
|
|
||||||
def get_tracks(self, title: Title_T) -> Tracks:
|
|
||||||
play_payload = {
|
|
||||||
"videoId": int(title.id),
|
|
||||||
"domainId": int(self._domain_id),
|
|
||||||
"userId": int(self._user_id),
|
|
||||||
"visitorId": self._visitor_id
|
|
||||||
}
|
|
||||||
|
|
||||||
self.session.headers.setdefault("authorization", f"Bearer {self._jwt}")
|
|
||||||
self.session.headers.setdefault("x-version", self.API_VERSION)
|
|
||||||
self.session.headers.setdefault("user-agent", self.USER_AGENT)
|
|
||||||
|
|
||||||
r = self.session.post(self.config["endpoints"]["plays"], json=play_payload)
|
|
||||||
response_json = None
|
|
||||||
try:
|
|
||||||
response_json = r.json()
|
|
||||||
except Exception:
|
|
||||||
pass
|
|
||||||
|
|
||||||
# Handle known errors gracefully
|
|
||||||
if r.status_code == 403:
|
|
||||||
if response_json and response_json.get("errorSubcode") == "playRegionRestricted":
|
|
||||||
self.log.error("Kanopy reports: This video is not available in your country.")
|
|
||||||
raise PermissionError(
|
|
||||||
"Playback blocked by region restriction. Try connecting through a supported country or verify your library’s access region."
|
|
||||||
)
|
|
||||||
else:
|
|
||||||
self.log.error(f"Access forbidden (HTTP 403). Response: {response_json}")
|
|
||||||
raise PermissionError("Kanopy denied access to this video. It may require a different library membership or authentication.")
|
|
||||||
|
|
||||||
# Raise for any other HTTP errors
|
|
||||||
r.raise_for_status()
|
|
||||||
play_data = response_json or r.json()
|
|
||||||
|
|
||||||
manifest_url = None
|
|
||||||
for manifest in play_data.get("manifests", []):
|
|
||||||
if manifest["manifestType"] == "dash":
|
|
||||||
url = manifest["url"]
|
|
||||||
manifest_url = f"https://kanopy.com{url}" if url.startswith("/") else url
|
|
||||||
drm_type = manifest.get("drmType")
|
|
||||||
if drm_type == "kanopyDrm":
|
|
||||||
play_id = play_data.get("playId")
|
|
||||||
self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=f"{play_id}-0")
|
|
||||||
elif drm_type == "studioDrm":
|
|
||||||
license_id = manifest.get("drmLicenseID", f"{play_data.get('playId')}-1")
|
|
||||||
self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=license_id)
|
|
||||||
else:
|
|
||||||
self.log.warning(f"Unknown drmType: {drm_type}")
|
|
||||||
self.widevine_license_url = None
|
|
||||||
break
|
|
||||||
|
|
||||||
if not manifest_url:
|
|
||||||
raise ValueError("Could not find a DASH manifest for this title.")
|
|
||||||
if not self.widevine_license_url:
|
|
||||||
raise ValueError("Could not construct Widevine license URL.")
|
|
||||||
|
|
||||||
self.log.info(f"Fetching DASH manifest from: {manifest_url}")
|
|
||||||
r = self.session.get(manifest_url)
|
|
||||||
r.raise_for_status()
|
|
||||||
|
|
||||||
# Refresh headers for manifest parsing
|
|
||||||
self.session.headers.clear()
|
|
||||||
self.session.headers.update({
|
|
||||||
"User-Agent": self.WIDEVINE_UA,
|
|
||||||
"Accept": "*/*",
|
|
||||||
"Accept-Encoding": "gzip, deflate",
|
|
||||||
"Connection": "keep-alive",
|
|
||||||
})
|
|
||||||
|
|
||||||
tracks = DASH.from_text(r.text, url=manifest_url).to_tracks(language=title.language)
|
|
||||||
for caption_data in play_data.get("captions", []):
|
|
||||||
lang = caption_data.get("language", "en")
|
|
||||||
for file_info in caption_data.get("files", []):
|
|
||||||
if file_info.get("type") == "webvtt":
|
|
||||||
tracks.add(Subtitle(
|
|
||||||
id_=f"caption-{lang}",
|
|
||||||
url=file_info["url"],
|
|
||||||
codec=Subtitle.Codec.WebVTT,
|
|
||||||
language=Language.get(lang)
|
|
||||||
))
|
|
||||||
break
|
|
||||||
|
|
||||||
return tracks
|
|
||||||
|
|
||||||
|
|
||||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
|
||||||
if not self.widevine_license_url:
|
|
||||||
raise ValueError("Widevine license URL was not set. Call get_tracks first.")
|
|
||||||
|
|
||||||
license_headers = {
|
|
||||||
"Content-Type": "application/octet-stream",
|
|
||||||
"User-Agent": self.WIDEVINE_UA,
|
|
||||||
"Authorization": f"Bearer {self._jwt}",
|
|
||||||
"X-Version": self.API_VERSION
|
|
||||||
}
|
|
||||||
|
|
||||||
r = self.session.post(
|
|
||||||
self.widevine_license_url,
|
|
||||||
data=challenge,
|
|
||||||
headers=license_headers
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
return r.content
|
|
||||||
|
|
||||||
# def search(self) -> List[SearchResult]:
|
|
||||||
# if not hasattr(self, 'search_query'):
|
|
||||||
# self.log.error("Search query not set. Cannot search.")
|
|
||||||
# return []
|
|
||||||
|
|
||||||
# self.log.info(f"Searching for '{self.search_query}'...")
|
|
||||||
# params = {
|
|
||||||
# "query": self.search_query,
|
|
||||||
# "sort": "relevance",
|
|
||||||
# "domainId": self._domain_id,
|
|
||||||
# "page": 0,
|
|
||||||
# "perPage": 20
|
|
||||||
# }
|
|
||||||
# r = self.session.get(self.config["endpoints"]["search"], params=params)
|
|
||||||
# r.raise_for_status()
|
|
||||||
# search_data = r.json()
|
|
||||||
|
|
||||||
# results = []
|
|
||||||
# for item in search_data.get("list", []):
|
|
||||||
# item_type = item.get("type")
|
|
||||||
# if item_type not in ["playlist", "video"]:
|
|
||||||
# continue
|
|
||||||
|
|
||||||
# video_id = item.get("videoId")
|
|
||||||
# title = item.get("title", "No Title")
|
|
||||||
# label = "Series" if item_type == "playlist" else "Movie"
|
|
||||||
|
|
||||||
# results.append(
|
|
||||||
# SearchResult(
|
|
||||||
# id_=str(video_id),
|
|
||||||
# title=title,
|
|
||||||
# description="",
|
|
||||||
# label=label,
|
|
||||||
# url=f"https://www.kanopy.com/watch/{video_id}"
|
|
||||||
# )
|
|
||||||
# )
|
|
||||||
# return results
|
|
||||||
|
|
||||||
def get_chapters(self, title: Title_T) -> list:
|
|
||||||
return []
|
|
||||||
@ -1,15 +0,0 @@
|
|||||||
client:
|
|
||||||
api_version: "Android/com.kanopy/6.21.0/952 (SM-A525F; Android 15)"
|
|
||||||
user_agent: "okhttp/5.2.1"
|
|
||||||
widevine_ua: "KanopyApplication/6.21.0 (Linux;Android 15) AndroidXMedia3/1.8.0"
|
|
||||||
|
|
||||||
endpoints:
|
|
||||||
handshake: "https://kanopy.com/kapi/handshake"
|
|
||||||
login: "https://kanopy.com/kapi/login"
|
|
||||||
memberships: "https://kanopy.com/kapi/memberships?userId={user_id}"
|
|
||||||
video_info: "https://kanopy.com/kapi/videos/{video_id}?domainId={domain_id}"
|
|
||||||
video_items: "https://kanopy.com/kapi/videos/{video_id}/items?domainId={domain_id}"
|
|
||||||
search: "https://kanopy.com/kapi/search/videos"
|
|
||||||
plays: "https://kanopy.com/kapi/plays"
|
|
||||||
access_expires_in: "https://kanopy.com/kapi/users/{user_id}/history/videos/{video_id}/access_expires_in?domainId={domain_id}"
|
|
||||||
widevine_license: "https://kanopy.com/kapi/licenses/widevine/{license_id}"
|
|
||||||
297
KOWP/__init__.py
297
KOWP/__init__.py
@ -1,297 +0,0 @@
|
|||||||
import json
|
|
||||||
import re
|
|
||||||
from http.cookiejar import CookieJar
|
|
||||||
from typing import Optional, List, Dict, Any
|
|
||||||
|
|
||||||
import click
|
|
||||||
from langcodes import Language
|
|
||||||
|
|
||||||
from unshackle.core.constants import AnyTrack
|
|
||||||
from unshackle.core.credential import Credential
|
|
||||||
from unshackle.core.manifests import DASH
|
|
||||||
from unshackle.core.service import Service
|
|
||||||
from unshackle.core.search_result import SearchResult
|
|
||||||
from unshackle.core.titles import Episode, Series, Title_T, Titles_T
|
|
||||||
from unshackle.core.tracks import Subtitle, Tracks
|
|
||||||
from unshackle.core.utilities import is_close_match
|
|
||||||
|
|
||||||
class KOWP(Service):
|
|
||||||
"""
|
|
||||||
Service code for Kocowa Plus (kocowa.com).
|
|
||||||
Version: 1.0.0
|
|
||||||
|
|
||||||
Auth: Credential (username + password)
|
|
||||||
Security: FHD@L3
|
|
||||||
"""
|
|
||||||
|
|
||||||
TITLE_RE = r"^(?:https?://(?:www\.)?kocowa\.com/[^/]+/season/)?(?P<title_id>\d+)"
|
|
||||||
GEOFENCE = ()
|
|
||||||
NO_SUBTITLES = False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
@click.command(name="kowp", short_help="https://www.kocowa.com")
|
|
||||||
@click.argument("title", type=str)
|
|
||||||
@click.option("--extras", is_flag=True, default=False, help="Include teasers/extras")
|
|
||||||
@click.pass_context
|
|
||||||
def cli(ctx, **kwargs):
|
|
||||||
return KOWP(ctx, **kwargs)
|
|
||||||
|
|
||||||
def __init__(self, ctx, title: str, extras: bool = False):
|
|
||||||
super().__init__(ctx)
|
|
||||||
match = re.match(self.TITLE_RE, title)
|
|
||||||
if match:
|
|
||||||
self.title_id = match.group("title_id")
|
|
||||||
else:
|
|
||||||
self.title_id = title # fallback to use as search keyword
|
|
||||||
self.include_extras = extras
|
|
||||||
self.brightcove_account_id = None
|
|
||||||
self.brightcove_pk = None
|
|
||||||
self.cdm = ctx.obj.cdm
|
|
||||||
|
|
||||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
|
||||||
if not credential:
|
|
||||||
raise ValueError("KOWP requires username and password")
|
|
||||||
|
|
||||||
payload = {
|
|
||||||
"username": credential.username,
|
|
||||||
"password": credential.password,
|
|
||||||
"device_id": f"{credential.username}_browser",
|
|
||||||
"device_type": "browser",
|
|
||||||
"device_model": "Firefox",
|
|
||||||
"device_version": "firefox/143.0",
|
|
||||||
"push_token": None,
|
|
||||||
"app_version": "v4.0.16",
|
|
||||||
}
|
|
||||||
r = self.session.post(
|
|
||||||
self.config["endpoints"]["login"],
|
|
||||||
json=payload,
|
|
||||||
headers={"Authorization": "anonymous", "Origin": "https://www.kocowa.com"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
res = r.json()
|
|
||||||
if res.get("code") != "0000":
|
|
||||||
raise PermissionError(f"Login failed: {res.get('message')}")
|
|
||||||
|
|
||||||
self.access_token = res["object"]["access_token"]
|
|
||||||
|
|
||||||
r = self.session.post(
|
|
||||||
self.config["endpoints"]["middleware_auth"],
|
|
||||||
json={"token": f"wA-Auth.{self.access_token}"},
|
|
||||||
headers={"Origin": "https://www.kocowa.com"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
self.middleware_token = r.json()["token"]
|
|
||||||
|
|
||||||
self._fetch_brightcove_config()
|
|
||||||
|
|
||||||
def _fetch_brightcove_config(self):
|
|
||||||
"""Fetch Brightcove account_id and policy_key from Kocowa's public config endpoint."""
|
|
||||||
try:
|
|
||||||
r = self.session.get(
|
|
||||||
"https://middleware.bcmw.kocowa.com/api/config",
|
|
||||||
headers={
|
|
||||||
"Origin": "https://www.kocowa.com",
|
|
||||||
"Referer": "https://www.kocowa.com/",
|
|
||||||
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36 Edg/142.0.0.0"
|
|
||||||
}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
config = r.json()
|
|
||||||
|
|
||||||
self.brightcove_account_id = config.get("VC_ACCOUNT_ID")
|
|
||||||
self.brightcove_pk = config.get("BCOV_POLICY_KEY")
|
|
||||||
|
|
||||||
if not self.brightcove_account_id:
|
|
||||||
raise ValueError("VC_ACCOUNT_ID missing in /api/config response")
|
|
||||||
if not self.brightcove_pk:
|
|
||||||
raise ValueError("BCOV_POLICY_KEY missing in /api/config response")
|
|
||||||
|
|
||||||
self.log.info(f"Brightcove config loaded: account_id={self.brightcove_account_id}")
|
|
||||||
|
|
||||||
except Exception as e:
|
|
||||||
raise RuntimeError(f"Failed to fetch or parse Brightcove config: {e}")
|
|
||||||
|
|
||||||
def get_titles(self) -> Titles_T:
|
|
||||||
all_episodes = []
|
|
||||||
offset = 0
|
|
||||||
limit = 20
|
|
||||||
series_title = None # Store the title from the first request
|
|
||||||
|
|
||||||
while True:
|
|
||||||
url = self.config["endpoints"]["metadata"].format(title_id=self.title_id)
|
|
||||||
sep = "&" if "?" in url else "?"
|
|
||||||
url += f"{sep}offset={offset}&limit={limit}"
|
|
||||||
|
|
||||||
r = self.session.get(
|
|
||||||
url,
|
|
||||||
headers={"Authorization": self.access_token, "Origin": "https://www.kocowa.com"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
data = r.json()["object"]
|
|
||||||
|
|
||||||
# Extract the series title only from the very first page
|
|
||||||
if series_title is None and "meta" in data:
|
|
||||||
series_title = data["meta"]["title"]["en"]
|
|
||||||
|
|
||||||
page_objects = data.get("next_episodes", {}).get("objects", [])
|
|
||||||
if not page_objects:
|
|
||||||
break
|
|
||||||
|
|
||||||
for ep in page_objects:
|
|
||||||
is_episode = ep.get("detail_type") == "episode"
|
|
||||||
is_extra = ep.get("detail_type") in ("teaser", "extra")
|
|
||||||
if is_episode or (self.include_extras and is_extra):
|
|
||||||
all_episodes.append(ep)
|
|
||||||
|
|
||||||
offset += limit
|
|
||||||
total = data.get("next_episodes", {}).get("total_count", 0)
|
|
||||||
if len(all_episodes) >= total or len(page_objects) < limit:
|
|
||||||
break
|
|
||||||
|
|
||||||
# If we never got the series title, exit with an error
|
|
||||||
if series_title is None:
|
|
||||||
raise ValueError("Could not retrieve series metadata to get the title.")
|
|
||||||
|
|
||||||
episodes = []
|
|
||||||
for ep in all_episodes:
|
|
||||||
meta = ep["meta"]
|
|
||||||
ep_type = "Episode" if ep["detail_type"] == "episode" else ep["detail_type"].capitalize()
|
|
||||||
ep_num = meta.get("episode_number", 0)
|
|
||||||
title = meta["title"].get("en") or f"{ep_type} {ep_num}"
|
|
||||||
desc = meta["description"].get("en") or ""
|
|
||||||
|
|
||||||
episodes.append(
|
|
||||||
Episode(
|
|
||||||
id_=str(ep["id"]),
|
|
||||||
service=self.__class__,
|
|
||||||
title=series_title,
|
|
||||||
season=meta.get("season_number", 1),
|
|
||||||
number=ep_num,
|
|
||||||
name=title,
|
|
||||||
description=desc,
|
|
||||||
year=None,
|
|
||||||
language=Language.get("en"),
|
|
||||||
data=ep,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
return Series(episodes)
|
|
||||||
|
|
||||||
def get_tracks(self, title: Title_T) -> Tracks:
|
|
||||||
# Authorize playback
|
|
||||||
r = self.session.post(
|
|
||||||
self.config["endpoints"]["authorize"].format(episode_id=title.id),
|
|
||||||
headers={"Authorization": f"Bearer {self.middleware_token}"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
auth_data = r.json()
|
|
||||||
if not auth_data.get("Success"):
|
|
||||||
raise PermissionError("Playback authorization failed")
|
|
||||||
self.playback_token = auth_data["token"]
|
|
||||||
|
|
||||||
# Fetch Brightcove manifest
|
|
||||||
manifest_url = (
|
|
||||||
f"https://edge.api.brightcove.com/playback/v1/accounts/{self.brightcove_account_id}/videos/ref:{title.id}"
|
|
||||||
)
|
|
||||||
r = self.session.get(
|
|
||||||
manifest_url,
|
|
||||||
headers={"Accept": f"application/json;pk={self.brightcove_pk}"}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
manifest = r.json()
|
|
||||||
|
|
||||||
# Get DASH URL + Widevine license
|
|
||||||
dash_url = widevine_url = None
|
|
||||||
for src in manifest.get("sources", []):
|
|
||||||
if src.get("type") == "application/dash+xml":
|
|
||||||
dash_url = src["src"]
|
|
||||||
widevine_url = (
|
|
||||||
src.get("key_systems", {})
|
|
||||||
.get("com.widevine.alpha", {})
|
|
||||||
.get("license_url")
|
|
||||||
)
|
|
||||||
if dash_url and widevine_url:
|
|
||||||
break
|
|
||||||
|
|
||||||
if not dash_url or not widevine_url:
|
|
||||||
raise ValueError("No Widevine DASH stream found")
|
|
||||||
|
|
||||||
self.widevine_license_url = widevine_url
|
|
||||||
tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)
|
|
||||||
|
|
||||||
for sub in manifest.get("text_tracks", []):
|
|
||||||
srclang = sub.get("srclang")
|
|
||||||
if not srclang or srclang == "thumbnails":
|
|
||||||
continue
|
|
||||||
|
|
||||||
subtitle_track = Subtitle(
|
|
||||||
id_=sub["id"],
|
|
||||||
url=sub["src"],
|
|
||||||
codec=Subtitle.Codec.WebVTT,
|
|
||||||
language=Language.get(srclang),
|
|
||||||
sdh=True, # Kocowa subs are SDH - mark them as such
|
|
||||||
forced=False,
|
|
||||||
)
|
|
||||||
tracks.add(subtitle_track)
|
|
||||||
|
|
||||||
return tracks
|
|
||||||
|
|
||||||
def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
|
|
||||||
r = self.session.post(
|
|
||||||
self.widevine_license_url,
|
|
||||||
data=challenge,
|
|
||||||
headers={
|
|
||||||
"BCOV-Auth": self.playback_token,
|
|
||||||
"Content-Type": "application/octet-stream",
|
|
||||||
"Origin": "https://www.kocowa.com",
|
|
||||||
"Referer": "https://www.kocowa.com/",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
return r.content
|
|
||||||
|
|
||||||
def search(self) -> List[SearchResult]:
|
|
||||||
url = "https://prod-fms.kocowa.com/api/v01/fe/gks/autocomplete"
|
|
||||||
params = {
|
|
||||||
"search_category": "All",
|
|
||||||
"search_input": self.title_id,
|
|
||||||
"include_webtoon": "true",
|
|
||||||
}
|
|
||||||
|
|
||||||
r = self.session.get(
|
|
||||||
url,
|
|
||||||
params=params,
|
|
||||||
headers={
|
|
||||||
"Authorization": self.access_token,
|
|
||||||
"Origin": "https://www.kocowa.com ",
|
|
||||||
"Referer": "https://www.kocowa.com/ ",
|
|
||||||
}
|
|
||||||
)
|
|
||||||
r.raise_for_status()
|
|
||||||
response = r.json()
|
|
||||||
contents = response.get("object", {}).get("contents", [])
|
|
||||||
|
|
||||||
results = []
|
|
||||||
for item in contents:
|
|
||||||
if item.get("detail_type") != "season":
|
|
||||||
continue
|
|
||||||
|
|
||||||
meta = item["meta"]
|
|
||||||
title_en = meta["title"].get("en") or "[No Title]"
|
|
||||||
description_en = meta["description"].get("en") or ""
|
|
||||||
show_id = str(item["id"])
|
|
||||||
|
|
||||||
results.append(
|
|
||||||
SearchResult(
|
|
||||||
id_=show_id,
|
|
||||||
title=title_en,
|
|
||||||
description=description_en,
|
|
||||||
label="season",
|
|
||||||
url=f"https://www.kocowa.com/en_us/season/{show_id}/"
|
|
||||||
)
|
|
||||||
)
|
|
||||||
return results
|
|
||||||
|
|
||||||
def get_chapters(self, title: Title_T) -> list:
|
|
||||||
return []
|
|
||||||
|
|
||||||
@ -1,5 +0,0 @@
|
|||||||
endpoints:
|
|
||||||
login: "https://prod-sgwv3.kocowa.com/api/v01/user/signin"
|
|
||||||
middleware_auth: "https://middleware.bcmw.kocowa.com/authenticate-user"
|
|
||||||
metadata: "https://prod-fms.kocowa.com/api/v01/fe/content/get?id={title_id}"
|
|
||||||
authorize: "https://middleware.bcmw.kocowa.com/api/playback/authorize/{episode_id}"
|
|
||||||
396
MUBI/__init__.py
396
MUBI/__init__.py
@ -1,396 +0,0 @@
|
|||||||
import json
|
|
||||||
import re
|
|
||||||
import uuid
|
|
||||||
from http.cookiejar import CookieJar
|
|
||||||
from typing import Optional, Generator
|
|
||||||
from langcodes import Language
|
|
||||||
import base64
|
|
||||||
import click
|
|
||||||
from unshackle.core.constants import AnyTrack
|
|
||||||
from unshackle.core.credential import Credential
|
|
||||||
from unshackle.core.manifests import DASH
|
|
||||||
from unshackle.core.service import Service
|
|
||||||
from unshackle.core.titles import Episode, Movie, Movies, Title_T, Titles_T, Series
|
|
||||||
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
|
||||||
|
|
||||||
|
|
||||||
class MUBI(Service):
|
|
||||||
"""
|
|
||||||
Service code for MUBI (mubi.com)
|
|
||||||
Version: 1.2.0
|
|
||||||
|
|
||||||
Authorization: Required cookies (lt token + session)
|
|
||||||
Security: FHD @ L3 (Widevine)
|
|
||||||
|
|
||||||
Supports:
|
|
||||||
• Series ↦ https://mubi.com/en/nl/series/twin-peaks
|
|
||||||
• Movies ↦ https://mubi.com/en/nl/films/the-substance
|
|
||||||
|
|
||||||
"""
|
|
||||||
SERIES_TITLE_RE = r"^https?://(?:www\.)?mubi\.com(?:/[^/]+)*?/series/(?P<series_slug>[^/]+)(?:/season/(?P<season_slug>[^/]+))?$"
|
|
||||||
TITLE_RE = r"^(?:https?://(?:www\.)?mubi\.com)(?:/[^/]+)*?/films/(?P<slug>[^/?#]+)$"
|
|
||||||
NO_SUBTITLES = False
|
|
||||||
|
|
||||||
@staticmethod
|
|
||||||
@click.command(name="MUBI", short_help="https://mubi.com")
|
|
||||||
@click.argument("title", type=str)
|
|
||||||
@click.pass_context
|
|
||||||
def cli(ctx, **kwargs):
|
|
||||||
return MUBI(ctx, **kwargs)
|
|
||||||
|
|
||||||
def __init__(self, ctx, title: str):
|
|
||||||
super().__init__(ctx)
|
|
||||||
|
|
||||||
m_film = re.match(self.TITLE_RE, title)
|
|
||||||
m_series = re.match(self.SERIES_TITLE_RE, title)
|
|
||||||
|
|
||||||
if not m_film and not m_series:
|
|
||||||
raise ValueError(f"Invalid MUBI URL: {title}")
|
|
||||||
|
|
||||||
self.is_series = bool(m_series)
|
|
||||||
self.slug = m_film.group("slug") if m_film else None
|
|
||||||
self.series_slug = m_series.group("series_slug") if m_series else None
|
|
||||||
self.season_slug = m_series.group("season_slug") if m_series else None
|
|
||||||
|
|
||||||
self.film_id: Optional[int] = None
|
|
||||||
self.lt_token: Optional[str] = None
|
|
||||||
self.session_token: Optional[str] = None
|
|
||||||
self.user_id: Optional[int] = None
|
|
||||||
self.country_code: Optional[str] = None
|
|
||||||
self.anonymous_user_id: Optional[str] = None
|
|
||||||
self.default_country: Optional[str] = None
|
|
||||||
self.reels_data: Optional[list] = None
|
|
||||||
|
|
||||||
# Store CDM reference
|
|
||||||
self.cdm = ctx.obj.cdm
|
|
||||||
|
|
||||||
if self.config is None:
|
|
||||||
raise EnvironmentError("Missing service config for MUBI.")
|
|
||||||
|
|
||||||
def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
|
|
||||||
super().authenticate(cookies, credential)
|
|
||||||
|
|
||||||
try:
|
|
||||||
r_ip = self.session.get(self.config["endpoints"]["ip_geolocation"], timeout=5)
|
|
||||||
r_ip.raise_for_status()
|
|
||||||
ip_data = r_ip.json()
|
|
||||||
if ip_data.get("country"):
|
|
||||||
self.default_country = ip_data["country"]
|
|
||||||
self.log.debug(f"Detected country from IP: {self.default_country}")
|
|
||||||
else:
|
|
||||||
self.log.warning("IP geolocation response did not contain a country code.")
|
|
||||||
except Exception as e:
|
|
||||||
raise ValueError(f"Failed to fetch IP geolocation: {e}")
|
|
||||||
|
|
||||||
if not cookies:
|
|
||||||
raise PermissionError("MUBI requires login cookies.")
|
|
||||||
|
|
||||||
# Extract essential tokens
|
|
||||||
lt_cookie = next((c for c in cookies if c.name == "lt"), None)
|
|
||||||
session_cookie = next((c for c in cookies if c.name == "_mubi_session"), None)
|
|
||||||
snow_id_cookie = next((c for c in cookies if c.name == "_snow_id.c006"), None)
|
|
||||||
|
|
||||||
if not lt_cookie:
|
|
||||||
raise PermissionError("Missing 'lt' cookie (Bearer token).")
|
|
||||||
if not session_cookie:
|
|
||||||
raise PermissionError("Missing '_mubi_session' cookie.")
|
|
||||||
|
|
||||||
self.lt_token = lt_cookie.value
|
|
||||||
self.session_token = session_cookie.value
|
|
||||||
|
|
||||||
# Extract anonymous_user_id from _snow_id.c006
|
|
||||||
if snow_id_cookie and "." in snow_id_cookie.value:
|
|
||||||
self.anonymous_user_id = snow_id_cookie.value.split(".")[0]
|
|
||||||
else:
|
|
||||||
self.anonymous_user_id = str(uuid.uuid4())
|
|
||||||
self.log.warning(f"No _snow_id.c006 cookie found — generated new anonymous_user_id: {self.anonymous_user_id}")
|
|
||||||
|
|
||||||
base_headers = {
|
|
||||||
"User-Agent": "Mozilla/5.0 (X11; Linux x86_64) Firefox/143.0",
|
|
||||||
"Origin": "https://mubi.com",
|
|
||||||
"Referer": "https://mubi.com/",
|
|
||||||
"CLIENT": "web",
|
|
||||||
"Client-Accept-Video-Codecs": "h265,vp9,h264",
|
|
||||||
"Client-Accept-Audio-Codecs": "aac",
|
|
||||||
"Authorization": f"Bearer {self.lt_token}",
|
|
||||||
"ANONYMOUS_USER_ID": self.anonymous_user_id,
|
|
||||||
"Client-Country": self.default_country,
|
|
||||||
"Sec-Fetch-Dest": "empty",
|
|
||||||
"Sec-Fetch-Mode": "cors",
|
|
||||||
"Sec-Fetch-Site": "same-site",
|
|
||||||
"Pragma": "no-cache",
|
|
||||||
"Cache-Control": "no-cache",
|
|
||||||
}
|
|
||||||
|
|
||||||
self.session.headers.update(base_headers)
|
|
||||||
|
|
||||||
r_account = self.session.get(self.config["endpoints"]["account"])
|
|
||||||
if not r_account.ok:
|
|
||||||
raise PermissionError(f"Failed to fetch MUBI account: {r_account.status_code} {r_account.text}")
|
|
||||||
|
|
||||||
account_data = r_account.json()
|
|
||||||
self.user_id = account_data.get("id")
|
|
||||||
self.country_code = (account_data.get("country") or {}).get("code", "NL")
|
|
||||||
|
|
||||||
self.session.headers["Client-Country"] = self.country_code
|
|
||||||
self.GEOFENCE = (self.country_code,)
|
|
||||||
|
|
||||||
self._bind_anonymous_user()
|
|
||||||
|
|
||||||
self.log.info(
|
|
||||||
f"Authenticated as user {self.user_id}, "
|
|
||||||
f"country: {self.country_code}, "
|
|
||||||
f"anonymous_id: {self.anonymous_user_id}"
|
|
||||||
)
|
|
||||||
|
|
||||||
def _bind_anonymous_user(self):
|
|
||||||
try:
|
|
||||||
r = self.session.put(
|
|
||||||
self.config["endpoints"]["current_user"],
|
|
||||||
json={"anonymous_user_uuid": self.anonymous_user_id},
|
|
||||||
headers={"Content-Type": "application/json"}
|
|
||||||
)
|
|
||||||
if r.ok:
|
|
||||||
self.log.debug("Anonymous user ID successfully bound to account.")
|
|
||||||
else:
|
|
||||||
self.log.warning(f"Failed to bind anonymous_user_uuid: {r.status_code}")
|
|
||||||
except Exception as e:
|
|
||||||
self.log.warning(f"Exception while binding anonymous_user_uuid: {e}")
|
|
||||||
|
|
||||||
def get_titles(self) -> Titles_T:
|
|
||||||
if self.is_series:
|
|
||||||
return self._get_series_titles()
|
|
||||||
else:
|
|
||||||
return self._get_film_title()
|
|
||||||
|
|
||||||
def _get_film_title(self) -> Movies:
|
|
||||||
url = self.config["endpoints"]["film_by_slug"].format(slug=self.slug)
|
|
||||||
r = self.session.get(url)
|
|
||||||
r.raise_for_status()
|
|
||||||
data = r.json()
|
|
||||||
|
|
||||||
self.film_id = data["id"]
|
|
||||||
|
|
||||||
# Fetch reels to get definitive language code and cache the response
|
|
||||||
url_reels = self.config["endpoints"]["reels"].format(film_id=self.film_id)
|
|
||||||
r_reels = self.session.get(url_reels)
|
|
||||||
r_reels.raise_for_status()
|
|
||||||
self.reels_data = r_reels.json()
|
|
||||||
|
|
||||||
# Extract original language from the first audio track of the first reel
|
|
||||||
original_language_code = "en" # Default fallback
|
|
||||||
if self.reels_data and self.reels_data[0].get("audio_tracks"):
|
|
||||||
first_audio_track = self.reels_data[0]["audio_tracks"][0]
|
|
||||||
if "language_code" in first_audio_track:
|
|
||||||
original_language_code = first_audio_track["language_code"]
|
|
||||||
self.log.debug(f"Detected original language from reels: '{original_language_code}'")
|
|
||||||
|
|
||||||
genres = ", ".join(data.get("genres", [])) or "Unknown"
|
|
||||||
description = (
|
|
||||||
data.get("default_editorial_html", "")
|
|
||||||
.replace("<p>", "").replace("</p>", "").replace("<em>", "").replace("</em>", "").strip()
|
|
||||||
)
|
|
||||||
year = data.get("year")
|
|
||||||
name = data.get("title", "Unknown")
|
|
||||||
|
|
||||||
movie = Movie(
|
|
||||||
id_=self.film_id,
|
|
||||||
service=self.__class__,
|
|
||||||
name=name,
|
|
||||||
year=year,
|
|
||||||
description=description,
|
|
||||||
language=Language.get(original_language_code),
|
|
||||||
data=data,
|
|
||||||
)
|
|
||||||
|
|
||||||
return Movies([movie])
|
|
||||||
|
|
||||||
def _get_series_titles(self) -> Titles_T:
|
|
||||||
# Fetch series metadata
|
|
||||||
        series_url = self.config["endpoints"]["series"].format(series_slug=self.series_slug)
        r_series = self.session.get(series_url)
        r_series.raise_for_status()
        series_data = r_series.json()

        episodes = []

        # If a season is explicitly specified, only fetch that season
        if self.season_slug:
            eps_url = self.config["endpoints"]["season_episodes"].format(
                series_slug=self.series_slug,
                season_slug=self.season_slug
            )
            r_eps = self.session.get(eps_url)
            if r_eps.status_code == 404:
                raise ValueError(f"Season '{self.season_slug}' not found.")
            r_eps.raise_for_status()
            episodes_data = r_eps.json().get("episodes", [])
            self._add_episodes_to_list(episodes, episodes_data, series_data)
        else:
            # No season specified, fetch all seasons
            seasons = series_data.get("seasons", [])
            if not seasons:
                raise ValueError("No seasons found for this series.")

            for season in seasons:
                season_slug = season["slug"]
                eps_url = self.config["endpoints"]["season_episodes"].format(
                    series_slug=self.series_slug,
                    season_slug=season_slug
                )

                self.log.debug(f"Fetching episodes for season: {season_slug}")

                r_eps = self.session.get(eps_url)

                # Skip the season if it returns 404 or is empty
                if r_eps.status_code == 404:
                    self.log.info(f"Season '{season_slug}' not available, skipping.")
                    continue

                r_eps.raise_for_status()
                episodes_data = r_eps.json().get("episodes", [])

                if not episodes_data:
                    self.log.info(f"No episodes found in season '{season_slug}'.")
                    continue

                self._add_episodes_to_list(episodes, episodes_data, series_data)

        return Series(sorted(episodes, key=lambda x: (x.season, x.number)))

    def _add_episodes_to_list(self, episodes_list: list, episodes_data: list, series_data: dict):
        """Helper to avoid code duplication when adding episodes."""
        for ep in episodes_data:
            # Use the episode's own language detection via its consumable.playback_languages
            playback_langs = ep.get("consumable", {}).get("playback_languages", {})
            audio_langs = playback_langs.get("audio_options", ["English"])
            lang_code = audio_langs[0].split()[0].lower() if audio_langs else "en"

            try:
                detected_lang = Language.get(lang_code)
            except Exception:
                # Display names like "english" are not valid BCP-47 tags, so fall back to English
                detected_lang = Language.get("en")

            episodes_list.append(Episode(
                id_=ep["id"],
                service=self.__class__,
                title=series_data["title"],  # Series title
                season=ep["episode"]["season_number"],
                number=ep["episode"]["number"],
                name=ep["title"],  # Episode title
                description=ep.get("short_synopsis", ""),
                language=detected_lang,
                data=ep,  # Full episode data for later use in get_tracks
            ))

    def get_tracks(self, title: Title_T) -> Tracks:
        film_id = getattr(title, "id", None)
        if not film_id:
            raise RuntimeError("Title ID not found.")

        # For series episodes, we don't have reels cached, so skip reel-based logic
        url_view = self.config["endpoints"]["initiate_viewing"].format(film_id=film_id)
        r_view = self.session.post(url_view, json={}, headers={"Content-Type": "application/json"})
        r_view.raise_for_status()
        view_data = r_view.json()
        reel_id = view_data["reel_id"]

        # For films, use reels data for language/audio mapping
        if not self.is_series:
            if not self.film_id:
                raise RuntimeError("film_id not set. Call get_titles() first.")

            if not self.reels_data:
                self.log.warning("Reels data not cached, fetching now.")
                url_reels = self.config["endpoints"]["reels"].format(film_id=film_id)
                r_reels = self.session.get(url_reels)
                r_reels.raise_for_status()
                reels = r_reels.json()
            else:
                reels = self.reels_data

            reel = next((r for r in reels if r["id"] == reel_id), reels[0])
        else:
            # For episodes, no reel-based logic is needed, just proceed
            pass

        # Request the secure streaming URL; this works for both films and episodes
        url_secure = self.config["endpoints"]["secure_url"].format(film_id=film_id)
        r_secure = self.session.get(url_secure)
        r_secure.raise_for_status()
        secure_data = r_secure.json()

        manifest_url = None
        for entry in secure_data.get("urls", []):
            if entry.get("content_type") == "application/dash+xml":
                manifest_url = entry["src"]
                break

        if not manifest_url:
            raise ValueError("No DASH manifest URL found.")

        # Parse DASH, using title.language as the fallback language
        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        # Add subtitles
        subtitles = []
        for sub in secure_data.get("text_track_urls", []):
            lang_code = sub.get("language_code", "und")
            vtt_url = sub.get("url")
            if not vtt_url:
                continue

            is_original = lang_code == title.language.language

            subtitles.append(
                Subtitle(
                    id_=sub["id"],
                    url=vtt_url,
                    language=Language.get(lang_code),
                    is_original_lang=is_original,
                    codec=Subtitle.Codec.WebVTT,
                    name=sub.get("display_name", lang_code.upper()),
                    forced=False,
                    sdh=False,
                )
            )
        tracks.subtitles = subtitles

        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.user_id:
            raise RuntimeError("user_id not set, authenticate first.")

        dt_custom_data = {
            "userId": self.user_id,
            "sessionId": self.lt_token,
            "merchant": "mubi"
        }

        dt_custom_data_b64 = base64.b64encode(json.dumps(dt_custom_data).encode()).decode()

        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "*/*",
            "Origin": "https://mubi.com",
            "Referer": "https://mubi.com/",
            "dt-custom-data": dt_custom_data_b64,
        }

        r = self.session.post(
            self.config["endpoints"]["license"],
            data=challenge,
            headers=headers,
        )
        r.raise_for_status()
        license_data = r.json()
        if license_data.get("status") != "OK":
            raise PermissionError(f"DRM license error: {license_data}")
        return base64.b64decode(license_data["license"])
@ -1,12 +0,0 @@
endpoints:
  account: "https://api.mubi.com/v4/account"
  current_user: "https://api.mubi.com/v4/current_user"
  film_by_slug: "https://api.mubi.com/v4/films/{slug}"
  playback_languages: "https://api.mubi.com/v4/films/{film_id}/playback_languages"
  initiate_viewing: "https://api.mubi.com/v4/films/{film_id}/viewing?parental_lock_enabled=true"
  reels: "https://api.mubi.com/v4/films/{film_id}/reels"
  secure_url: "https://api.mubi.com/v4/films/{film_id}/viewing/secure_url"
  license: "https://lic.drmtoday.com/license-proxy-widevine/cenc/"
  ip_geolocation: "https://directory.cookieyes.com/api/v1/ip"
  series: "https://api.mubi.com/v4/series/{series_slug}"
  season_episodes: "https://api.mubi.com/v4/series/{series_slug}/seasons/{season_slug}/episodes/available"
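The service code above fills these endpoint templates with plain str.format(). A minimal sketch of that pattern, assuming a config dict shaped like the YAML above (the IDs and file name are made up for illustration):

import yaml

config = yaml.safe_load(open("config.yaml"))  # dict of endpoint templates, as above

# Placeholders in the templates ({film_id}, {series_slug}, ...) are filled by name,
# exactly as the service code does in get_titles()/get_tracks().
secure_url = config["endpoints"]["secure_url"].format(film_id=12345)
season_url = config["endpoints"]["season_episodes"].format(
    series_slug="some-series", season_slug="season-1"
)
print(secure_url)  # https://api.mubi.com/v4/films/12345/viewing/secure_url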
456
NPO/__init__.py
@ -1,5 +1,8 @@
 import json
 import re
+import time
+import base64
+import hashlib
 from http.cookiejar import CookieJar
 from typing import Optional
 from langcodes import Language
@ -12,7 +15,7 @@ from unshackle.core.credential import Credential
 from unshackle.core.manifests import DASH
 from unshackle.core.service import Service
 from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
-from unshackle.core.tracks import Chapter, Tracks, Subtitle
+from unshackle.core.tracks import Chapter, Tracks, Subtitle, Chapters


 class NPO(Service):
@ -27,19 +30,18 @@ class NPO(Service):

     Supports:
     • Series ↦ https://npo.nl/start/serie/{slug}
-    • Movies ↦ https://npo.nl/start/video/{slug}
+    • Movies ↦ https://npo.nl/start/start/video/{slug}

     Note: a movie inside a series can be downloaded as a movie by converting the URL to:
-    https://npo.nl/start/video/slug
+    https://npo.nl/start/start/video/slug

     To switch between Widevine and PlayReady, change DrmType in config.yaml to either widevine or playready
     """

     TITLE_RE = (
         r"^(?:https?://(?:www\.)?npo\.nl/start/)?"
-        r"(?:(?P<type>video|serie)/(?P<slug>[^/]+)"
-        r"(?:/afleveringen)?"
-        r"(?:/seizoen-(?P<season>[^/]+)/(?P<episode>[^/]+)/afspelen)?)?$"
+        r"(?:(?P<type>video|serie|afspelen)/(?P<slug>[^/]+)"
+        r"(?:/(?P<path>.*))?)?$"
     )
     GEOFENCE = ("NL",)
     NO_SUBTITLES = False
@ -54,6 +56,11 @@ class NPO(Service):
     def __init__(self, ctx, title: str):
         super().__init__(ctx)

+        self.slug = None
+        self.kind = None
+        self.season_slug = None
+        self.episode_slug = None
+
         m = re.match(self.TITLE_RE, title)
         if not m:
             self.search_term = title
@ -61,12 +68,35 @@ class NPO(Service):

         self.slug = m.group("slug")
         self.kind = m.group("type") or "video"
-        self.season_slug = m.group("season")
-        self.episode_slug = m.group("episode")
+        path = m.group("path") or ""
+
+        if self.kind == "afspelen":
+            self.kind = "video"
+
+        if "afleveringen" in path:
+            self.kind = "serie"
+
+        season_match = re.search(r"seizoen-([^/]+)", path)
+        if season_match:
+            self.season_slug = season_match.group(1)
+
+        episode_match = re.search(r"seizoen-([^/]+)/([^/]+)/afspelen", path)
+        if episode_match:
+            self.season_slug = episode_match.group(1)
+            self.episode_slug = episode_match.group(2)
+
+        self.original_title_url = title  # Store the original URL for later use

         if self.config is None:
             raise EnvironmentError("Missing service config.")

+        # Construct the X-Nos header: md5(secret prefix + user agent), followed by
+        # the base64-encoded user agent, salted with the current Unix time
+        salt = int(time.time())
+        user_agent = f"nos;{salt};Google/Nexus;Android/6.0;nl.nos.app/5.1.1"
+        string_to_hash = f";UB}}7Gaji==JPHtjX3@c{user_agent}"
+        md5_hash = hashlib.md5(string_to_hash.encode('utf-8')).hexdigest()
+        xnos = md5_hash + base64.b64encode(user_agent.encode('utf-8')).decode('utf-8')
+        self.session.headers['X-Nos'] = xnos
+
         # Store CDM reference
         self.cdm = ctx.obj.cdm

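The X-Nos construction added above is self-contained enough to test in isolation. A minimal sketch, assuming the secret prefix and spoofed user-agent format shown in the diff are still what the NOS API expects:

import base64
import hashlib
import time

def build_x_nos(salt: int | None = None) -> str:
    # Salt the spoofed Android user agent with the current Unix time
    salt = salt or int(time.time())
    user_agent = f"nos;{salt};Google/Nexus;Android/6.0;nl.nos.app/5.1.1"
    # md5 over a fixed secret prefix plus the user agent...
    digest = hashlib.md5(f";UB}}7Gaji==JPHtjX3@c{user_agent}".encode()).hexdigest()
    # ...concatenated with the base64 of the user agent itself
    return digest + base64.b64encode(user_agent.encode()).decode()

print(build_x_nos(1700000000))  # deterministic for a fixed salt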
@ -93,9 +123,12 @@ class NPO(Service):
         else:
             self.log.warning("NPO auth check failed.")

-    def _fetch_next_data(self, slug: str) -> dict:
+    def _fetch_next_data(self, slug: str, full_url: Optional[str] = None) -> dict:
         """Fetch and parse __NEXT_DATA__ from video/series page."""
-        url = f"https://npo.nl/start/{'video' if self.kind == 'video' else 'serie'}/{slug}"
+        if full_url:
+            url = full_url
+        else:
+            url = f"https://npo.nl/start/{'video' if self.kind == 'video' else 'serie'}/{slug}"
         r = self.session.get(url)
         r.raise_for_status()
         match = re.search(r'<script id="__NEXT_DATA__" type="application/json">({.*?})</script>', r.text, re.DOTALL)
@ -103,58 +136,256 @@ class NPO(Service):
             raise RuntimeError("Failed to extract __NEXT_DATA__")
         return json.loads(match.group(1))

+    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
+        license_url_base = self.config["endpoints"]["license"]
+        # Extract drmToken from track.data, where the stream response was stored in get_tracks
+        npo_stream_data = track.data.get("npo_stream_data", {})
+        stream_details = npo_stream_data.get("stream", {})
+        drm_token = stream_details.get("drmToken") or stream_details.get("token")
+
+        if not drm_token:
+            raise ValueError("DRM token not found in title data for license request.")
+
+        # Construct the license URL with a custom_data query parameter
+        license_url = f"{license_url_base}?custom_data={drm_token}"
+
+        # As per the working DL.py script, only Content-Type is sent for the license request
+        headers = {'Content-Type': 'application/octet-stream'}
+
+        self.log.debug(f"Requesting Widevine license from {license_url} (with custom_data) using minimal headers...")
+
+        # The challenge (the raw Widevine license request) is sent as binary data.
+        r = self.session.post(license_url, data=challenge, headers=headers)
+        r.raise_for_status()  # Raise an exception for HTTP errors
+
+        self.log.debug(f"Received Widevine license response (status: {r.status_code}, size: {len(r.content)} bytes)")
+
+        # The license response is returned as raw bytes.
+        return r.content
+
     def get_titles(self) -> Titles_T:
-        next_data = self._fetch_next_data(self.slug)
-        build_id = next_data["buildId"]  # keep if needed elsewhere
-
-        page_props = next_data["props"]["pageProps"]
-        queries = page_props["dehydratedState"]["queries"]
-
-        def get_data(fragment: str):
-            return next((q["state"]["data"] for q in queries if fragment in str(q.get("queryKey", ""))), None)
-
-        if self.kind == "serie":
-            series_data = get_data("series:detail-")
-            if not series_data:
-                raise ValueError("Series metadata not found")
+        # Handle 'afspelen' URLs directly for specific episodes
+        if self.kind == "video" and not self.season_slug and not self.episode_slug and self.original_title_url:
+            try:
+                # Use the original URL to fetch __NEXT_DATA__
+                next_data = self._fetch_next_data(self.slug, full_url=self.original_title_url)
+
+                product_info = None
+                # Check the main program data in pageProps
+                page_props = next_data.get("props", {}).get("pageProps", {})
+                if page_props:
+                    program_data = page_props.get("program", {})
+                    if program_data and program_data.get("productId"):
+                        product_info = program_data
+                    else:
+                        # Fall back to video data if not found in program
+                        video_data = page_props.get("video", {})
+                        if video_data and video_data.get("productId"):
+                            product_info = video_data
+
+                # Fall back to the dehydrated state queries if not found in pageProps directly
+                if product_info is None:
+                    queries = next_data.get("props", {}).get("pageProps", {}).get("dehydratedState", {}).get("queries", [])
+                    for item in queries:
+                        state = item.get("state", {})
+                        if state:
+                            episode_data = state.get('data', {})
+                            if isinstance(episode_data, dict) and episode_data.get('productId'):
+                                product_info = episode_data
+                                break
+
+                if product_info and product_info.get("productId"):
+                    # Check if it's part of a series
+                    if product_info.get("series"):
+                        season_number = product_info.get("season", {}).get("seasonKey")
+                        if season_number is None and product_info.get("season", {}).get("slug"):
+                            season_match = re.search(r"seizoen-(\d+)", product_info["season"]["slug"])
+                            if season_match:
+                                season_number = int(season_match.group(1))
+
+                        return Series([
+                            Episode(
+                                id_=product_info["productId"],
+                                service=self.__class__,
+                                title=product_info["series"]["title"],
+                                season=season_number,
+                                number=product_info.get("programKey"),
+                                name=product_info["title"],
+                                description=(product_info.get("synopsis", {}) or {}).get("long", ""),
+                                language=Language.get("nl"),
+                                data=product_info,
+                            )
+                        ])
+                    else:
+                        # It's a standalone movie/video
+                        return Movies([
+                            Movie(
+                                id_=product_info["productId"],
+                                service=self.__class__,
+                                name=product_info["title"],
+                                description=(product_info.get("synopsis", {}) or {}).get("long", ""),
+                                year=(int(product_info["firstBroadcastDate"]) // 31536000 + 1970) if product_info.get("firstBroadcastDate") else None,
+                                language=Language.get("nl"),
+                                data=product_info,
+                            )
+                        ])
+            except Exception as e:
+                self.log.debug(f"Direct __NEXT_DATA__ fetch for afspelen URL failed: {e}")
+
+        # Prioritize broadcast search for /afspelen/ URLs
+        if self.kind != 'serie' and not self.season_slug and not self.episode_slug:
+            search_url_broadcasts = f"https://npo.nl/start/api/domain/search-collection-items?searchType=broadcasts&searchQuery={self.slug}&subscriptionType=anonymous"
+            broadcast_data = self.session.get(search_url_broadcasts).json()
+
+            if broadcast_data.get("items"):
+                item_data = broadcast_data["items"][0]
+
+                # If the item has a 'series' key, it's an episode of a series
+                if item_data.get("series"):
+                    season_number = item_data.get("season", {}).get("seasonKey")
+                    if season_number is None and item_data.get("season", {}).get("slug"):
+                        # Fallback: extract the season number from a slug like "seizoen-5"
+                        season_match = re.search(r"seizoen-(\d+)", item_data["season"]["slug"])
+                        if season_match:
+                            season_number = int(season_match.group(1))
+
+                    return Series([
+                        Episode(
+                            id_=item_data["productId"],
+                            service=self.__class__,
+                            title=item_data["series"]["title"],  # Use series title as main title
+                            season=season_number,
+                            number=item_data.get("programKey"),
+                            name=item_data["title"],  # Use episode title as episode name
+                            description=(item_data.get("synopsis", {}) or {}).get("long", ""),
+                            language=Language.get("nl"),
+                            data=item_data,
+                        )
+                    ])
+                else:
+                    # Otherwise, it's a standalone movie
+                    return Movies([
+                        Movie(
+                            id_=item_data["productId"],
+                            service=self.__class__,
+                            name=item_data["title"],
+                            description=(item_data.get("synopsis", {}) or {}).get("long", ""),
+                            year=(int(item_data["firstBroadcastDate"]) // 31536000 + 1970) if item_data.get("firstBroadcastDate") else None,
+                            language=Language.get("nl"),
+                            data=item_data,
+                        )
+                    ])
+
+        # Fall back to series search if not an /afspelen/ single item, or if season/episode slugs are present
+        search_url_series = f"https://npo.nl/start/api/domain/search-collection-items?searchType=series&searchQuery={self.slug}&subscriptionType=anonymous"
+        series_data = self.session.get(search_url_series).json()
+
+        if series_data.get("items"):
+            # It's a series
+            series_info = series_data["items"][0]
+            series_slug = series_info["slug"]
+            series_type = series_info["type"]
+            series_guid = series_info["guid"]
+
+            seasons_url = f"https://npo.nl/start/api/domain/series-seasons?slug={series_slug}&type={series_type}"
+            seasons_data = self.session.get(seasons_url).json()

             episodes = []
-            seasons = get_data("series:seasons-") or []
-            for season in seasons:
-                eps = get_data(f"programs:season-{season['guid']}") or []
-                for e in eps:
+            for season in seasons_data:
+                if self.season_slug and str(season.get("seasonKey")) != self.season_slug and season.get('slug') != f'seizoen-{self.season_slug}':
+                    continue
+
+                season_guid = season["guid"]
+                episodes_url = f"https://npo.nl/start/api/domain/programs-by-season?guid={season_guid}"
+                episodes_data = self.session.get(episodes_url).json()
+
+                for episode_data in episodes_data:
                     episodes.append(
                         Episode(
-                            id_=e["guid"],
+                            id_=episode_data["productId"],
                             service=self.__class__,
-                            title=series_data["title"],
-                            season=int(season["seasonKey"]),
-                            number=int(e["programKey"]),
-                            name=e["title"],
-                            description=(e.get("synopsis", {}) or {}).get("long", ""),
+                            title=series_info["title"],
+                            season=episode_data.get("season", {}).get("seasonKey"),
+                            number=episode_data.get("programKey"),
+                            name=episode_data["title"],
+                            description=(episode_data.get("synopsis", {}) or {}).get("long", ""),
                             language=Language.get("nl"),
-                            data=e,
+                            data=episode_data,
                         )
                     )
-            return Series(episodes)

-        # Movie
-        item = get_data("program:detail-") or queries[0]["state"]["data"]
-        synopsis = item.get("synopsis", {})
-        desc = synopsis.get("long") or synopsis.get("short", "") if isinstance(synopsis, dict) else str(synopsis)
-        year = (int(item["firstBroadcastDate"]) // 31536000 + 1970) if item.get("firstBroadcastDate") else None
-
-        return Movies([
-            Movie(
-                id_=item["guid"],
-                service=self.__class__,
-                name=item["title"],
-                description=desc,
-                year=year,
-                language=Language.get("nl"),
-                data=item,
-            )
-        ])
+            if self.episode_slug:
+                # Filter for the specific episode requested
+                filtered_episodes = [ep for ep in episodes if ep.data.get("slug") == self.episode_slug]
+                return Series(filtered_episodes)
+            else:
+                return Series(episodes)
+
+        # Fallback: if neither broadcast nor series search returned items,
+        # try to fetch __NEXT_DATA__ for the video page (assuming it's a movie/standalone video)
+        try:
+            # Ensure self.kind is set to 'video' so _fetch_next_data constructs the correct URL
+            original_kind = self.kind
+            self.kind = "video"
+            next_data = self._fetch_next_data(self.slug)
+            self.kind = original_kind  # Restore original kind
+
+            # Try to find the product info in the dehydrated state
+            product_info = None
+            queries = next_data.get("props", {}).get("pageProps", {}).get("dehydratedState", {}).get("queries", [])
+            for item in queries:
+                state = item.get("state", {})
+                if state:
+                    episode_data = state.get('data', {})
+                    if isinstance(episode_data, dict):
+                        # NPO.py uses the slug to find the item, so do the same here
+                        if episode_data.get('slug') == self.slug:
+                            product_info = episode_data
+                            break
+
+            # Fallback if not found in the dehydratedState queries (different Next.js version or structure)
+            if product_info is None:
+                page_props = next_data.get("props", {}).get("pageProps", {})
+                if page_props:
+                    # Check for program data
+                    program_data = page_props.get("program", {})
+                    if program_data and program_data.get("slug") == self.slug:
+                        product_info = program_data
+                    else:
+                        # Check for direct video data
+                        video_data = page_props.get("video", {})
+                        if video_data and video_data.get("slug") == self.slug:
+                            product_info = video_data
+
+            if product_info and product_info.get("productId"):
+                # If it has a 'series' key, it's likely a series episode, not a standalone movie
+                if not product_info.get("series"):
+                    return Movies([
+                        Movie(
+                            id_=product_info["productId"],
+                            service=self.__class__,
+                            name=product_info.get("title", self.slug),  # Use slug as fallback title
+                            description=product_info.get("synopsis", {}).get("long", ""),
+                            year=(int(product_info["firstBroadcastDate"]) // 31536000 + 1970) if product_info.get("firstBroadcastDate") else None,
+                            language=Language.get("nl"),  # NPO is Dutch
+                            data=product_info,
+                        )
+                    ])
+                else:
+                    self.log.debug(f"Content for {self.slug} identified as a series episode via __NEXT_DATA__ fallback, not a standalone movie.")
+                    # If it's a series episode, we don't want to treat it as a movie here.
+                    # The series search path should handle it; this fallback is for strict movies.
+                    # For now, return empty if it's a series episode.
+                    return []
+        except Exception as e:
+            self.log.debug(f"Fallback to __NEXT_DATA__ for video failed: {e}")
+
+        # If neither broadcast, series, nor the __NEXT_DATA__ fallback returned items, return an empty list
+        return []
+
+    def get_chapters(self, title: Title_T) -> Chapters:
+        return []

     def get_tracks(self, title: Title_T) -> Tracks:
         product_id = title.data.get("productId")
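A note on the year computation used in several places above: int(ts) // 31536000 + 1970 divides epoch seconds by a 365-day year, so it can drift by a day or two across leap years. A minimal sketch of the exact alternative, assuming firstBroadcastDate really is Unix epoch seconds:

from datetime import datetime, timezone

ts = 1262304000  # 2010-01-01T00:00:00Z

approx_year = int(ts) // 31536000 + 1970  # 365-day-year approximation
exact_year = datetime.fromtimestamp(ts, tz=timezone.utc).year  # calendar-accurate

print(approx_year, exact_year)  # both 2010 here; near New Year they can disagree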
@ -193,119 +424,30 @@ class NPO(Service):
         if not manifest_url:
             raise ValueError("No stream URL in response")

-        is_unencrypted = "unencrypted" in manifest_url.lower() or not any(k in stream for k in ["drmToken", "token"])
-
         # Parse DASH
         tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

+        # Store the entire stream response in track.data so get_widevine_license can access it later
+        for tr in tracks:
+            tr.data["npo_stream_data"] = data  # Always store stream data for all tracks
+
+        # HACK: NPO reports some Dutch audio tracks as English for older content.
+        # If the title language is Dutch, assume any English audio tracks are also Dutch.
+        if title.language == Language.get("nl"):
+            for track in tracks.audio:
+                if track.language == Language.get("en"):
+                    self.log.debug("Correcting 'en' audio track to 'nl' for Dutch title.")
+                    track.language = Language.get("nl")
+
         # Subtitles
         subtitles = []
-        for sub in (data.get("assets", {}) or {}).get("subtitles", []) or []:
-            if not isinstance(sub, dict):
-                continue
-            lang = sub.get("iso", "und")
-            location = sub.get("location")
-            if not location:
-                continue  # skip if no URL provided
-            subtitles.append(
-                Subtitle(
-                    id_=sub.get("name", lang),
-                    url=location.strip(),
-                    language=Language.get(lang),
-                    is_original_lang=lang == "nl",
-                    codec=Subtitle.Codec.WebVTT,
-                    name=sub.get("name", "Unknown"),
-                    forced=False,
-                    sdh=False,
-                )
-            )
-        tracks.subtitles = subtitles
-
-        # DRM
-        if is_unencrypted:
-            for tr in tracks.videos + tracks.audio:
-                if hasattr(tr, "drm") and tr.drm:
-                    tr.drm.clear()
-        else:
-            self.drm_token = stream.get("drmToken") or stream.get("token") or stream.get("drm_token")
-            if not self.drm_token:
-                raise ValueError(f"No DRM token found. Available keys: {list(stream.keys())}")
-
-            for tr in tracks.videos + tracks.audio:
-                if getattr(tr, "drm", None):
-                    if drm_type == "playready":
-                        tr.drm.license = lambda challenge, **kw: self.get_playready_license(
-                            challenge=challenge, title=title, track=tr
-                        )
-                    else:
-                        tr.drm.license = lambda challenge, **kw: self.get_widevine_license(
-                            challenge=challenge, title=title, track=tr
-                        )
-
-        return tracks
-
-    def get_chapters(self, title: Title_T) -> list[Chapter]:
-        return []
-
-    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
-        if not self.drm_token:
-            raise ValueError("DRM token not set, login or paid content may be required.")
-        r = self.session.post(
-            self.config["endpoints"]["license"],
-            params={"custom_data": self.drm_token},
-            data=challenge,
-        )
-        r.raise_for_status()
-        return r.content
-
-    def get_playready_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
-        if not self.drm_token:
-            raise ValueError("DRM token not set, login or paid content may be required.")
-        headers = {
-            "Content-Type": "text/xml; charset=utf-8",
-            "SOAPAction": "http://schemas.microsoft.com/DRM/2007/03/protocols/AcquireLicense",
-            "Origin": "https://npo.nl",
-            "Referer": "https://npo.nl/",
-            "User-Agent": (
-                "Mozilla/5.0 (Windows NT 10.0; Win64; x64) "
-                "AppleWebKit/537.36 (KHTML, like Gecko) "
-                "Chrome/141.0.0.0 Safari/537.36 Edg/141.0.0.0"
-            ),
-        }
-        r = self.session.post(
-            self.config["endpoints"]["license"],
-            params={"custom_data": self.drm_token},
-            data=challenge,
-            headers=headers,
-        )
-        r.raise_for_status()
-        return r.content
-
-    def search(self) -> Generator[SearchResult, None, None]:
-        query = getattr(self, "search_term", None) or getattr(self, "title", None)
-        search = self.session.get(
-            url=self.config["endpoints"]["search"],
-            params={
-                "searchQuery": query,  # always use the correct attribute
-                "searchType": "series",
-                "subscriptionType": "premium",
-                "includePremiumContent": "true",
-            },
-            headers={
-                "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
-                "Accept": "application/json, text/plain, */*",
-                "Origin": "https://npo.nl",
-                "Referer": f"https://npo.nl/start/zoeken?zoekTerm={query}",
-            }
-        ).json()
-        for result in search.get("items", []):
-            yield SearchResult(
-                id_=result.get("guid"),
-                title=result.get("title"),
-                label=result.get("type", "SERIES").upper() if result.get("type") else "SERIES",
-                url=f"https://npo.nl/start/serie/{result.get('slug')}" if result.get("type") == "timeless_series" else
-                    f"https://npo.nl/start/video/{result.get('slug')}"
-            )
+        for sub in (data.get("assets", {}) or {}).get("subtitle", []):
+            if sub["format"] == "webvtt":
+                subtitles.append(Subtitle(url=sub["url"], language=Language.get(sub["lang"])))
+            else:
+                self.log.warning(f"Unsupported subtitle format: {sub['format']}")

+        if not self.NO_SUBTITLES:
+            tracks.subtitles.extend(subtitles)
+
+        return tracks
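The new NPO flow above threads DRM state through track.data instead of instance attributes: get_tracks stashes the whole stream response under a key, and get_widevine_license reads the token back from the track it is asked to license. A stripped-down sketch of that pattern (the Track class and function names here are invented for illustration):

from dataclasses import dataclass, field

@dataclass
class Track:
    data: dict = field(default_factory=dict)

def stash_stream_data(tracks: list[Track], stream_response: dict) -> None:
    # get_tracks stashes the whole stream response on every track...
    for tr in tracks:
        tr.data["npo_stream_data"] = stream_response

def drm_token_for(track: Track) -> str:
    # ...and get_widevine_license reads the token back from the track itself,
    # so no mutable instance state is shared between titles in one run.
    stream = track.data.get("npo_stream_data", {}).get("stream", {})
    token = stream.get("drmToken") or stream.get("token")
    if not token:
        raise ValueError("DRM token missing from track data.")
    return token

tracks = [Track()]
stash_stream_data(tracks, {"stream": {"drmToken": "abc123"}})
print(drm_token_for(tracks[0]))  # abc123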
149
PTHS/__init__.py
@ -1,149 +0,0 @@
import json
import re
from typing import Optional
from http.cookiejar import CookieJar
from langcodes import Language
import click

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T
from unshackle.core.tracks import Tracks


class PTHS(Service):
    """
    Service code for Pathé Thuis (pathe-thuis.nl)
    Version: 1.0.0

    Security: SD @ L3 (Widevine)
              FHD @ L1
    Authorization: Cookies or authentication token

    Supported:
    • Movies → https://www.pathe-thuis.nl/film/{id}

    Note:
    Pathé Thuis does not have episodic content, only movies.
    """

    TITLE_RE = (
        r"^(?:https?://(?:www\.)?pathe-thuis\.nl/film/)?(?P<id>\d+)(?:/[^/]+)?$"
    )
    GEOFENCE = ("NL",)
    NO_SUBTITLES = True

    @staticmethod
    @click.command(name="PTHS", short_help="https://www.pathe-thuis.nl")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return PTHS(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        m = re.match(self.TITLE_RE, title)
        if not m:
            raise ValueError(
                f"Unsupported Pathé Thuis URL or ID: {title}\n"
                "Use e.g. https://www.pathe-thuis.nl/film/30591"
            )

        self.movie_id = m.group("id")
        self.drm_token = None
        self.license_url = None  # set in get_tracks; checked by get_widevine_license

        if self.config is None:
            raise EnvironmentError("Missing service config for Pathé Thuis.")

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        super().authenticate(cookies, credential)

        if not cookies:
            self.log.warning("No cookies provided, proceeding unauthenticated.")
            return

        token = next((c.value for c in cookies if c.name == "authenticationToken"), None)
        if not token:
            self.log.info("No authenticationToken cookie found, unauthenticated mode.")
            return

        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "X-Pathe-Device-Identifier": "web-widevine-1",
            "X-Pathe-Auth-Session-Token": token,
        })
        self.log.info("Authentication token successfully attached to session.")

    def get_titles(self) -> Titles_T:
        url = self.config["endpoints"]["metadata"].format(movie_id=self.movie_id)
        r = self.session.get(url)
        r.raise_for_status()
        data = r.json()

        movie = Movie(
            id_=str(data["id"]),
            service=self.__class__,
            name=data["name"],
            description=data.get("intro", ""),
            year=data.get("year"),
            language=Language.get(data.get("language", "en")),
            data=data,
        )
        return Movies([movie])

    def get_tracks(self, title: Title_T) -> Tracks:
        ticket_id = self._get_ticket_id(title)
        url = self.config["endpoints"]["ticket"].format(ticket_id=ticket_id)

        r = self.session.get(url)
        r.raise_for_status()
        data = r.json()
        stream = data["stream"]

        manifest_url = stream.get("url") or stream.get("drmurl")
        if not manifest_url:
            raise ValueError("No stream manifest URL found.")

        self.drm_token = stream["token"]
        self.license_url = stream["rawData"]["licenseserver"]

        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        return tracks

    def _get_ticket_id(self, title: Title_T) -> str:
        """Fetch the user's owned ticket ID if present."""
        data = title.data
        for t in (data.get("tickets") or []):
            if t.get("playable") and str(t.get("movieId")) == str(self.movie_id):
                return str(t["id"])
        raise ValueError("No valid ticket found for this movie. Ensure purchase or login.")

    def get_chapters(self, title: Title_T):
        return []

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.license_url or not self.drm_token:
            raise ValueError("Missing license URL or token.")

        headers = {
            "Content-Type": "application/octet-stream",
            "Authorization": f"Bearer {self.drm_token}",
        }

        params = {"custom_data": self.drm_token}

        r = self.session.post(self.license_url, params=params, data=challenge, headers=headers)
        r.raise_for_status()

        if not r.content:
            raise ValueError("Empty license response, likely invalid or expired token.")
        return r.content
@ -1,3 +0,0 @@
endpoints:
  metadata: "https://www.pathe-thuis.nl/api/movies/{movie_id}?include=editions"
  ticket: "https://www.pathe-thuis.nl/api/tickets/{ticket_id}"
33
README.md
@ -1,40 +1,11 @@
-# These services are new and in development. Please feel free to submit pull requests or open an issue for any mistakes or suggestions.
+# This service is relatively new and in development. Please feel free to submit pull requests or open an issue for any mistakes or suggestions.

-### If you have personal questions or want to request a service, DM me on Discord (jerukpurut)
-
-- Roadmap:
-
-1. NPO:
    - To add search functionality
    - More accurate metadata (the year shown does not match the year of release)
    - Add an automatic CDM recognition option instead of the user setting drmType manually in the config
-2. KOWP:
-   - Audio mislabelled as English
-   - To add PlayReady support
-3. PTHS:
-   - To add PlayReady support (needed since L3 is just 480p)
-   - Search functionality
-   - Account login if possible
-4. HIDI:
-   - Subtitles are sometimes misordered when a second sentence appears, putting the last sentence first and vice versa (needs to be fixed)
-5. MUBI:
-   - Search functionality
-6. VIKI:
-   - The CSRF token is currently scraped; it will come from an API request soon
-7. VIDO:
-   - Subtitles have a small quirk: Javanese and Sundanese are labelled in the HLS manifest but not the DASH one
-   - Search functionality not available yet
-8. KNPY:
-   - Need to fix the search function
-9. VRT:
-   - Search functionality
-   - Fixing a few hiccups
-10. SKST (the hardest service I have dealt with so far):
-   - Subtitles are a little hit or miss for movies, and for series there are still no subtitles

 - Acknowledgment

-Thanks to Adef for the NPO start downloader.
+Thanks to FairTrade for turning my project into an unshackle service
1048
SKST/__init__.py
File diff suppressed because it is too large
@ -1,42 +0,0 @@
endpoints:
  signin: "https://rango.id.skyshowtime.com/signin/service/international"
  tokens: "https://ovp.skyshowtime.com/auth/tokens"
  personas: "https://web.clients.skyshowtime.com/bff/personas/v2"
  atom_node: "https://atom.skyshowtime.com/adapter-calypso/v3/query/node"
  atom_search: "https://atom.skyshowtime.com/adapter-calypso/v3/query/search"
  playback: "https://ovp.skyshowtime.com/video/playouts/vod"

params:
  provider: "SKYSHOWTIME"
  proposition: "SKYSHOWTIME"
  platform: "PC"
  device: "COMPUTER"
  client_version: "6.11.21-gsp"

signature:
  app_id: "SHOWMAX-ANDROID-v1"
  key: "kC2UFjsH6PHrc5ENGfyTgC5bPA7aBVZ4aJAyqBBP"
  version: "1.0"

territories:
  - NL
  - PL
  - ES
  - PT
  - SE
  - "NO"  # quoted so YAML 1.1 parsers do not read it as boolean false
  - DK
  - FI
  - CZ
  - SK
  - HU
  - RO
  - BG
  - HR
  - SI
  - BA
  - RS
  - ME
  - MK
  - AL
  - XK
452
VIDO/__init__.py
@ -1,452 +0,0 @@
import re
import uuid
import xml.etree.ElementTree as ET
from urllib.parse import urljoin
from hashlib import md5
from typing import Optional, Union
from http.cookiejar import CookieJar
from langcodes import Language

import click

from unshackle.core.credential import Credential
from unshackle.core.manifests import HLS, DASH
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle
from unshackle.core.constants import AnyTrack
from datetime import datetime, timezone


class VIDO(Service):
    """
    Vidio.com service, Series and Movies, login required.
    Version: 2.3.0

    Supports URLs like:
    • https://www.vidio.com/premier/2978/giligilis (Series)
    • https://www.vidio.com/watch/7454613-marantau-short-movie (Movie)

    Security: HD@L3 (Widevine DRM when available)
    """

    TITLE_RE = r"^https?://(?:www\.)?vidio\.com/(?:premier|series|watch)/(?P<id>\d+)"
    GEOFENCE = ("ID",)

    @staticmethod
    @click.command(name="VIDO", short_help="https://vidio.com (login required)")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VIDO(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        match = re.match(self.TITLE_RE, title)
        if not match:
            raise ValueError(f"Unsupported or invalid Vidio URL: {title}")
        self.content_id = match.group("id")

        self.is_movie = "watch" in title

        # Static app identifiers from Android traffic
        self.API_AUTH = "laZOmogezono5ogekaso5oz4Mezimew1"
        self.USER_AGENT = "vidioandroid/7.14.6-e4d1de87f2 (3191683)"
        self.API_APP_INFO = "android/15/7.14.6-e4d1de87f2-3191683"
        self.VISITOR_ID = str(uuid.uuid4())

        # Auth state
        self._email = None
        self._user_token = None
        self._access_token = None

        # DRM state
        self.license_url = None
        self.custom_data = None
        self.cdm = ctx.obj.cdm

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        if not credential or not credential.username or not credential.password:
            raise ValueError("Vidio requires email and password login.")

        self._email = credential.username
        password = credential.password

        cache_key = f"auth_tokens_{self._email}"
        cache = self.cache.get(cache_key)

        # Check if valid tokens are already in the cache
        if cache and not cache.expired:
            self.log.info("Using cached authentication tokens")
            cached_data = cache.data
            self._user_token = cached_data.get("user_token")
            self._access_token = cached_data.get("access_token")
            if self._user_token and self._access_token:
                return

        # If there is no valid cache, proceed with login
        self.log.info("Authenticating with username and password")
        headers = {
            "referer": "android-app://com.vidio.android",
            "x-api-platform": "app-android",
            "x-api-auth": self.API_AUTH,
            "user-agent": self.USER_AGENT,
            "x-api-app-info": self.API_APP_INFO,
            "accept-language": "en",
            "content-type": "application/x-www-form-urlencoded",
            "x-visitor-id": self.VISITOR_ID,
        }

        data = f"login={self._email}&password={password}"
        r = self.session.post("https://api.vidio.com/api/login", headers=headers, data=data)
        r.raise_for_status()

        auth_data = r.json()
        self._user_token = auth_data["auth"]["authentication_token"]
        self._access_token = auth_data["auth_tokens"]["access_token"]
        self.log.info(f"Authenticated as {self._email}")

        try:
            expires_at_str = auth_data["auth_tokens"]["access_token_expires_at"]
            expires_at_dt = datetime.fromisoformat(expires_at_str)
            now_utc = datetime.now(timezone.utc)
            expiration_in_seconds = max(0, int((expires_at_dt - now_utc).total_seconds()))
            self.log.info(f"Token expires in {expiration_in_seconds / 60:.2f} minutes.")
        except (KeyError, ValueError) as e:
            self.log.warning(f"Could not parse token expiration: {e}. Defaulting to 1 hour.")
            expiration_in_seconds = 3600

        cache.set({
            "user_token": self._user_token,
            "access_token": self._access_token
        }, expiration=expiration_in_seconds)
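One subtlety in the expiration handling above: before Python 3.11, datetime.fromisoformat rejects the trailing "Z" that many APIs use for UTC, which is one way the (KeyError, ValueError) fallback can trigger. A hedged sketch of a more tolerant parse (the exact timestamp format Vidio returns may differ):

from datetime import datetime, timezone

def parse_expiry(expires_at: str) -> datetime:
    # Python < 3.11 cannot parse a trailing "Z"; normalize it to "+00:00" first
    if expires_at.endswith("Z"):
        expires_at = expires_at[:-1] + "+00:00"
    dt = datetime.fromisoformat(expires_at)
    # Treat naive timestamps as UTC so the subtraction never mixes naive and aware
    if dt.tzinfo is None:
        dt = dt.replace(tzinfo=timezone.utc)
    return dt

remaining = parse_expiry("2025-01-01T00:00:00Z") - datetime.now(timezone.utc)
print(max(0, int(remaining.total_seconds())))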
    def _headers(self):
        if not self._user_token or not self._access_token:
            raise RuntimeError("Not authenticated. Call authenticate() first.")
        return {
            "referer": "android-app://com.vidio.android",
            "x-api-platform": "app-android",
            "x-api-auth": self.API_AUTH,
            "user-agent": self.USER_AGENT,
            "x-api-app-info": self.API_APP_INFO,
            "x-visitor-id": self.VISITOR_ID,
            "x-user-email": self._email,
            "x-user-token": self._user_token,
            "x-authorization": self._access_token,
            "accept-language": "en",
            "accept": "application/json",
            "accept-charset": "UTF-8",
            "content-type": "application/vnd.api+json",
        }

    def _extract_subtitles_from_mpd(self, mpd_url: str) -> list[Subtitle]:
        """
        Manually parse the MPD to extract subtitle tracks.
        Handles plain VTT format (for free content).
        """
        subtitles = []

        try:
            r = self.session.get(mpd_url)
            r.raise_for_status()
            mpd_content = r.text

            # Get the base URL for resolving relative paths
            base_url = mpd_url.rsplit('/', 1)[0] + '/'

            # Remove the namespace for easier parsing
            mpd_content_clean = re.sub(r'\sxmlns="[^"]+"', '', mpd_content)
            root = ET.fromstring(mpd_content_clean)

            for adaptation_set in root.findall('.//AdaptationSet'):
                content_type = adaptation_set.get('contentType', '')

                if content_type != 'text':
                    continue

                lang = adaptation_set.get('lang', 'und')

                for rep in adaptation_set.findall('Representation'):
                    mime_type = rep.get('mimeType', '')

                    # Handle plain VTT (free content)
                    if mime_type == 'text/vtt':
                        segment_list = rep.find('SegmentList')
                        if segment_list is not None:
                            for segment_url in segment_list.findall('SegmentURL'):
                                media = segment_url.get('media')
                                if media:
                                    full_url = urljoin(base_url, media)

                                    # Determine if auto-generated
                                    is_auto = '-auto' in lang
                                    clean_lang = lang.replace('-auto', '')

                                    subtitle = Subtitle(
                                        id_=md5(full_url.encode()).hexdigest()[0:16],
                                        url=full_url,
                                        codec=Subtitle.Codec.WebVTT,
                                        language=Language.get(clean_lang),
                                        forced=False,
                                        sdh=False,
                                    )

                                    subtitles.append(subtitle)
                                    self.log.debug(f"Found VTT subtitle: {lang} -> {full_url}")

        except Exception as e:
            self.log.warning(f"Failed to extract subtitles from MPD: {e}")

        return subtitles
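The regex that strips the default xmlns before parsing is what lets the queries above use bare tag names like AdaptationSet. A self-contained demo of the same trick on a toy manifest (the MPD snippet is invented for illustration):

import re
import xml.etree.ElementTree as ET

mpd = '''<MPD xmlns="urn:mpeg:dash:schema:mpd:2011">
  <Period>
    <AdaptationSet contentType="text" lang="id">
      <Representation mimeType="text/vtt">
        <SegmentList><SegmentURL media="subs/id.vtt"/></SegmentList>
      </Representation>
    </AdaptationSet>
  </Period>
</MPD>'''

# Drop the default namespace so findall('.//AdaptationSet') works without
# the '{urn:mpeg:dash:schema:mpd:2011}' prefix on every tag
root = ET.fromstring(re.sub(r'\sxmlns="[^"]+"', '', mpd))
for aset in root.findall('.//AdaptationSet'):
    print(aset.get('lang'), aset.find('.//SegmentURL').get('media'))
# prints: id subs/id.vtt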
def get_titles(self) -> Titles_T:
|
|
||||||
headers = self._headers()
|
|
||||||
|
|
||||||
if self.is_movie:
|
|
||||||
r = self.session.get(f"https://api.vidio.com/api/videos/{self.content_id}/detail", headers=headers)
|
|
||||||
r.raise_for_status()
|
|
||||||
video_data = r.json()["video"]
|
|
||||||
year = None
|
|
||||||
if video_data.get("publish_date"):
|
|
||||||
try:
|
|
||||||
year = int(video_data["publish_date"][:4])
|
|
||||||
except (ValueError, TypeError):
|
|
||||||
pass
|
|
||||||
return Movies([
|
|
||||||
Movie(
|
|
||||||
id_=video_data["id"],
|
|
||||||
service=self.__class__,
|
|
||||||
name=video_data["title"],
|
|
||||||
description=video_data.get("description", ""),
|
|
||||||
year=year,
|
|
||||||
language=Language.get("id"),
|
|
||||||
data=video_data,
|
|
||||||
)
|
|
||||||
])
|
|
||||||
else:
|
|
||||||
r = self.session.get(f"https://api.vidio.com/content_profiles/{self.content_id}", headers=headers)
|
|
||||||
r.raise_for_status()
|
|
||||||
root = r.json()["data"]
|
|
||||||
series_title = root["attributes"]["title"]
|
|
||||||
|
|
||||||
r_playlists = self.session.get(
|
|
||||||
f"https://api.vidio.com/content_profiles/{self.content_id}/playlists",
|
|
||||||
headers=headers
|
|
||||||
)
|
|
||||||
r_playlists.raise_for_status()
|
|
||||||
playlists_data = r_playlists.json()
|
|
||||||
|
|
||||||
# Use metadata to identify season playlists
|
|
||||||
season_playlist_ids = set()
|
|
||||||
if "meta" in playlists_data and "playlist_group" in playlists_data["meta"]:
|
|
||||||
for group in playlists_data["meta"]["playlist_group"]:
|
|
||||||
if group.get("type") == "season":
|
|
||||||
season_playlist_ids.update(group.get("playlist_ids", []))
|
|
||||||
|
|
||||||
season_playlists = []
|
|
||||||
for pl in playlists_data["data"]:
|
|
||||||
playlist_id = int(pl["id"])
|
|
||||||
name = pl["attributes"]["name"].lower()
|
|
||||||
|
|
||||||
if season_playlist_ids:
|
|
||||||
if playlist_id in season_playlist_ids:
|
|
||||||
season_playlists.append(pl)
|
|
||||||
else:
|
|
||||||
if ("season" in name or name == "episode" or name == "episodes") and \
|
|
||||||
"trailer" not in name and "extra" not in name:
|
|
||||||
season_playlists.append(pl)
|
|
||||||
|
|
||||||
if not season_playlists:
|
|
||||||
raise ValueError("No season playlists found for this series.")
|
|
||||||
|
|
||||||
def extract_season_number(pl):
|
|
||||||
name = pl["attributes"]["name"]
|
|
||||||
match = re.search(r"season\s*(\d+)", name, re.IGNORECASE)
|
|
||||||
if match:
|
|
||||||
return int(match.group(1))
|
|
||||||
elif name.lower() in ["season", "episodes", "episode"]:
|
|
||||||
return 1
|
|
||||||
else:
|
|
||||||
return 0
|
|
||||||
|
|
||||||
season_playlists.sort(key=extract_season_number)
|
|
||||||
|
|
||||||
all_episodes = []
|
|
||||||
|
|
||||||
for playlist in season_playlists:
|
|
||||||
playlist_id = playlist["id"]
|
|
||||||
season_number = extract_season_number(playlist)
|
|
||||||
|
|
||||||
if season_number == 0:
|
|
||||||
season_number = 1
|
|
||||||
|
|
||||||
self.log.debug(f"Processing playlist '{playlist['attributes']['name']}' as Season {season_number}")
|
|
||||||
|
|
||||||
page = 1
|
|
||||||
while True:
|
|
||||||
r_eps = self.session.get(
|
|
||||||
f"https://api.vidio.com/content_profiles/{self.content_id}/playlists/{playlist_id}/videos",
|
|
||||||
params={
|
|
||||||
"page[number]": page,
|
|
||||||
"page[size]": 20,
|
|
||||||
"sort": "order",
|
|
||||||
"included": "upcoming_videos"
|
|
||||||
},
|
|
||||||
headers=headers,
|
|
||||||
)
|
|
||||||
r_eps.raise_for_status()
|
|
||||||
page_data = r_eps.json()
|
|
||||||
|
|
||||||
for raw_ep in page_data["data"]:
|
|
||||||
attrs = raw_ep["attributes"]
|
|
||||||
ep_number = len([e for e in all_episodes if e.season == season_number]) + 1
|
|
||||||
all_episodes.append(
|
|
||||||
Episode(
|
|
||||||
id_=int(raw_ep["id"]),
|
|
||||||
service=self.__class__,
|
|
||||||
title=series_title,
|
|
||||||
season=season_number,
|
|
||||||
number=ep_number,
|
|
||||||
name=attrs["title"],
|
|
||||||
description=attrs.get("description", ""),
|
|
||||||
language=Language.get("id"),
|
|
||||||
data=raw_ep,
|
|
||||||
)
|
|
||||||
)
|
|
||||||
|
|
||||||
if not page_data["links"].get("next"):
|
|
||||||
break
|
|
||||||
page += 1
|
|
||||||
|
|
||||||
if not all_episodes:
|
|
||||||
raise ValueError("No episodes found in any season.")
|
|
||||||
|
|
||||||
return Series(all_episodes)
|
|
||||||
|
|
||||||
def get_tracks(self, title: Title_T) -> Tracks:
|
|
||||||
headers = self._headers()
|
|
||||||
headers.update({
|
|
||||||
"x-device-brand": "samsung",
|
|
||||||
"x-device-model": "SM-A525F",
|
|
||||||
"x-device-form-factor": "phone",
|
|
||||||
"x-device-soc": "Qualcomm SM7125",
|
|
||||||
"x-device-os": "Android 15 (API 35)",
|
|
||||||
"x-device-android-mpc": "0",
|
|
||||||
"x-device-cpu-arch": "arm64-v8a",
|
|
||||||
"x-device-platform": "android",
|
|
||||||
"x-app-version": "7.14.6-e4d1de87f2-3191683",
|
|
||||||
})
|
|
||||||
|
|
||||||
video_id = str(title.id)
|
|
||||||
url = f"https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"
|
|
||||||
|
|
||||||
r = self.session.get(url, headers=headers)
|
|
||||||
r.raise_for_status()
|
|
||||||
stream = r.json()
|
|
||||||
|
|
||||||
if not isinstance(stream, dict):
|
|
||||||
raise ValueError("Vidio returned invalid stream data.")
|
|
||||||
|
|
||||||
# Extract DRM info
|
|
||||||
custom_data = stream.get("custom_data") or {}
|
|
||||||
license_servers = stream.get("license_servers") or {}
|
|
||||||
widevine_data = custom_data.get("widevine") if isinstance(custom_data, dict) else None
|
|
||||||
license_url = license_servers.get("drm_license_url") if isinstance(license_servers, dict) else None
|
|
||||||
|
|
||||||
# Get stream URLs, check all possible HLS and DASH fields
|
|
||||||
# HLS URLs (prefer in this order)
|
|
||||||
hls_url = (
|
|
||||||
stream.get("stream_hls_url") or
|
|
||||||
stream.get("stream_token_hls_url") or
|
|
||||||
stream.get("stream_token_url") # This is also HLS (m3u8)
|
|
||||||
)
|
|
||||||
|
|
||||||
# DASH URLs
|
|
||||||
dash_url = stream.get("stream_dash_url") or stream.get("stream_token_dash_url")
|
|
||||||
|
|
||||||
has_drm = widevine_data and license_url and dash_url and isinstance(widevine_data, str)

        if has_drm:
            # DRM content: must use DASH
            self.log.info("Widevine DRM detected, using DASH")
            self.custom_data = widevine_data
            self.license_url = license_url
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        elif hls_url:
            # Non-DRM: prefer HLS (H.264, proper frame_rate metadata)
            self.log.info("No DRM detected, using HLS")
            self.custom_data = None
            self.license_url = None
            tracks = HLS.from_url(hls_url, session=self.session).to_tracks(language=title.language)

            # Clear HLS subtitles (they're segmented and incompatible)
            if tracks.subtitles:
                self.log.debug("Clearing HLS subtitles (incompatible format)")
                tracks.subtitles.clear()

            # Get subtitles from the DASH manifest (plain VTT) if available
            if dash_url:
                self.log.debug("Extracting subtitles from DASH manifest")
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)
                    self.log.info(f"Added {len(manual_subs)} subtitle tracks from DASH")

        elif dash_url:
            # Fall back to DASH only if no HLS is available
            self.log.warning("No HLS available, using DASH (VP9 codec - may have issues)")
            self.custom_data = None
            self.license_url = None
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

            # Try manual subtitle extraction for non-DRM DASH
            if not tracks.subtitles:
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)

        else:
            raise ValueError("No playable stream (DASH or HLS) available.")

        self.log.info(
            f"Found {len(tracks.videos)} video tracks, {len(tracks.audio)} audio tracks, "
            f"{len(tracks.subtitles)} subtitle tracks"
        )
        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []

    def search(self):
        raise NotImplementedError("Search not implemented for Vidio.")

    def get_widevine_service_certificate(self, **_) -> Union[bytes, str, None]:
        return None
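        # Returning None skips the service-certificate step, so license
        # challenges go out without Widevine privacy mode, which this
        # service appears not to require.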

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.license_url or not self.custom_data:
            raise ValueError("DRM license info missing.")

        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Referer": "https://www.vidio.com/",
            "Origin": "https://www.vidio.com",
            "pallycon-customdata-v2": self.custom_data,
            "Content-Type": "application/octet-stream",
        }
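        # PallyCon license servers read the per-stream authorization from the
        # "pallycon-customdata-v2" header; here it is the token taken verbatim
        # from custom_data["widevine"] in get_tracks().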

        self.log.debug(f"Requesting Widevine license from: {self.license_url}")
        response = self.session.post(
            self.license_url,
            data=challenge,
            headers=headers
        )

        if not response.ok:
            error_summary = response.text[:200] if response.text else "No response body"
            raise Exception(f"License request failed ({response.status_code}): {error_summary}")

        return response.content
@ -1,5 +0,0 @@
endpoints:
  content_profile: "https://api.vidio.com/content_profiles/{content_id}"
  playlists: "https://api.vidio.com/content_profiles/{content_id}/playlists"
  playlist_videos: "https://api.vidio.com/content_profiles/{content_id}/playlists/{playlist_id}/videos"
  stream: "https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"
328
VIKI/__init__.py
328
VIKI/__init__.py
@ -1,328 +0,0 @@
import base64
import json
import os
import re
from http.cookiejar import CookieJar
from typing import Optional, Generator

import click
from langcodes import Language

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.drm import Widevine
from unshackle.core.manifests import DASH
from unshackle.core.search_result import SearchResult
from unshackle.core.service import Service
from unshackle.core.titles import Movie, Movies, Series, Episode, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle


class VIKI(Service):
    """
    Service code for Rakuten Viki (viki.com)
    Version: 1.4.0

    Authorization: Required cookies (_viki_session, device_id).
    Security: FHD @ L3 (Widevine)

    Supports:
    • Movies and TV Series
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?viki\.com)?/(?:movies|tv)/(?P<id>\d+c)-.+$"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="VIKI", short_help="https://viki.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VIKI(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        self.cdm = ctx.obj.cdm
        if self.config is None:
            raise EnvironmentError("Missing service config for VIKI.")

        m = re.match(self.TITLE_RE, title)
        if not m:
            self.search_term = title
            self.title_url = None
            return

        self.container_id = m.group("id")
        self.title_url = title
        self.video_id: Optional[str] = None
        self.api_access_key: Optional[str] = None
        self.drm_license_url: Optional[str] = None

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        super().authenticate(cookies, credential)

        if not cookies:
            raise PermissionError("VIKI requires a cookie file for authentication.")

        session_cookie = next((c for c in cookies if c.name == "_viki_session"), None)
        device_cookie = next((c for c in cookies if c.name == "device_id"), None)

        if not session_cookie or not device_cookie:
            raise PermissionError("Your cookie file is missing '_viki_session' or 'device_id'.")

        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "X-Viki-App-Ver": "14.64.0",
            "X-Viki-Device-ID": device_cookie.value,
            "Origin": "https://www.viki.com",
            "Referer": "https://www.viki.com/",
        })
        self.log.info("VIKI authentication cookies loaded successfully.")

    def get_titles(self) -> Titles_T:
        if not self.title_url:
            raise ValueError("No URL provided to process.")

        self.log.debug(f"Scraping page for API access key: {self.title_url}")
        r_page = self.session.get(self.title_url)
        r_page.raise_for_status()

        match = re.search(r'"token":"([^"]+)"', r_page.text)
        if not match:
            raise RuntimeError("Failed to extract API access key from page source.")

        self.api_access_key = match.group(1)
        self.log.debug(f"Extracted API access key: {self.api_access_key[:10]}...")

        url = self.config["endpoints"]["container"].format(container_id=self.container_id)
        params = {
            "app": self.config["params"]["app"],
            "token": self.api_access_key,
        }
        r = self.session.get(url, params=params)
        r.raise_for_status()
        data = r.json()

        content_type = data.get("type")
        if content_type == "film":
            return self._parse_movie(data)
        elif content_type == "series":
            return self._parse_series(data)
        else:
            self.log.error(f"Unknown content type '{content_type}' found.")
            return Movies([])

    def _parse_movie(self, data: dict) -> Movies:
        name = data.get("titles", {}).get("en", "Unknown Title")
        year = int(data["created_at"][:4]) if "created_at" in data else None
        description = data.get("descriptions", {}).get("en", "")
        original_lang_code = data.get("origin", {}).get("language", "en")
        self.video_id = data.get("watch_now", {}).get("id")

        if not self.video_id:
            raise ValueError(f"Could not find a playable video ID for container {self.container_id}.")

        return Movies([
            Movie(
                id_=self.container_id,
                service=self.__class__,
                name=name,
                year=year,
                description=description,
                language=Language.get(original_lang_code),
                data=data,
            )
        ])

    def _parse_series(self, data: dict) -> Series:
        """Parse series metadata and fetch episodes."""
        series_name = data.get("titles", {}).get("en", "Unknown Title")
        year = int(data["created_at"][:4]) if "created_at" in data else None
        description = data.get("descriptions", {}).get("en", "")
        original_lang_code = data.get("origin", {}).get("language", "en")

        self.log.info(f"Parsing series: {series_name}")

        # Fetch episode list IDs
        episodes_url = self.config["endpoints"]["episodes"].format(container_id=self.container_id)
        params = {
            "app": self.config["params"]["app"],
            "token": self.api_access_key,
            "direction": "asc",
            "with_upcoming": "true",
            "sort": "number",
            "blocked": "true",
            "only_ids": "true",
        }

        r = self.session.get(episodes_url, params=params)
        r.raise_for_status()
        episodes_data = r.json()

        episode_ids = episodes_data.get("response", [])
        self.log.info(f"Found {len(episode_ids)} episodes")

        episodes = []
        for idx, ep_id in enumerate(episode_ids, 1):
            # Fetch individual episode metadata
            ep_url = self.config["endpoints"]["episode_meta"].format(video_id=ep_id)
            ep_params = {
                "app": self.config["params"]["app"],
                "token": self.api_access_key,
            }

            try:
                r_ep = self.session.get(ep_url, params=ep_params)
                r_ep.raise_for_status()
                ep_data = r_ep.json()

                ep_number = ep_data.get("number", idx)
                ep_title = ep_data.get("titles", {}).get("en", "")
                ep_description = ep_data.get("descriptions", {}).get("en", "")

                # If there is no episode title, use a generic name
                if not ep_title:
                    ep_title = f"Episode {ep_number}"

                # Store the video_id in the data dict
                ep_data["video_id"] = ep_id

                self.log.debug(f"Episode {ep_number}: {ep_title} ({ep_id})")

                episodes.append(
                    Episode(
                        id_=ep_id,
                        service=self.__class__,
                        title=series_name,  # Series title
                        season=1,  # VIKI typically doesn't separate seasons clearly
                        number=ep_number,
                        name=ep_title,  # Episode title
                        description=ep_description,
                        language=Language.get(original_lang_code),
                        data=ep_data,
                    )
                )
            except Exception as e:
                self.log.warning(f"Failed to fetch episode {ep_id}: {e}")
                # Create a basic episode entry even if the metadata fetch fails
                episodes.append(
                    Episode(
                        id_=ep_id,
                        service=self.__class__,
                        title=series_name,
                        season=1,
                        number=idx,
                        name=f"Episode {idx}",
                        description="",
                        language=Language.get(original_lang_code),
                        data={"video_id": ep_id},  # Store video_id in data
                    )
                )

        # Return Series with just the episodes list
        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        # For episodes, get the video_id from the data dict
        if isinstance(title, Episode):
            # Fall back to the episode id if video_id is not in data
            self.video_id = title.data.get("video_id") or title.data.get("id")
        elif not self.video_id:
            raise RuntimeError("video_id not set. Call get_titles() first.")

        if not self.video_id:
            raise ValueError("Could not determine video_id for this title")

        self.log.info(f"Getting tracks for video ID: {self.video_id}")
url = self.config["endpoints"]["playback"].format(video_id=self.video_id)
|
|
||||||
r = self.session.get(url)
|
|
||||||
r.raise_for_status()
|
|
||||||
data = r.json()
|
|
||||||
|
|
||||||
# Get the DRM-protected manifest from queue
|
|
||||||
manifest_url = None
|
|
||||||
for item in data.get("queue", []):
|
|
||||||
if item.get("type") == "video" and item.get("format") == "mpd":
|
|
||||||
manifest_url = item.get("url")
|
|
||||||
break
|
|
||||||
|
|
||||||
if not manifest_url:
|
|
||||||
raise ValueError("No DRM-protected manifest URL found in queue")
|
|
||||||
|
|
||||||
self.log.debug(f"Found DRM-protected manifest URL: {manifest_url}")

        # Headers for the manifest download
        manifest_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "*/*",
            "Accept-Language": "en",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "X-Viki-App-Ver": "14.64.0",
            "X-Viki-Device-ID": self.session.headers.get("X-Viki-Device-ID", ""),
            "Origin": "https://www.viki.com",
            "Referer": "https://www.viki.com/",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
        }
        # Apply them to the session so the manifest request actually sends them
        self.session.headers.update(manifest_headers)

        # Parse tracks from the DRM-protected manifest
        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        # Subtitles
        title_language = title.language.language
        subtitles = []
        for sub in data.get("subtitles", []):
            sub_url = sub.get("src")
            lang_code = sub.get("srclang")
            if not sub_url or not lang_code:
                continue

            subtitles.append(
                Subtitle(
                    id_=lang_code,
                    url=sub_url,
                    language=Language.get(lang_code),
                    is_original_lang=lang_code == title_language,
                    codec=Subtitle.Codec.WebVTT,
                    name=sub.get("label", lang_code.upper()).split(" (")[0],
                )
            )
        tracks.subtitles = subtitles

        # Store the DRM license URL (only dt3) at the service level
        drm_b64 = data.get("drm")
        if drm_b64:
            drm_data = json.loads(base64.b64decode(drm_b64))
            self.drm_license_url = drm_data.get("dt3")  # Use dt3 as requested
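            # The decoded payload is a JSON object whose "dt3" key holds the
            # Widevine license URL used in get_widevine_license(); any other
            # keys it may carry are ignored by this service.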
        else:
            self.log.warning("No DRM info found, assuming unencrypted stream.")

        return tracks

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not getattr(self, "drm_license_url", None):
            raise ValueError("DRM license URL not available.")

        r = self.session.post(
            self.drm_license_url,
            data=challenge,
            headers={"Content-Type": "application/octet-stream"},
        )
        r.raise_for_status()
        return r.content

    def search(self) -> Generator[SearchResult, None, None]:
        self.log.warning("Search not yet implemented for VIKI.")
        return
        yield  # unreachable, but makes this function a generator

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []
@ -1,8 +0,0 @@
params:
  app: "100000a"
endpoints:
  container: "https://api.viki.io/v4/containers/{container_id}.json"
  episodes: "https://api.viki.io/v4/series/{container_id}/episodes.json"
  episode_meta: "https://api.viki.io/v4/videos/{video_id}.json"
  playback: "https://www.viki.com/api/videos/{video_id}"
  search: "https://api.viki.io/v4/search/all.json"
264
VRT/__init__.py
264
VRT/__init__.py
@ -1,264 +0,0 @@
import base64
import json
import re
import time
import warnings
from collections.abc import Generator
from http.cookiejar import CookieJar
from typing import Optional, List

import click
import jwt
from bs4 import XMLParsedAsHTMLWarning
from langcodes import Language

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.search_result import SearchResult
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle

# Ignore the BeautifulSoup XML warning caused by STPP subtitles
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)

# GraphQL fragments and queries
FRAGMENTS = """
fragment tileFragment on Tile {
  ... on ITile {
    title
    action { ... on LinkAction { link } }
  }
}
"""

QUERY_PROGRAM = """
query VideoProgramPage($pageId: ID!) {
  page(id: $pageId) {
    ... on ProgramPage {
      title
      components {
        __typename
        ... on PaginatedTileList { listId title }
        ... on StaticTileList { listId title }
        ... on ContainerNavigation {
          items {
            title
            components {
              __typename
              ... on PaginatedTileList { listId }
              ... on StaticTileList { listId }
            }
          }
        }
      }
    }
  }
}
"""

QUERY_PAGINATED_LIST = FRAGMENTS + """
query PaginatedTileListPage($listId: ID!, $after: ID) {
  list(listId: $listId) {
    ... on PaginatedTileList {
      paginatedItems(first: 50, after: $after) {
        edges { node { ...tileFragment } }
        pageInfo { endCursor hasNextPage }
      }
    }
    ... on StaticTileList {
      items { ...tileFragment }
    }
  }
}
"""

QUERY_PLAYBACK = """
query EpisodePage($pageId: ID!) {
  page(id: $pageId) {
    ... on PlaybackPage {
      title
      player { modes { streamId } }
    }
  }
}
"""


class VRT(Service):
    """
    Service code for VRT MAX (vrt.be)
    Version: 2.1.1
    Auth: Gigya + OIDC flow
    Security: FHD @ L3 (Widevine)
    Supports:
    - Movies: https://www.vrt.be/vrtmax/a-z/rikkie-de-ooievaar-2/
    - Series: https://www.vrt.be/vrtmax/a-z/schaar-steen-papier/
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?vrt\.be/vrtmax/a-z/)?(?P<slug>[^/]+)(?:/(?P<season_num>\d+)/(?P<episode_slug>[^/]+))?/?$"

    @staticmethod
    @click.command(name="VRT", short_help="https://www.vrt.be/vrtmax/")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VRT(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        self.cdm = ctx.obj.cdm

        m = re.match(self.TITLE_RE, title)
        if m:
            self.slug = m.group("slug")
            self.is_series_root = m.group("episode_slug") is None
            if "vrtmax/a-z" in title:
                self.page_id = "/" + title.split("vrt.be/")[1].split("?")[0]
            else:
                self.page_id = f"/vrtmax/a-z/{self.slug}/"
        else:
            self.search_term = title

        self.access_token = None
        self.video_token = None

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        cache = self.cache.get("auth_data")
        if cache and not cache.expired:
            self.log.info("Using cached VRT session.")
            self.access_token = cache.data["access_token"]
            self.video_token = cache.data["video_token"]
            return

        if not credential or not credential.username or not credential.password:
            return

        self.log.info(f"Logging in to VRT as {credential.username}...")
        login_params = {
            "apiKey": self.config["settings"]["api_key"],
            "loginID": credential.username,
            "password": credential.password,
            "format": "json",
            "sdk": "Android_6.1.0",
        }
        r = self.session.post(self.config["endpoints"]["gigya_login"], data=login_params)
        gigya_data = r.json()
        if gigya_data.get("errorCode") != 0:
            raise PermissionError("Gigya login failed")

        sso_params = {
            "UID": gigya_data["UID"],
            "UIDSignature": gigya_data["UIDSignature"],
            "signatureTimestamp": gigya_data["signatureTimestamp"],
        }
        r = self.session.get(self.config["endpoints"]["vrt_sso"], params=sso_params)

        match = re.search(r'var response = "(.*?)";', r.text)
        if not match:
            raise PermissionError("Could not extract the token response from the VRT SSO page.")
        token_data = json.loads(match.group(1).replace('\\"', '"'))
        self.access_token = token_data["tokens"]["access_token"]
        self.video_token = token_data["tokens"]["video_token"]

        # Cache until shortly before the JWT expires (5-minute safety margin)
        decoded = jwt.decode(self.access_token, options={"verify_signature": False})
        cache.set(
            data={"access_token": self.access_token, "video_token": self.video_token},
            expiration=int(decoded["exp"] - time.time()) - 300,
        )

    def _get_gql_headers(self):
        return {
            "x-vrt-client-name": self.config["settings"]["client_name"],
            "x-vrt-client-version": self.config["settings"]["client_version"],
            "x-vrt-zone": "default",
            "authorization": f"Bearer {self.access_token}" if self.access_token else None,
            "Content-Type": "application/json",
        }

    def get_titles(self) -> Titles_T:
        if not self.is_series_root:
            r = self.session.post(
                self.config["endpoints"]["graphql"],
                json={"query": QUERY_PLAYBACK, "variables": {"pageId": self.page_id}},
                headers=self._get_gql_headers(),
            )
            data = r.json()["data"]["page"]
            return Movies([Movie(
                id_=data["player"]["modes"][0]["streamId"],
                service=self.__class__,
                name=data["title"],
                language=Language.get("nl"),
                data={"page_id": self.page_id},
            )])

        r = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PROGRAM, "variables": {"pageId": self.page_id}},
            headers=self._get_gql_headers(),
        )
        program_data = r.json().get("data", {}).get("page")
        if not program_data:
            raise ValueError(f"Series page not found: {self.page_id}")

        series_name = program_data["title"]
        episodes = []
        list_ids = []

        for comp in program_data.get("components", []):
            typename = comp.get("__typename")
            if typename in ("PaginatedTileList", "StaticTileList") and "listId" in comp:
                list_ids.append((comp.get("title") or "Episodes", comp["listId"]))
            elif typename == "ContainerNavigation":
                for item in comp.get("items", []):
                    item_title = item.get("title", "Episodes")
                    for sub in item.get("components", []):
                        if "listId" in sub:
                            list_ids.append((item_title, sub["listId"]))

        # De-duplicate list IDs while preserving their order
        seen_lists = set()
        unique_list_ids = []
        for title, lid in list_ids:
            if lid not in seen_lists:
                unique_list_ids.append((title, lid))
                seen_lists.add(lid)

        for season_title, list_id in unique_list_ids:
            after = None
            while True:
                r_list = self.session.post(
                    self.config["endpoints"]["graphql"],
                    json={"query": QUERY_PAGINATED_LIST, "variables": {"listId": list_id, "after": after}},
                    headers=self._get_gql_headers(),
                )
                list_resp = r_list.json().get("data", {}).get("list")
                if not list_resp:
                    break

                items_container = list_resp.get("paginatedItems")
                nodes = [e["node"] for e in items_container["edges"]] if items_container else list_resp.get("items", [])

                for node in nodes:
                    if not node.get("action"):
                        continue
                    link = node["action"]["link"]
                    s_match = re.search(r'/(\d+)/.+s(\d+)a(\d+)', link)
                    episodes.append(Episode(
                        id_=link,
                        service=self.__class__,
                        title=series_name,
                        season=int(s_match.group(2)) if s_match else 1,
                        number=int(s_match.group(3)) if s_match else 0,
                        name=node["title"],
                        language=Language.get("nl"),
                        data={"page_id": link},
                    ))

                if items_container and items_container["pageInfo"]["hasNextPage"]:
                    after = items_container["pageInfo"]["endCursor"]
                else:
                    break

        if not episodes:
            raise ValueError("No episodes found for this series.")

        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        page_id = title.data["page_id"]
        r_meta = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PLAYBACK, "variables": {"pageId": page_id}},
            headers=self._get_gql_headers(),
        )
        stream_id = r_meta.json()["data"]["page"]["player"]["modes"][0]["streamId"]

        p_info = base64.urlsafe_b64encode(json.dumps(self.config["player_info"]).encode()).decode().replace("=", "")
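        # "eyJhbGciOiJIUzI1NiJ9" is base64url for '{"alg":"HS256"}', so the
        # playerInfo sent below is an unsigned JWT (header.payload. with an
        # empty signature segment), which the token endpoint evidently accepts.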
        r_tok = self.session.post(
            self.config["endpoints"]["player_token"],
            json={"identityToken": self.video_token, "playerInfo": f"eyJhbGciOiJIUzI1NiJ9.{p_info}."},
        )
        vrt_player_token = r_tok.json()["vrtPlayerToken"]

        r_agg = self.session.get(
            self.config["endpoints"]["aggregator"].format(stream_id=stream_id),
            params={"client": self.config["settings"]["client_id"], "vrtPlayerToken": vrt_player_token},
        )
        agg_data = r_agg.json()

        dash_url = next(u["url"] for u in agg_data["targetUrls"] if u["type"] == "mpeg_dash")
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)
        self.drm_token = agg_data["drm"]

        for sub in agg_data.get("subtitleUrls", []):
            tracks.add(Subtitle(
                id_=sub.get("label", "nl"),
                url=sub["url"],
                codec=Subtitle.Codec.WebVTT,
                language=Language.get(sub.get("language", "nl")),
            ))

        for tr in tracks.videos + tracks.audio:
            if tr.drm:
                # Bind tr as a default argument: a plain closure would late-bind
                # and leave every track requesting the last track's license.
                tr.drm.license = lambda challenge, tr=tr, **kw: self.get_widevine_license(challenge, title, tr)

        return tracks

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        r = self.session.post(
            self.config["endpoints"]["license"],
            data=challenge,
            headers={"x-vudrm-token": self.drm_token, "Origin": "https://www.vrt.be", "Referer": "https://www.vrt.be/"},
        )
        r.raise_for_status()
        return r.content

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []
@ -1,18 +0,0 @@
endpoints:
  gigya_login: "https://accounts.eu1.gigya.com/accounts.login"
  vrt_sso: "https://www.vrt.be/vrtmax/sso/login"
  graphql: "https://www.vrt.be/vrtnu-api/graphql/v1"
  player_token: "https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v2/tokens"
  aggregator: "https://media-services-public.vrt.be/media-aggregator/v2/media-items/{stream_id}"
  license: "https://widevine-proxy.drm.technology/proxy"

settings:
  api_key: "3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG"
  client_name: "WEB"
  client_id: "vrtnu-web@PROD"
  client_version: "1.5.15"

player_info:
  drm: { widevine: "L3" }
  platform: "desktop"
  app: { type: "browser", name: "Firefox", version: "146.0" }