forked from FairTrade/unshackle-services
Compare commits
No commits in common. "main" and "main" have entirely different histories.
HIDI/__init__.py (new file, 334 lines)
@@ -0,0 +1,334 @@
import re
from http.cookiejar import CookieJar
from typing import Optional, Iterable
from langcodes import Language
import base64

import click

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Series, Movie, Movies, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle, Audio


class HIDI(Service):
    """
    Service code for HiDive (hidive.com)
    Version: 1.2.0
    Authorization: Email + password login, with automatic token refresh.
    Security: FHD@L3
    """

    TITLE_RE = r"^https?://(?:www\.)?hidive\.com/(?:season/(?P<season_id>\d+)|playlist/(?P<playlist_id>\d+))$"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="HIDI", short_help="https://hidive.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return HIDI(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        m = re.match(self.TITLE_RE, title)
        if not m:
            raise ValueError("Unsupported HiDive URL. Use /season/<id> or /playlist/<id>")

        self.season_id = m.group("season_id")
        self.playlist_id = m.group("playlist_id")
        self.kind = "serie" if self.season_id else "movie"
        self.content_id = int(self.season_id or self.playlist_id)

        if not self.config:
            raise EnvironmentError("Missing HIDI service config.")
        self.cdm = ctx.obj.cdm
        self._auth_token = None
        self._refresh_token = None
        self._drm_cache = {}
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        base_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "application/json, text/plain, */*",
            "Accept-Language": "en-US",
            "Referer": "https://www.hidive.com/",
            "Origin": "https://www.hidive.com",
            "x-api-key": self.config["x_api_key"],
            "app": "dice",
            "Realm": "dce.hidive",
            "x-app-var": self.config["x_app_var"],
        }
        self.session.headers.update(base_headers)

        if not credential or not credential.username or not credential.password:
            raise ValueError("HiDive requires email + password")

        r_login = self.session.post(
            self.config["endpoints"]["login"],
            json={"id": credential.username, "secret": credential.password}
        )
        if r_login.status_code == 401:
            raise PermissionError("Invalid email or password.")
        r_login.raise_for_status()

        login_data = r_login.json()
        self._auth_token = login_data["authorisationToken"]
        self._refresh_token = login_data["refreshToken"]

        self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
        self.log.info("HiDive login successful.")

    def _refresh_auth(self):
        if not self._refresh_token:
            raise PermissionError("No refresh token available to renew session.")

        self.log.warning("Auth token expired, refreshing...")
        r = self.session.post(
            self.config["endpoints"]["refresh"],
            json={"refreshToken": self._refresh_token}
        )
        if r.status_code == 401:
            raise PermissionError("Refresh token is invalid. Please log in again.")
        r.raise_for_status()

        data = r.json()
        self._auth_token = data["authorisationToken"]
        self.session.headers["Authorization"] = f"Bearer {self._auth_token}"
        self.log.info("Auth token refreshed successfully.")

    def _api_get(self, url, **kwargs):
        resp = self.session.get(url, **kwargs)
        if resp.status_code == 401:
            self._refresh_auth()
            resp = self.session.get(url, **kwargs)
        resp.raise_for_status()
        return resp
    def get_titles(self) -> Titles_T:
        # One endpoint for both season and playlist
        resp = self._api_get(
            self.config["endpoints"]["view"],
            params={"type": ("playlist" if self.kind == "movie" else "season"),
                    "id": self.content_id,
                    "timezone": "Europe/Amsterdam"}
        )
        data = resp.json()

        if self.kind == "movie":
            # Find the playlist bucket, then the single VOD
            vod_id = None
            movie_title = None
            description = ""
            for elem in data.get("elements", []):
                if elem.get("$type") == "hero":
                    hdr = (elem.get("attributes", {}).get("header", {}) or {}).get("attributes", {})
                    movie_title = hdr.get("text", movie_title)
                    for c in elem.get("attributes", {}).get("content", []):
                        if c.get("$type") == "textblock":
                            description = c.get("attributes", {}).get("text", description)
                if elem.get("$type") == "bucket" and elem.get("attributes", {}).get("type") == "playlist":
                    items = elem.get("attributes", {}).get("items", [])
                    if items:
                        vod_id = items[0]["id"]
                        if not movie_title:
                            movie_title = items[0].get("title")
                        if not description:
                            description = items[0].get("description", "")
                    break

            if not vod_id:
                raise ValueError("No VOD found in playlist data.")

            return Movies([
                Movie(
                    id_=vod_id,
                    service=self.__class__,
                    name=movie_title or "Unknown Title",
                    description=description or "",
                    year=None,
                    language=Language.get("en"),
                    data={"playlistId": self.content_id}
                )
            ])

        # Series
        episodes = []
        for elem in data.get("elements", []):
            if elem.get("$type") == "bucket" and elem["attributes"].get("type") == "season":
                for item in elem["attributes"].get("items", []):
                    if item.get("type") != "SEASON_VOD":
                        continue
                    ep_title = item["title"]
                    ep_num = 1
                    if ep_title.startswith("E") and " - " in ep_title:
                        try:
                            ep_num = int(ep_title.split(" - ")[0][1:])
                        except ValueError:
                            pass
                    episodes.append(Episode(
                        id_=item["id"],
                        service=self.__class__,
                        title=data.get("metadata", {}).get("series", {}).get("title", "") or "HiDive",
                        season=1,
                        number=ep_num,
                        name=item["title"],
                        description=item.get("description", ""),
                        language=Language.get("en"),
                        data=item,
                    ))
                break

        if not episodes:
            raise ValueError("No episodes found in season data.")
        return Series(sorted(episodes, key=lambda x: x.number))
    def _get_audio_for_langs(self, mpd_url: str, langs: Iterable[Language]) -> list[Audio]:
        merged: list[Audio] = []
        seen = set()

        # Normalize to a list so it can be indexed (an Iterable is not subscriptable)
        langs = list(langs)

        # Use first available language as fallback, or "en" as ultimate fallback
        fallback_lang = langs[0] if langs else Language.get("en")

        dash = DASH.from_url(mpd_url, session=self.session)
        try:
            # Parse with a valid fallback language
            base_tracks = dash.to_tracks(language=fallback_lang)
        except Exception:
            # Try with English as ultimate fallback
            base_tracks = dash.to_tracks(language=Language.get("en"))

        all_audio = base_tracks.audio or []

        for lang in langs:
            # Match by language prefix (e.g. en, ja)
            for audio in all_audio:
                lang_code = getattr(audio.language, "language", "en")
                if lang_code.startswith(lang.language[:2]):
                    key = (lang_code, getattr(audio, "codec", None), getattr(audio, "bitrate", None))
                    if key in seen:
                        continue
                    merged.append(audio)
                    seen.add(key)

        # If nothing matched, just return all available audio tracks
        if not merged and all_audio:
            merged = all_audio

        return merged
    def get_tracks(self, title: Title_T) -> Tracks:
        vod_resp = self._api_get(
            self.config["endpoints"]["vod"].format(vod_id=title.id),
            params={"includePlaybackDetails": "URL"},
        )
        vod = vod_resp.json()

        playback_url = vod.get("playerUrlCallback")
        if not playback_url:
            raise ValueError("No playback URL found.")

        stream_data = self._api_get(playback_url).json()
        dash_list = stream_data.get("dash", [])
        if not dash_list:
            raise ValueError("No DASH streams available.")

        entry = dash_list[0]
        mpd_url = entry["url"]

        # Collect available HiDive metadata languages
        meta_audio_tracks = vod.get("onlinePlaybackMetadata", {}).get("audioTracks", [])
        available_langs = []
        for m in meta_audio_tracks:
            lang_code = (m.get("languageCode") or "").split("-")[0]
            if not lang_code:
                continue
            try:
                available_langs.append(Language.get(lang_code))
            except Exception:
                continue

        # Use first available language as fallback, or English as ultimate fallback
        fallback_lang = available_langs[0] if available_langs else Language.get("en")

        # Parse DASH manifest with a valid fallback language
        base_tracks = DASH.from_url(mpd_url, session=self.session).to_tracks(language=fallback_lang)

        audio_tracks = self._get_audio_for_langs(mpd_url, available_langs)

        # Map metadata labels (guard against a null languageCode)
        meta_audio_map = {(m.get("languageCode") or "").split("-")[0]: m.get("label") for m in meta_audio_tracks}
        for a in audio_tracks:
            lang_code = getattr(a.language, "language", "en")
            a.name = meta_audio_map.get(lang_code, lang_code)
            a.is_original_lang = (lang_code == title.language.language)

        base_tracks.audio = audio_tracks

        # Subtitles
        subtitles = []
        for sub in entry.get("subtitles", []):
            if sub.get("format", "").lower() != "vtt":
                continue
            lang_code = sub.get("language", "en").replace("-", "_")
            try:
                lang = Language.get(lang_code)
            except Exception:
                lang = Language.get("en")
            subtitles.append(Subtitle(
                id_=f"{lang_code}:vtt",
                url=sub.get("url"),
                language=lang,
                codec=Subtitle.Codec.WebVTT,
                name=lang.language_name(),
            ))
        base_tracks.subtitles = subtitles

        # DRM info
        drm = entry.get("drm", {}) or {}
        jwt = drm.get("jwtToken")
        lic_url = (drm.get("url") or "").strip()
        if jwt and lic_url:
            self._drm_cache[title.id] = (jwt, lic_url)

        return base_tracks
    def _hidive_get_drm_info(self, title: Title_T) -> tuple[str, str]:
        if title.id in self._drm_cache:
            return self._drm_cache[title.id]
        self.get_tracks(title)
        return self._drm_cache[title.id]

    def _decode_hidive_license_payload(self, payload: bytes) -> bytes:
        text = payload.decode("utf-8", errors="ignore")
        prefix = "data:application/octet-stream;base64,"
        if text.startswith(prefix):
            b64 = text.split(",", 1)[1]
            return base64.b64decode(b64)
        return payload

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes | str | None:
        jwt_token, license_url = self._hidive_get_drm_info(title)
        headers = {
            "Authorization": f"Bearer {jwt_token}",
            "Content-Type": "application/octet-stream",
            "Accept": "*/*",
            "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36",
            "Origin": "https://www.hidive.com",
            "Referer": "https://www.hidive.com/",
            "X-DRM-INFO": "eyJzeXN0ZW0iOiJjb20ud2lkZXZpbmUuYWxwaGEifQ==",
        }
        r = self.session.post(license_url, data=challenge, headers=headers, timeout=30)
        r.raise_for_status()
        return self._decode_hidive_license_payload(r.content)

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []
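Note: HiDive's license endpoint can return the Widevine license either as raw bytes or wrapped in a base64 data URI, which is what `_decode_hidive_license_payload` above unwraps. A minimal standalone sketch of the same decode, using only the standard library; the sample payload is made up:

import base64

def decode_license_payload(payload: bytes) -> bytes:
    # Some responses arrive as a base64 data URI rather than raw bytes;
    # strip the prefix and decode in that case.
    text = payload.decode("utf-8", errors="ignore")
    if text.startswith("data:application/octet-stream;base64,"):
        return base64.b64decode(text.split(",", 1)[1])
    return payload

# Hypothetical round-trip check:
raw = b"\x08\x01license-bytes"
wrapped = b"data:application/octet-stream;base64," + base64.b64encode(raw)
assert decode_license_payload(wrapped) == raw
assert decode_license_payload(raw) == raw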
HIDI/config.yaml (new file, 10 lines)
@@ -0,0 +1,10 @@
x_api_key: "857a1e5d-e35e-4fdf-805b-a87b6f8364bf"
x_app_var: "6.59.1.e16cdfd"

endpoints:
  init: "https://dce-frontoffice.imggaming.com/api/v1/init/"
  login: "https://dce-frontoffice.imggaming.com/api/v2/login"
  vod: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}?includePlaybackDetails=URL"
  adjacent: "https://dce-frontoffice.imggaming.com/api/v4/vod/{vod_id}/adjacent"
  view: "https://dce-frontoffice.imggaming.com/api/v1/view"  # Changed from season_view
  refresh: "https://dce-frontoffice.imggaming.com/api/v2/token/refresh"
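The view, vod, and refresh endpoints above are all called through `_api_get`, which retries exactly once after refreshing an expired token. A generic sketch of that retry-on-401 pattern, assuming a `requests.Session` and a caller-supplied `refresh()` callback (both names illustrative):

import requests

def get_with_refresh(session: requests.Session, url: str, refresh, **kwargs) -> requests.Response:
    # First attempt; if the token has expired the API answers 401.
    resp = session.get(url, **kwargs)
    if resp.status_code == 401:
        refresh()  # expected to update session.headers["Authorization"]
        resp = session.get(url, **kwargs)  # retry exactly once
    resp.raise_for_status()
    return resp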
KIJK/__init__.py (deleted, 128 lines)
@@ -1,128 +0,0 @@
import re
from collections.abc import Generator
from typing import Optional, Union
import urllib.parse
import json

import click
from langcodes import Language

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.search_result import SearchResult
from unshackle.core.service import Service
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T
from unshackle.core.tracks import Tracks, Chapter


class KIJK(Service):
    """
    Service code for kijk.nl
    Version: 1.0.0

    Authorization: None

    Security: FHD@L3, UHD@L3
    """

    TITLE_RE = r"https?://(?:www\.)?kijk\.nl/programmas/[^/]+/([^/?]+)"
    GEOFENCE = ("NL",)

    @staticmethod
    @click.command(name="KIJK", short_help="https://kijk.nl")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return KIJK(ctx, **kwargs)

    def __init__(self, ctx, title):
        super().__init__(ctx)
        self.title = title
        if self.config is None:
            raise Exception("Config is missing!")

        self.session.headers.update({"user-agent": self.config["client"]["default"]["user_agent"]})
        self.token = None
        self.license_url = None

    def authenticate(self, cookies=None, credential=None):
        super().authenticate(cookies, credential)

        self.log.info("Retrieving new token")
        query = {
            "query": "query DrmTokenQuery($provider: DrmProvider) {\n drmToken(drmProvider: $provider) {\n expiration\n token\n }\n }",
            "variables": {
                "provider": "JWP"
            }
        }
        res = self.session.post(self.config["endpoints"]["graphql"], json=query)
        res.raise_for_status()
        self.token = res.json()["data"]["drmToken"]["token"]

    def search(self) -> Generator[SearchResult, None, None]:
        raise NotImplementedError("Search is not supported for this service.")

    def get_titles(self) -> Titles_T:
        guid_match = re.match(self.TITLE_RE, self.title)
        if not guid_match:
            raise ValueError("Invalid KIJK URL. Could not extract GUID.")

        guid = guid_match.group(1)

        query_graphql = "query GetVideoQuery($guid:[String]){programs(guid:$guid){items{guid type metadata availableRegion ...Media ...Tracks ...Sources}}}fragment Media on Program{media{type availableDate availabilityState airedDateTime expirationDate}}fragment Tracks on Program{tracks{file kind label}}fragment Sources on Program{sources{type file drm}}"
        variables_graphql = json.dumps({"guid": guid})

        url = f"{self.config['endpoints']['graphql']}?query={urllib.parse.quote(query_graphql)}&variables={urllib.parse.quote(variables_graphql)}"

        res = self.session.get(url)
        res.raise_for_status()

        metadata = res.json()["data"]["programs"]["items"][0]

        return Movies(
            [
                Movie(
                    id_=metadata["guid"],
                    service=self.__class__,
                    name=metadata["metadata"]["media_program_name"],
                    description=metadata["metadata"].get("media_description", ""),
                    year=int(metadata["media"][0]["airedDateTime"].split('-')[0]),
                    language=Language.get("nl"),  # Hardcoded as it's a Dutch service
                    data=metadata,
                )
            ]
        )

    def get_tracks(self, title: Title_T) -> Tracks:
        dash_link = None
        for source in title.data["sources"]:
            if source.get("type") == "dash" and source.get("drm") and "widevine" in source.get("drm"):
                dash_link = source["file"]
                self.license_url = source["drm"]["widevine"]["url"]
                break

        if not dash_link:
            raise ValueError("Could not find a DASH manifest for this title.")

        self.log.debug(f"Manifest URL: {dash_link}")
        tracks = DASH.from_url(url=dash_link, session=self.session).to_tracks(language=title.language)

        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> Optional[Union[bytes, str]]:
        if not self.license_url:
            raise ValueError("Widevine license endpoint not configured")

        headers = {'x-vudrm-token': self.token} if self.token else {}
        response = self.session.post(
            url=self.license_url,
            data=challenge,
            headers=headers
        )
        response.raise_for_status()
        return response.content
KIJK/config.yaml (deleted, 6 lines)
@@ -1,6 +0,0 @@
endpoints:
  graphql: https://api.prd.video.talpa.network/graphql

client:
  default:
    user_agent: "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/141.0.0.0 Safari/537.36"
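The removed KIJK service sent its GraphQL queries as URL-encoded GET parameters rather than a JSON POST body. A minimal sketch of that encoding with the standard library; the query and GUID here are illustrative:

import json
import urllib.parse

endpoint = "https://api.prd.video.talpa.network/graphql"
query = "query GetVideoQuery($guid:[String]){programs(guid:$guid){items{guid type}}}"
variables = json.dumps({"guid": "some-program-guid"})  # hypothetical GUID

# GraphQL over GET: both the query and its variables travel URL-encoded.
url = f"{endpoint}?query={urllib.parse.quote(query)}&variables={urllib.parse.quote(variables)}"
print(url)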
KNPY/__init__.py (new file, 407 lines)
@@ -0,0 +1,407 @@
import re
from datetime import datetime, timezone
from http.cookiejar import CookieJar
from typing import List, Optional

import click
import jwt
from langcodes import Language

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.search_result import SearchResult
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
from unshackle.core.tracks import Subtitle, Tracks


class KNPY(Service):
    """
    Service code for Kanopy (kanopy.com).
    Version: 1.0.0

    Auth: Credential (username + password)
    Security: FHD@L3

    Handles both Movies and Series (Playlists).
    Detects movies that require tickets and stops early.
    Auth tokens are cached per user.
    """

    # Updated regex to match the new URL structure with library subdomain and path
    TITLE_RE = r"^https?://(?:www\.)?kanopy\.com/.+/(?P<id>\d+)$"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="KNPY", short_help="https://kanopy.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return KNPY(ctx, **kwargs)
    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        if not self.config:
            raise ValueError("KNPY configuration not found. Ensure config.yaml exists.")

        self.cdm = ctx.obj.cdm

        match = re.match(self.TITLE_RE, title)
        if match:
            self.content_id = match.group("id")
        else:
            self.content_id = None
            self.search_query = title

        self.API_VERSION = self.config["client"]["api_version"]
        self.USER_AGENT = self.config["client"]["user_agent"]
        self.WIDEVINE_UA = self.config["client"]["widevine_ua"]

        self.session.headers.update({
            "x-version": self.API_VERSION,
            "user-agent": self.USER_AGENT
        })

        self._jwt = None
        self._visitor_id = None
        self._user_id = None
        self._domain_id = None
        self.widevine_license_url = None
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        if not credential or not credential.username or not credential.password:
            raise ValueError("Kanopy requires email and password for authentication.")

        cache = self.cache.get("auth_token")

        if cache and not cache.expired:
            cached_data = cache.data
            valid_token = None

            if isinstance(cached_data, dict) and "token" in cached_data:
                if cached_data.get("username") == credential.username:
                    valid_token = cached_data["token"]
                    self.log.info("Using cached authentication token")
                else:
                    self.log.info(f"Cached token belongs to '{cached_data.get('username')}', but logging in as '{credential.username}'. Re-authenticating.")

            elif isinstance(cached_data, str):
                self.log.info("Found legacy cached token format. Re-authenticating to ensure correct user.")

            if valid_token:
                self._jwt = valid_token
                self.session.headers.update({"authorization": f"Bearer {self._jwt}"})

                if not self._user_id or not self._domain_id or not self._visitor_id:
                    try:
                        decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
                        self._user_id = decoded_jwt["data"]["uid"]
                        self._visitor_id = decoded_jwt["data"]["visitor_id"]
                        self.log.info("Extracted user_id and visitor_id from cached token.")
                        self._fetch_user_details()
                        return
                    except (KeyError, jwt.DecodeError) as e:
                        self.log.error(f"Could not decode cached token: {e}. Re-authenticating.")

        self.log.info("Performing handshake to get visitor token...")
        r = self.session.get(self.config["endpoints"]["handshake"])
        r.raise_for_status()
        handshake_data = r.json()
        self._visitor_id = handshake_data["visitorId"]
        initial_jwt = handshake_data["jwt"]

        self.log.info(f"Logging in as {credential.username}...")
        login_payload = {
            "credentialType": "email",
            "emailUser": {
                "email": credential.username,
                "password": credential.password
            }
        }
        r = self.session.post(
            self.config["endpoints"]["login"],
            json=login_payload,
            headers={"authorization": f"Bearer {initial_jwt}"}
        )
        r.raise_for_status()
        login_data = r.json()
        self._jwt = login_data["jwt"]
        self._user_id = login_data["userId"]

        self.session.headers.update({"authorization": f"Bearer {self._jwt}"})
        self.log.info(f"Successfully authenticated as {credential.username}")

        self._fetch_user_details()

        try:
            decoded_jwt = jwt.decode(self._jwt, options={"verify_signature": False})
            exp_timestamp = decoded_jwt.get("exp")

            cache_payload = {
                "token": self._jwt,
                "username": credential.username
            }

            if exp_timestamp:
                expiration_in_seconds = int(exp_timestamp - datetime.now(timezone.utc).timestamp())
                self.log.info(f"Caching token for {expiration_in_seconds / 60:.2f} minutes.")
                cache.set(data=cache_payload, expiration=expiration_in_seconds)
            else:
                self.log.warning("JWT has no 'exp' claim, caching for 1 hour as a fallback.")
                cache.set(data=cache_payload, expiration=3600)
        except Exception as e:
            self.log.error(f"Failed to decode JWT for caching: {e}. Caching for 1 hour as a fallback.")
            cache.set(
                data={"token": self._jwt, "username": credential.username},
                expiration=3600
            )
    def _fetch_user_details(self):
        self.log.info("Fetching user library memberships...")
        r = self.session.get(self.config["endpoints"]["memberships"].format(user_id=self._user_id))
        r.raise_for_status()
        memberships = r.json()

        for membership in memberships.get("list", []):
            if membership.get("status") == "active" and membership.get("isDefault", False):
                self._domain_id = str(membership["domainId"])
                self.log.info(f"Using default library domain: {membership.get('sitename', 'Unknown')} (ID: {self._domain_id})")
                return

        if memberships.get("list"):
            self._domain_id = str(memberships["list"][0]["domainId"])
            self.log.warning(f"No default library found. Using first active domain: {self._domain_id}")
        else:
            raise ValueError("No active library memberships found for this user.")
    def get_titles(self) -> Titles_T:
        if not self.content_id:
            raise ValueError("A content ID is required to get titles. Use a URL or run a search first.")
        if not self._domain_id:
            raise ValueError("Domain ID not set. Authentication may have failed.")

        r = self.session.get(self.config["endpoints"]["video_info"].format(video_id=self.content_id, domain_id=self._domain_id))
        r.raise_for_status()
        content_data = r.json()

        content_type = content_data.get("type")

        def parse_lang(data):
            try:
                langs = data.get("languages", [])
                if langs and isinstance(langs, list) and len(langs) > 0:
                    return Language.find(langs[0])
            except Exception:
                pass
            return Language.get("en")

        if content_type == "video":
            video_data = content_data["video"]
            movie = Movie(
                id_=str(video_data["videoId"]),
                service=self.__class__,
                name=video_data["title"],
                year=video_data.get("productionYear"),
                description=video_data.get("descriptionHtml", ""),
                language=parse_lang(video_data),
                data=video_data,
            )
            return Movies([movie])

        elif content_type == "playlist":
            playlist_data = content_data["playlist"]
            series_title = playlist_data["title"]
            series_year = playlist_data.get("productionYear")

            season_match = re.search(r'(?:Season|S)\s*(\d+)', series_title, re.IGNORECASE)
            season_num = int(season_match.group(1)) if season_match else 1

            r = self.session.get(self.config["endpoints"]["video_items"].format(video_id=self.content_id, domain_id=self._domain_id))
            r.raise_for_status()
            items_data = r.json()

            episodes = []
            for i, item in enumerate(items_data.get("list", [])):
                if item.get("type") != "video":
                    continue

                video_data = item["video"]
                ep_num = i + 1

                ep_title = video_data.get("title", "")
                ep_match = re.search(r'Ep(?:isode)?\.?\s*(\d+)', ep_title, re.IGNORECASE)
                if ep_match:
                    ep_num = int(ep_match.group(1))

                episodes.append(
                    Episode(
                        id_=str(video_data["videoId"]),
                        service=self.__class__,
                        title=series_title,
                        season=season_num,
                        number=ep_num,
                        name=video_data["title"],
                        description=video_data.get("descriptionHtml", ""),
                        year=video_data.get("productionYear", series_year),
                        language=parse_lang(video_data),
                        data=video_data,
                    )
                )

            series = Series(episodes)
            series.name = series_title
            series.description = playlist_data.get("descriptionHtml", "")
            series.year = series_year
            return series

        else:
            raise ValueError(f"Unsupported content type: {content_type}")
    def get_tracks(self, title: Title_T) -> Tracks:
        play_payload = {
            "videoId": int(title.id),
            "domainId": int(self._domain_id),
            "userId": int(self._user_id),
            "visitorId": self._visitor_id
        }

        self.session.headers.setdefault("authorization", f"Bearer {self._jwt}")
        self.session.headers.setdefault("x-version", self.API_VERSION)
        self.session.headers.setdefault("user-agent", self.USER_AGENT)

        r = self.session.post(self.config["endpoints"]["plays"], json=play_payload)
        response_json = None
        try:
            response_json = r.json()
        except Exception:
            pass

        # Handle known errors gracefully
        if r.status_code == 403:
            if response_json and response_json.get("errorSubcode") == "playRegionRestricted":
                self.log.error("Kanopy reports: This video is not available in your country.")
                raise PermissionError(
                    "Playback blocked by region restriction. Try connecting through a supported country or verify your library's access region."
                )
            else:
                self.log.error(f"Access forbidden (HTTP 403). Response: {response_json}")
                raise PermissionError("Kanopy denied access to this video. It may require a different library membership or authentication.")

        # Raise for any other HTTP errors
        r.raise_for_status()
        play_data = response_json or r.json()

        manifest_url = None
        for manifest in play_data.get("manifests", []):
            if manifest["manifestType"] == "dash":
                url = manifest["url"]
                manifest_url = f"https://kanopy.com{url}" if url.startswith("/") else url
                drm_type = manifest.get("drmType")
                if drm_type == "kanopyDrm":
                    play_id = play_data.get("playId")
                    self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=f"{play_id}-0")
                elif drm_type == "studioDrm":
                    license_id = manifest.get("drmLicenseID", f"{play_data.get('playId')}-1")
                    self.widevine_license_url = self.config["endpoints"]["widevine_license"].format(license_id=license_id)
                else:
                    self.log.warning(f"Unknown drmType: {drm_type}")
                    self.widevine_license_url = None
                break

        if not manifest_url:
            raise ValueError("Could not find a DASH manifest for this title.")
        if not self.widevine_license_url:
            raise ValueError("Could not construct Widevine license URL.")

        self.log.info(f"Fetching DASH manifest from: {manifest_url}")
        r = self.session.get(manifest_url)
        r.raise_for_status()

        # Refresh headers for manifest parsing
        self.session.headers.clear()
        self.session.headers.update({
            "User-Agent": self.WIDEVINE_UA,
            "Accept": "*/*",
            "Accept-Encoding": "gzip, deflate",
            "Connection": "keep-alive",
        })

        tracks = DASH.from_text(r.text, url=manifest_url).to_tracks(language=title.language)
        for caption_data in play_data.get("captions", []):
            lang = caption_data.get("language", "en")
            for file_info in caption_data.get("files", []):
                if file_info.get("type") == "webvtt":
                    tracks.add(Subtitle(
                        id_=f"caption-{lang}",
                        url=file_info["url"],
                        codec=Subtitle.Codec.WebVTT,
                        language=Language.get(lang)
                    ))
                    break

        return tracks
    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.widevine_license_url:
            raise ValueError("Widevine license URL was not set. Call get_tracks first.")

        license_headers = {
            "Content-Type": "application/octet-stream",
            "User-Agent": self.WIDEVINE_UA,
            "Authorization": f"Bearer {self._jwt}",
            "X-Version": self.API_VERSION
        }

        r = self.session.post(
            self.widevine_license_url,
            data=challenge,
            headers=license_headers
        )
        r.raise_for_status()
        return r.content

    # def search(self) -> List[SearchResult]:
    #     if not hasattr(self, 'search_query'):
    #         self.log.error("Search query not set. Cannot search.")
    #         return []
    #
    #     self.log.info(f"Searching for '{self.search_query}'...")
    #     params = {
    #         "query": self.search_query,
    #         "sort": "relevance",
    #         "domainId": self._domain_id,
    #         "page": 0,
    #         "perPage": 20
    #     }
    #     r = self.session.get(self.config["endpoints"]["search"], params=params)
    #     r.raise_for_status()
    #     search_data = r.json()
    #
    #     results = []
    #     for item in search_data.get("list", []):
    #         item_type = item.get("type")
    #         if item_type not in ["playlist", "video"]:
    #             continue
    #
    #         video_id = item.get("videoId")
    #         title = item.get("title", "No Title")
    #         label = "Series" if item_type == "playlist" else "Movie"
    #
    #         results.append(
    #             SearchResult(
    #                 id_=str(video_id),
    #                 title=title,
    #                 description="",
    #                 label=label,
    #                 url=f"https://www.kanopy.com/watch/{video_id}"
    #             )
    #         )
    #     return results

    def get_chapters(self, title: Title_T) -> list:
        return []
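KNPY sizes its token cache from the JWT's exp claim instead of a fixed TTL. A minimal sketch of that calculation with PyJWT, assuming an already-obtained token string:

from datetime import datetime, timezone

import jwt  # PyJWT

def token_ttl_seconds(token: str, fallback: int = 3600) -> int:
    # Decode without verifying the signature; only the claims are needed.
    claims = jwt.decode(token, options={"verify_signature": False})
    exp = claims.get("exp")
    if not exp:
        return fallback  # no expiry claim: fall back to one hour
    return max(0, int(exp - datetime.now(timezone.utc).timestamp()))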
KNPY/config.yaml (new file, 15 lines)
@@ -0,0 +1,15 @@
client:
  api_version: "Android/com.kanopy/6.21.0/952 (SM-A525F; Android 15)"
  user_agent: "okhttp/5.2.1"
  widevine_ua: "KanopyApplication/6.21.0 (Linux;Android 15) AndroidXMedia3/1.8.0"

endpoints:
  handshake: "https://kanopy.com/kapi/handshake"
  login: "https://kanopy.com/kapi/login"
  memberships: "https://kanopy.com/kapi/memberships?userId={user_id}"
  video_info: "https://kanopy.com/kapi/videos/{video_id}?domainId={domain_id}"
  video_items: "https://kanopy.com/kapi/videos/{video_id}/items?domainId={domain_id}"
  search: "https://kanopy.com/kapi/search/videos"
  plays: "https://kanopy.com/kapi/plays"
  access_expires_in: "https://kanopy.com/kapi/users/{user_id}/history/videos/{video_id}/access_expires_in?domainId={domain_id}"
  widevine_license: "https://kanopy.com/kapi/licenses/widevine/{license_id}"
KOWP/__init__.py (new file, 297 lines)
@@ -0,0 +1,297 @@
import re
from http.cookiejar import CookieJar
from typing import Optional, List

import click
from langcodes import Language

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.search_result import SearchResult
from unshackle.core.titles import Episode, Series, Title_T, Titles_T
from unshackle.core.tracks import Subtitle, Tracks


class KOWP(Service):
    """
    Service code for Kocowa Plus (kocowa.com).
    Version: 1.0.0

    Auth: Credential (username + password)
    Security: FHD@L3
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?kocowa\.com/[^/]+/season/)?(?P<title_id>\d+)"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="kowp", short_help="https://www.kocowa.com")
    @click.argument("title", type=str)
    @click.option("--extras", is_flag=True, default=False, help="Include teasers/extras")
    @click.pass_context
    def cli(ctx, **kwargs):
        return KOWP(ctx, **kwargs)
    def __init__(self, ctx, title: str, extras: bool = False):
        super().__init__(ctx)
        match = re.match(self.TITLE_RE, title)
        if match:
            self.title_id = match.group("title_id")
        else:
            self.title_id = title  # fallback to use as search keyword
        self.include_extras = extras
        self.brightcove_account_id = None
        self.brightcove_pk = None
        self.cdm = ctx.obj.cdm
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        if not credential:
            raise ValueError("KOWP requires username and password")

        payload = {
            "username": credential.username,
            "password": credential.password,
            "device_id": f"{credential.username}_browser",
            "device_type": "browser",
            "device_model": "Firefox",
            "device_version": "firefox/143.0",
            "push_token": None,
            "app_version": "v4.0.16",
        }
        r = self.session.post(
            self.config["endpoints"]["login"],
            json=payload,
            headers={"Authorization": "anonymous", "Origin": "https://www.kocowa.com"}
        )
        r.raise_for_status()
        res = r.json()
        if res.get("code") != "0000":
            raise PermissionError(f"Login failed: {res.get('message')}")

        self.access_token = res["object"]["access_token"]

        r = self.session.post(
            self.config["endpoints"]["middleware_auth"],
            json={"token": f"wA-Auth.{self.access_token}"},
            headers={"Origin": "https://www.kocowa.com"}
        )
        r.raise_for_status()
        self.middleware_token = r.json()["token"]

        self._fetch_brightcove_config()
    def _fetch_brightcove_config(self):
        """Fetch Brightcove account_id and policy_key from Kocowa's public config endpoint."""
        try:
            r = self.session.get(
                "https://middleware.bcmw.kocowa.com/api/config",
                headers={
                    "Origin": "https://www.kocowa.com",
                    "Referer": "https://www.kocowa.com/",
                    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/142.0.0.0 Safari/537.36 Edg/142.0.0.0"
                }
            )
            r.raise_for_status()
            config = r.json()

            self.brightcove_account_id = config.get("VC_ACCOUNT_ID")
            self.brightcove_pk = config.get("BCOV_POLICY_KEY")

            if not self.brightcove_account_id:
                raise ValueError("VC_ACCOUNT_ID missing in /api/config response")
            if not self.brightcove_pk:
                raise ValueError("BCOV_POLICY_KEY missing in /api/config response")

            self.log.info(f"Brightcove config loaded: account_id={self.brightcove_account_id}")

        except Exception as e:
            raise RuntimeError(f"Failed to fetch or parse Brightcove config: {e}")
    def get_titles(self) -> Titles_T:
        all_episodes = []
        offset = 0
        limit = 20
        series_title = None  # Store the title from the first request

        while True:
            url = self.config["endpoints"]["metadata"].format(title_id=self.title_id)
            sep = "&" if "?" in url else "?"
            url += f"{sep}offset={offset}&limit={limit}"

            r = self.session.get(
                url,
                headers={"Authorization": self.access_token, "Origin": "https://www.kocowa.com"}
            )
            r.raise_for_status()
            data = r.json()["object"]

            # Extract the series title only from the very first page
            if series_title is None and "meta" in data:
                series_title = data["meta"]["title"]["en"]

            page_objects = data.get("next_episodes", {}).get("objects", [])
            if not page_objects:
                break

            for ep in page_objects:
                is_episode = ep.get("detail_type") == "episode"
                is_extra = ep.get("detail_type") in ("teaser", "extra")
                if is_episode or (self.include_extras and is_extra):
                    all_episodes.append(ep)

            offset += limit
            total = data.get("next_episodes", {}).get("total_count", 0)
            if len(all_episodes) >= total or len(page_objects) < limit:
                break

        # If we never got the series title, exit with an error
        if series_title is None:
            raise ValueError("Could not retrieve series metadata to get the title.")

        episodes = []
        for ep in all_episodes:
            meta = ep["meta"]
            ep_type = "Episode" if ep["detail_type"] == "episode" else ep["detail_type"].capitalize()
            ep_num = meta.get("episode_number", 0)
            title = meta["title"].get("en") or f"{ep_type} {ep_num}"
            desc = meta["description"].get("en") or ""

            episodes.append(
                Episode(
                    id_=str(ep["id"]),
                    service=self.__class__,
                    title=series_title,
                    season=meta.get("season_number", 1),
                    number=ep_num,
                    name=title,
                    description=desc,
                    year=None,
                    language=Language.get("en"),
                    data=ep,
                )
            )

        return Series(episodes)
    def get_tracks(self, title: Title_T) -> Tracks:
        # Authorize playback
        r = self.session.post(
            self.config["endpoints"]["authorize"].format(episode_id=title.id),
            headers={"Authorization": f"Bearer {self.middleware_token}"}
        )
        r.raise_for_status()
        auth_data = r.json()
        if not auth_data.get("Success"):
            raise PermissionError("Playback authorization failed")
        self.playback_token = auth_data["token"]

        # Fetch Brightcove manifest
        manifest_url = (
            f"https://edge.api.brightcove.com/playback/v1/accounts/{self.brightcove_account_id}/videos/ref:{title.id}"
        )
        r = self.session.get(
            manifest_url,
            headers={"Accept": f"application/json;pk={self.brightcove_pk}"}
        )
        r.raise_for_status()
        manifest = r.json()

        # Get DASH URL + Widevine license
        dash_url = widevine_url = None
        for src in manifest.get("sources", []):
            if src.get("type") == "application/dash+xml":
                dash_url = src["src"]
                widevine_url = (
                    src.get("key_systems", {})
                    .get("com.widevine.alpha", {})
                    .get("license_url")
                )
                if dash_url and widevine_url:
                    break

        if not dash_url or not widevine_url:
            raise ValueError("No Widevine DASH stream found")

        self.widevine_license_url = widevine_url
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        for sub in manifest.get("text_tracks", []):
            srclang = sub.get("srclang")
            if not srclang or srclang == "thumbnails":
                continue

            subtitle_track = Subtitle(
                id_=sub["id"],
                url=sub["src"],
                codec=Subtitle.Codec.WebVTT,
                language=Language.get(srclang),
                sdh=True,  # Kocowa subs are SDH - mark them as such
                forced=False,
            )
            tracks.add(subtitle_track)

        return tracks
    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        r = self.session.post(
            self.widevine_license_url,
            data=challenge,
            headers={
                "BCOV-Auth": self.playback_token,
                "Content-Type": "application/octet-stream",
                "Origin": "https://www.kocowa.com",
                "Referer": "https://www.kocowa.com/",
            }
        )
        r.raise_for_status()
        return r.content
    def search(self) -> List[SearchResult]:
        url = "https://prod-fms.kocowa.com/api/v01/fe/gks/autocomplete"
        params = {
            "search_category": "All",
            "search_input": self.title_id,
            "include_webtoon": "true",
        }

        r = self.session.get(
            url,
            params=params,
            headers={
                "Authorization": self.access_token,
                "Origin": "https://www.kocowa.com",
                "Referer": "https://www.kocowa.com/",
            }
        )
        r.raise_for_status()
        response = r.json()
        contents = response.get("object", {}).get("contents", [])

        results = []
        for item in contents:
            if item.get("detail_type") != "season":
                continue

            meta = item["meta"]
            title_en = meta["title"].get("en") or "[No Title]"
            description_en = meta["description"].get("en") or ""
            show_id = str(item["id"])

            results.append(
                SearchResult(
                    id_=show_id,
                    title=title_en,
                    description=description_en,
                    label="season",
                    url=f"https://www.kocowa.com/en_us/season/{show_id}/"
                )
            )
        return results

    def get_chapters(self, title: Title_T) -> list:
        return []
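The manifest fetch in KOWP's get_tracks uses Brightcove's public Playback API, which authenticates via a policy key passed in the Accept header rather than a bearer token. A minimal sketch of that request with requests; the account ID, policy key, and reference ID are placeholders:

import requests

ACCOUNT_ID = "1234567890"    # placeholder Brightcove account ID
POLICY_KEY = "BCpkADawq..."  # placeholder policy key
REF_ID = "some-episode-id"   # placeholder reference ID

url = f"https://edge.api.brightcove.com/playback/v1/accounts/{ACCOUNT_ID}/videos/ref:{REF_ID}"
# The policy key rides in the Accept header, not in Authorization.
r = requests.get(url, headers={"Accept": f"application/json;pk={POLICY_KEY}"})
r.raise_for_status()
video = r.json()
print([s.get("type") for s in video.get("sources", [])])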
KOWP/config.yaml (new file, 5 lines)
@@ -0,0 +1,5 @@
endpoints:
  login: "https://prod-sgwv3.kocowa.com/api/v01/user/signin"
  middleware_auth: "https://middleware.bcmw.kocowa.com/authenticate-user"
  metadata: "https://prod-fms.kocowa.com/api/v01/fe/content/get?id={title_id}"
  authorize: "https://middleware.bcmw.kocowa.com/api/playback/authorize/{episode_id}"
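KOWP's get_titles walks the metadata endpoint with an offset/limit loop, stopping on a short page or once the server-reported total is reached. A generic sketch of that pagination, assuming a hypothetical fetch_page(offset, limit) helper that returns (items, total_count):

def collect_all(fetch_page, limit: int = 20) -> list:
    items, offset = [], 0
    while True:
        page, total = fetch_page(offset, limit)  # hypothetical helper
        if not page:
            break
        items.extend(page)
        offset += limit
        # Stop once the reported total is reached or a short page arrives.
        if len(items) >= total or len(page) < limit:
            break
    return items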
MUBI/__init__.py (new file, 396 lines)
@@ -0,0 +1,396 @@
import json
import re
import uuid
from http.cookiejar import CookieJar
from typing import Optional, Generator
from langcodes import Language
import base64
import click
from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Title_T, Titles_T, Series
from unshackle.core.tracks import Chapter, Tracks, Subtitle


class MUBI(Service):
    """
    Service code for MUBI (mubi.com)
    Version: 1.2.0

    Authorization: Required cookies (lt token + session)
    Security: FHD @ L3 (Widevine)

    Supports:
      • Series ↦ https://mubi.com/en/nl/series/twin-peaks
      • Movies ↦ https://mubi.com/en/nl/films/the-substance
    """

    SERIES_TITLE_RE = r"^https?://(?:www\.)?mubi\.com(?:/[^/]+)*?/series/(?P<series_slug>[^/]+)(?:/season/(?P<season_slug>[^/]+))?$"
    TITLE_RE = r"^(?:https?://(?:www\.)?mubi\.com)(?:/[^/]+)*?/films/(?P<slug>[^/?#]+)$"
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="MUBI", short_help="https://mubi.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return MUBI(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        m_film = re.match(self.TITLE_RE, title)
        m_series = re.match(self.SERIES_TITLE_RE, title)

        if not m_film and not m_series:
            raise ValueError(f"Invalid MUBI URL: {title}")

        self.is_series = bool(m_series)
        self.slug = m_film.group("slug") if m_film else None
        self.series_slug = m_series.group("series_slug") if m_series else None
        self.season_slug = m_series.group("season_slug") if m_series else None

        self.film_id: Optional[int] = None
        self.lt_token: Optional[str] = None
        self.session_token: Optional[str] = None
        self.user_id: Optional[int] = None
        self.country_code: Optional[str] = None
        self.anonymous_user_id: Optional[str] = None
        self.default_country: Optional[str] = None
        self.reels_data: Optional[list] = None

        # Store CDM reference
        self.cdm = ctx.obj.cdm

        if self.config is None:
            raise EnvironmentError("Missing service config for MUBI.")
    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        super().authenticate(cookies, credential)

        try:
            r_ip = self.session.get(self.config["endpoints"]["ip_geolocation"], timeout=5)
            r_ip.raise_for_status()
            ip_data = r_ip.json()
            if ip_data.get("country"):
                self.default_country = ip_data["country"]
                self.log.debug(f"Detected country from IP: {self.default_country}")
            else:
                self.log.warning("IP geolocation response did not contain a country code.")
        except Exception as e:
            raise ValueError(f"Failed to fetch IP geolocation: {e}")

        if not cookies:
            raise PermissionError("MUBI requires login cookies.")

        # Extract essential tokens
        lt_cookie = next((c for c in cookies if c.name == "lt"), None)
        session_cookie = next((c for c in cookies if c.name == "_mubi_session"), None)
        snow_id_cookie = next((c for c in cookies if c.name == "_snow_id.c006"), None)

        if not lt_cookie:
            raise PermissionError("Missing 'lt' cookie (Bearer token).")
        if not session_cookie:
            raise PermissionError("Missing '_mubi_session' cookie.")

        self.lt_token = lt_cookie.value
        self.session_token = session_cookie.value

        # Extract anonymous_user_id from _snow_id.c006
        if snow_id_cookie and "." in snow_id_cookie.value:
            self.anonymous_user_id = snow_id_cookie.value.split(".")[0]
        else:
            self.anonymous_user_id = str(uuid.uuid4())
            self.log.warning(f"No _snow_id.c006 cookie found — generated new anonymous_user_id: {self.anonymous_user_id}")

        base_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) Firefox/143.0",
            "Origin": "https://mubi.com",
            "Referer": "https://mubi.com/",
            "CLIENT": "web",
            "Client-Accept-Video-Codecs": "h265,vp9,h264",
            "Client-Accept-Audio-Codecs": "aac",
            "Authorization": f"Bearer {self.lt_token}",
            "ANONYMOUS_USER_ID": self.anonymous_user_id,
            "Client-Country": self.default_country,
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "same-site",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
        }

        self.session.headers.update(base_headers)

        r_account = self.session.get(self.config["endpoints"]["account"])
        if not r_account.ok:
            raise PermissionError(f"Failed to fetch MUBI account: {r_account.status_code} {r_account.text}")

        account_data = r_account.json()
        self.user_id = account_data.get("id")
        self.country_code = (account_data.get("country") or {}).get("code", "NL")

        self.session.headers["Client-Country"] = self.country_code
        self.GEOFENCE = (self.country_code,)

        self._bind_anonymous_user()

        self.log.info(
            f"Authenticated as user {self.user_id}, "
            f"country: {self.country_code}, "
            f"anonymous_id: {self.anonymous_user_id}"
        )
    def _bind_anonymous_user(self):
        try:
            r = self.session.put(
                self.config["endpoints"]["current_user"],
                json={"anonymous_user_uuid": self.anonymous_user_id},
                headers={"Content-Type": "application/json"}
            )
            if r.ok:
                self.log.debug("Anonymous user ID successfully bound to account.")
            else:
                self.log.warning(f"Failed to bind anonymous_user_uuid: {r.status_code}")
        except Exception as e:
            self.log.warning(f"Exception while binding anonymous_user_uuid: {e}")
def get_titles(self) -> Titles_T:
|
||||||
|
if self.is_series:
|
||||||
|
return self._get_series_titles()
|
||||||
|
else:
|
||||||
|
return self._get_film_title()
|
||||||
|
|
||||||
|
def _get_film_title(self) -> Movies:
|
||||||
|
url = self.config["endpoints"]["film_by_slug"].format(slug=self.slug)
|
||||||
|
r = self.session.get(url)
|
||||||
|
r.raise_for_status()
|
||||||
|
data = r.json()
|
||||||
|
|
||||||
|
self.film_id = data["id"]
|
||||||
|
|
||||||
|
# Fetch reels to get definitive language code and cache the response
|
||||||
|
url_reels = self.config["endpoints"]["reels"].format(film_id=self.film_id)
|
||||||
|
r_reels = self.session.get(url_reels)
|
||||||
|
r_reels.raise_for_status()
|
||||||
|
self.reels_data = r_reels.json()
|
||||||
|
|
||||||
|
# Extract original language from the first audio track of the first reel
|
||||||
|
original_language_code = "en" # Default fallback
|
||||||
|
if self.reels_data and self.reels_data[0].get("audio_tracks"):
|
||||||
|
first_audio_track = self.reels_data[0]["audio_tracks"][0]
|
||||||
|
if "language_code" in first_audio_track:
|
||||||
|
original_language_code = first_audio_track["language_code"]
|
||||||
|
self.log.debug(f"Detected original language from reels: '{original_language_code}'")
|
||||||
|
|
||||||
|
genres = ", ".join(data.get("genres", [])) or "Unknown"
|
||||||
|
description = (
|
||||||
|
data.get("default_editorial_html", "")
|
||||||
|
.replace("<p>", "").replace("</p>", "").replace("<em>", "").replace("</em>", "").strip()
|
||||||
|
)
|
||||||
|
year = data.get("year")
|
||||||
|
name = data.get("title", "Unknown")
|
||||||
|
|
||||||
|
movie = Movie(
|
||||||
|
id_=self.film_id,
|
||||||
|
service=self.__class__,
|
||||||
|
name=name,
|
||||||
|
year=year,
|
||||||
|
description=description,
|
||||||
|
language=Language.get(original_language_code),
|
||||||
|
data=data,
|
||||||
|
)
|
||||||
|
|
||||||
|
return Movies([movie])
|
||||||
|
|
||||||
|
def _get_series_titles(self) -> Titles_T:
|
||||||
|
# Fetch series metadata
|
||||||
|
series_url = self.config["endpoints"]["series"].format(series_slug=self.series_slug)
|
||||||
|
r_series = self.session.get(series_url)
|
||||||
|
r_series.raise_for_status()
|
||||||
|
series_data = r_series.json()
|
||||||
|
|
||||||
|
episodes = []
|
||||||
|
|
||||||
|
# If season is explicitly specified, only fetch that season
|
||||||
|
if self.season_slug:
|
||||||
|
eps_url = self.config["endpoints"]["season_episodes"].format(
|
||||||
|
series_slug=self.series_slug,
|
||||||
|
season_slug=self.season_slug
|
||||||
|
)
|
||||||
|
r_eps = self.session.get(eps_url)
|
||||||
|
if r_eps.status_code == 404:
|
||||||
|
raise ValueError(f"Season '{self.season_slug}' not found.")
|
||||||
|
r_eps.raise_for_status()
|
||||||
|
episodes_data = r_eps.json().get("episodes", [])
|
||||||
|
self._add_episodes_to_list(episodes, episodes_data, series_data)
|
||||||
|
else:
|
||||||
|
# No season specified fetch ALL seasons
|
||||||
|
seasons = series_data.get("seasons", [])
|
||||||
|
if not seasons:
|
||||||
|
raise ValueError("No seasons found for this series.")
|
||||||
|
|
||||||
|
for season in seasons:
|
||||||
|
season_slug = season["slug"]
|
||||||
|
eps_url = self.config["endpoints"]["season_episodes"].format(
|
||||||
|
series_slug=self.series_slug,
|
||||||
|
season_slug=season_slug
|
||||||
|
)
|
||||||
|
|
||||||
|
self.log.debug(f"Fetching episodes for season: {season_slug}")
|
||||||
|
|
||||||
|
r_eps = self.session.get(eps_url)
|
||||||
|
|
||||||
|
# Stop if season returns 404 or empty
|
||||||
|
if r_eps.status_code == 404:
|
||||||
|
self.log.info(f"Season '{season_slug}' not available, skipping.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
r_eps.raise_for_status()
|
||||||
|
episodes_data = r_eps.json().get("episodes", [])
|
||||||
|
|
||||||
|
if not episodes_data:
|
||||||
|
self.log.info(f"No episodes found in season '{season_slug}'.")
|
||||||
|
continue
|
||||||
|
|
||||||
|
self._add_episodes_to_list(episodes, episodes_data, series_data)
|
||||||
|
|
||||||
|
from unshackle.core.titles import Series
|
||||||
|
return Series(sorted(episodes, key=lambda x: (x.season, x.number)))
|
||||||
|
|
||||||
|
def _add_episodes_to_list(self, episodes_list: list, episodes_data: list, series_data: dict):
|
||||||
|
"""Helper to avoid code duplication when adding episodes."""
|
||||||
|
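        # Each episode payload is assumed to carry playback languages roughly like:
        #   {"consumable": {"playback_languages": {"audio_options": ["English (US)", "French"]}}}
        # (hypothetical values). The first word of the first audio option is lowercased
        # and treated as a language tag below, falling back to English if unparsable.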
        for ep in episodes_data:
            # Use the episode's own language detection via its consumable.playback_languages
            playback_langs = ep.get("consumable", {}).get("playback_languages", {})
            audio_langs = playback_langs.get("audio_options", ["English"])
            lang_code = audio_langs[0].split()[0].lower() if audio_langs else "en"

            try:
                detected_lang = Language.get(lang_code)
            except Exception:
                detected_lang = Language.get("en")

            episodes_list.append(Episode(
                id_=ep["id"],
                service=self.__class__,
                title=series_data["title"],  # Series title
                season=ep["episode"]["season_number"],
                number=ep["episode"]["number"],
                name=ep["title"],  # Episode title
                description=ep.get("short_synopsis", ""),
                language=detected_lang,
                data=ep,  # Full episode data for later use in get_tracks
            ))

    def get_tracks(self, title: Title_T) -> Tracks:
        film_id = getattr(title, "id", None)
        if not film_id:
            raise RuntimeError("Title ID not found.")

        # For series episodes, reels are not cached, so reel-based logic is skipped below
        url_view = self.config["endpoints"]["initiate_viewing"].format(film_id=film_id)
        r_view = self.session.post(url_view, json={}, headers={"Content-Type": "application/json"})
        r_view.raise_for_status()
        view_data = r_view.json()
        reel_id = view_data["reel_id"]

        # For films, use the reels data for language/audio mapping
        if not self.is_series:
            if not self.film_id:
                raise RuntimeError("film_id not set. Call get_titles() first.")

            if not self.reels_data:
                self.log.warning("Reels data not cached, fetching now.")
                url_reels = self.config["endpoints"]["reels"].format(film_id=film_id)
                r_reels = self.session.get(url_reels)
                r_reels.raise_for_status()
                reels = r_reels.json()
            else:
                reels = self.reels_data

            # Match the current viewing's reel (informational; not used further yet)
            reel = next((r for r in reels if r["id"] == reel_id), reels[0])
        else:
            # For episodes, no reel-based logic is needed; just proceed
            pass

        # Request the secure streaming URL; works for both films and episodes
        url_secure = self.config["endpoints"]["secure_url"].format(film_id=film_id)
        r_secure = self.session.get(url_secure)
        r_secure.raise_for_status()
        secure_data = r_secure.json()

        manifest_url = None
        for entry in secure_data.get("urls", []):
            if entry.get("content_type") == "application/dash+xml":
                manifest_url = entry["src"]
                break

        if not manifest_url:
            raise ValueError("No DASH manifest URL found.")

        # Parse the DASH manifest, using title.language as a fallback
        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        # Add subtitles
        subtitles = []
        for sub in secure_data.get("text_track_urls", []):
            lang_code = sub.get("language_code", "und")
            vtt_url = sub.get("url")
            if not vtt_url:
                continue

            is_original = lang_code == title.language.language

            subtitles.append(
                Subtitle(
                    id_=sub["id"],
                    url=vtt_url,
                    language=Language.get(lang_code),
                    is_original_lang=is_original,
                    codec=Subtitle.Codec.WebVTT,
                    name=sub.get("display_name", lang_code.upper()),
                    forced=False,
                    sdh=False,
                )
            )
        tracks.subtitles = subtitles

        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.user_id:
            raise RuntimeError("user_id not set; authenticate first.")

        dt_custom_data = {
            "userId": self.user_id,
            "sessionId": self.lt_token,
            "merchant": "mubi"
        }

        dt_custom_data_b64 = base64.b64encode(json.dumps(dt_custom_data).encode()).decode()

        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "*/*",
            "Origin": "https://mubi.com",
            "Referer": "https://mubi.com/",
            "dt-custom-data": dt_custom_data_b64,
        }

        r = self.session.post(
            self.config["endpoints"]["license"],
            data=challenge,
            headers=headers,
        )
        r.raise_for_status()
        license_data = r.json()
        if license_data.get("status") != "OK":
            raise PermissionError(f"DRM license error: {license_data}")
        return base64.b64decode(license_data["license"])
12
MUBI/config.yaml
Normal file
@@ -0,0 +1,12 @@
endpoints:
  account: "https://api.mubi.com/v4/account"
  current_user: "https://api.mubi.com/v4/current_user"
  film_by_slug: "https://api.mubi.com/v4/films/{slug}"
  playback_languages: "https://api.mubi.com/v4/films/{film_id}/playback_languages"
  initiate_viewing: "https://api.mubi.com/v4/films/{film_id}/viewing?parental_lock_enabled=true"
  reels: "https://api.mubi.com/v4/films/{film_id}/reels"
  secure_url: "https://api.mubi.com/v4/films/{film_id}/viewing/secure_url"
  license: "https://lic.drmtoday.com/license-proxy-widevine/cenc/"
  ip_geolocation: "https://directory.cookieyes.com/api/v1/ip"
  series: "https://api.mubi.com/v4/series/{series_slug}"
  season_episodes: "https://api.mubi.com/v4/series/{series_slug}/seasons/{season_slug}/episodes/available"
149
PTHS/__init__.py
Normal file
@@ -0,0 +1,149 @@
import json
import re
from typing import Optional
from http.cookiejar import CookieJar
from langcodes import Language
import click

from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.titles import Movie, Movies, Title_T, Titles_T
from unshackle.core.tracks import Tracks


class PTHS(Service):
    """
    Service code for Pathé Thuis (pathe-thuis.nl)
    Version: 1.0.0

    Security: SD @ L3 (Widevine)
              FHD @ L1
    Authorization: Cookies or authentication token

    Supported:
      • Movies → https://www.pathe-thuis.nl/film/{id}

    Note:
      Pathé Thuis does not have episodic content, only movies.
    """
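    # Example invocation (a sketch; assumes the standard unshackle CLI entry point
    # and that this service folder is installed under the name "PTHS"):
    #   unshackle dl PTHS https://www.pathe-thuis.nl/film/30591
    #   unshackle dl PTHS 30591    # a bare ID also matches TITLE_RE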
    TITLE_RE = (
        r"^(?:https?://(?:www\.)?pathe-thuis\.nl/film/)?(?P<id>\d+)(?:/[^/]+)?$"
    )
    GEOFENCE = ("NL",)
    NO_SUBTITLES = True

    @staticmethod
    @click.command(name="PTHS", short_help="https://www.pathe-thuis.nl")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return PTHS(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        m = re.match(self.TITLE_RE, title)
        if not m:
            raise ValueError(
                f"Unsupported Pathé Thuis URL or ID: {title}\n"
                "Use e.g. https://www.pathe-thuis.nl/film/30591"
            )

        self.movie_id = m.group("id")
        self.drm_token = None
        self.license_url = None  # set by get_tracks once a stream is fetched

        if self.config is None:
            raise EnvironmentError("Missing service config for Pathé Thuis.")

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        super().authenticate(cookies, credential)

        if not cookies:
            self.log.warning("No cookies provided, proceeding unauthenticated.")
            return

        token = next((c.value for c in cookies if c.name == "authenticationToken"), None)
        if not token:
            self.log.info("No authenticationToken cookie found, unauthenticated mode.")
            return

        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "X-Pathe-Device-Identifier": "web-widevine-1",
            "X-Pathe-Auth-Session-Token": token,
        })
        self.log.info("Authentication token successfully attached to session.")

    def get_titles(self) -> Titles_T:
        url = self.config["endpoints"]["metadata"].format(movie_id=self.movie_id)
        r = self.session.get(url)
        r.raise_for_status()
        data = r.json()

        movie = Movie(
            id_=str(data["id"]),
            service=self.__class__,
            name=data["name"],
            description=data.get("intro", ""),
            year=data.get("year"),
            language=Language.get(data.get("language", "en")),
            data=data,
        )
        return Movies([movie])

    def get_tracks(self, title: Title_T) -> Tracks:
        ticket_id = self._get_ticket_id(title)
        url = self.config["endpoints"]["ticket"].format(ticket_id=ticket_id)

        r = self.session.get(url)
        r.raise_for_status()
        data = r.json()
        stream = data["stream"]

        manifest_url = stream.get("url") or stream.get("drmurl")
        if not manifest_url:
            raise ValueError("No stream manifest URL found.")

        self.drm_token = stream["token"]
        self.license_url = stream["rawData"]["licenseserver"]

        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        return tracks

    def _get_ticket_id(self, title: Title_T) -> str:
        """Fetch the user's owned ticket ID if present."""
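        # The metadata payload is assumed to list owned tickets roughly like:
        #   {"tickets": [{"id": 987654, "movieId": 30591, "playable": true}]}
        # (hypothetical values); only a playable ticket for this movie ID counts.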
        data = title.data
        for t in (data.get("tickets") or []):
            if t.get("playable") and str(t.get("movieId")) == str(self.movie_id):
                return str(t["id"])
        raise ValueError("No valid ticket found for this movie. Ensure purchase or login.")

    def get_chapters(self, title: Title_T):
        return []

    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.license_url or not self.drm_token:
            raise ValueError("Missing license URL or token.")

        headers = {
            "Content-Type": "application/octet-stream",
            "Authorization": f"Bearer {self.drm_token}",
        }

        params = {"custom_data": self.drm_token}

        r = self.session.post(self.license_url, params=params, data=challenge, headers=headers)
        r.raise_for_status()

        if not r.content:
            raise ValueError("Empty license response, likely invalid or expired token.")
        return r.content
3
PTHS/config.yaml
Normal file
@@ -0,0 +1,3 @@
endpoints:
  metadata: "https://www.pathe-thuis.nl/api/movies/{movie_id}?include=editions"
  ticket: "https://www.pathe-thuis.nl/api/tickets/{ticket_id}"
39
README.md
@@ -1,17 +1,40 @@
# DISCLAIMER: WHATEVER YOU DO WITH THESE SERVICES IS ALL YOUR RESPONSIBILITY! IF YOU GET A DMCA, IT'S YOUR FAULT, NOT MINE. I'm just sharing knowledge for educational purposes (aka don't sue me, npo.nl, please?)

# These services are new and in development. Please feel free to submit pull requests or open an issue for any mistakes or suggestions.

### If you have personal questions or want to request a service, DM me on Discord (jerukpurut)

- Roadmap:

1. NPO:
   - To add search functionality
   - More accurate metadata (the year shown is not always the year of release)
   - Add automatic CDM recognition instead of requiring the user to set drmType manually in the config
2. KOWP:
   - Audio is mislabeled as English
   - To add PlayReady support
3. PTHS:
   - To add PlayReady support (needed, since L3 tops out at 480p)
   - Search functionality
   - Account login, if possible
4. HIDI:
   - Subtitle lines can be misordered when a second sentence appears, putting the last sentence first and vice versa (needs to be fixed)
5. MUBI:
   - Search functionality
6. VIKI:
   - The CSRF token is currently scraped from the page; it will come from an API request soon
7. VIDO:
   - Subtitles have a small quirk: Javanese and Sundanese are labeled on the HLS manifest but not the DASH one
   - Search functionality not available yet
8. KNPY:
   - Need to fix the search function
9. VRT:
   - Search functionality
   - Fixing a few hiccups
10. SKST (the hardest service I have dealt with so far):
    - Subtitles are a little hit-or-miss for movies, and series still have no subtitles
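As a quick orientation (a sketch; command names assume the standard unshackle CLI and that these service folders sit in your unshackle services directory):

```
# Movies on Pathé Thuis can be referenced by URL or bare ID
unshackle dl PTHS https://www.pathe-thuis.nl/film/30591

# Vidio movies/series use the full watch/premier URL
unshackle dl VIDO https://www.vidio.com/watch/7454613-marantau-short-movie
```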
- Acknowledgment

Thanks to Adef for the NPO start downloader.
1048
SKST/__init__.py
Normal file
File diff suppressed because it is too large
42
SKST/config.yaml
Normal file
@@ -0,0 +1,42 @@
endpoints:
  signin: "https://rango.id.skyshowtime.com/signin/service/international"
  tokens: "https://ovp.skyshowtime.com/auth/tokens"
  personas: "https://web.clients.skyshowtime.com/bff/personas/v2"
  atom_node: "https://atom.skyshowtime.com/adapter-calypso/v3/query/node"
  atom_search: "https://atom.skyshowtime.com/adapter-calypso/v3/query/search"
  playback: "https://ovp.skyshowtime.com/video/playouts/vod"

params:
  provider: "SKYSHOWTIME"
  proposition: "SKYSHOWTIME"
  platform: "PC"
  device: "COMPUTER"
  client_version: "6.11.21-gsp"

signature:
  app_id: "SHOWMAX-ANDROID-v1"
  key: "kC2UFjsH6PHrc5ENGfyTgC5bPA7aBVZ4aJAyqBBP"
  version: "1.0"

territories:
  - NL
  - PL
  - ES
  - PT
  - SE
  - "NO"  # quoted so YAML 1.1 parsers do not read it as boolean false
  - DK
  - FI
  - CZ
  - SK
  - HU
  - RO
  - BG
  - HR
  - SI
  - BA
  - RS
  - ME
  - MK
  - AL
  - XK
452
VIDO/__init__.py
Normal file
@@ -0,0 +1,452 @@
import re
import uuid
import xml.etree.ElementTree as ET
from urllib.parse import urljoin
from hashlib import md5
from typing import Optional, Union
from http.cookiejar import CookieJar
from langcodes import Language

import click

from unshackle.core.credential import Credential
from unshackle.core.manifests import HLS, DASH
from unshackle.core.service import Service
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle
from unshackle.core.constants import AnyTrack
from datetime import datetime, timezone


class VIDO(Service):
    """
    Vidio.com service, Series and Movies, login required.
    Version: 2.3.0

    Supports URLs like:
      • https://www.vidio.com/premier/2978/giligilis (Series)
      • https://www.vidio.com/watch/7454613-marantau-short-movie (Movie)

    Security: HD@L3 (Widevine DRM when available)
    """

    TITLE_RE = r"^https?://(?:www\.)?vidio\.com/(?:premier|series|watch)/(?P<id>\d+)"
    GEOFENCE = ("ID",)

    @staticmethod
    @click.command(name="VIDO", short_help="https://vidio.com (login required)")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VIDO(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        match = re.match(self.TITLE_RE, title)
        if not match:
            raise ValueError(f"Unsupported or invalid Vidio URL: {title}")
        self.content_id = match.group("id")

        self.is_movie = "watch" in title

        # Static app identifiers from Android traffic
        self.API_AUTH = "laZOmogezono5ogekaso5oz4Mezimew1"
        self.USER_AGENT = "vidioandroid/7.14.6-e4d1de87f2 (3191683)"
        self.API_APP_INFO = "android/15/7.14.6-e4d1de87f2-3191683"
        self.VISITOR_ID = str(uuid.uuid4())

        # Auth state
        self._email = None
        self._user_token = None
        self._access_token = None

        # DRM state
        self.license_url = None
        self.custom_data = None
        self.cdm = ctx.obj.cdm

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        if not credential or not credential.username or not credential.password:
            raise ValueError("Vidio requires email and password login.")

        self._email = credential.username
        password = credential.password

        cache_key = f"auth_tokens_{self._email}"
        cache = self.cache.get(cache_key)

        # Check if valid tokens are already in the cache
        if cache and not cache.expired:
            self.log.info("Using cached authentication tokens")
            cached_data = cache.data
            self._user_token = cached_data.get("user_token")
            self._access_token = cached_data.get("access_token")
            if self._user_token and self._access_token:
                return

        # If no valid cache, proceed with login
        self.log.info("Authenticating with username and password")
        headers = {
            "referer": "android-app://com.vidio.android",
            "x-api-platform": "app-android",
            "x-api-auth": self.API_AUTH,
            "user-agent": self.USER_AGENT,
            "x-api-app-info": self.API_APP_INFO,
            "accept-language": "en",
            "content-type": "application/x-www-form-urlencoded",
            "x-visitor-id": self.VISITOR_ID,
        }

        data = f"login={self._email}&password={password}"
        r = self.session.post("https://api.vidio.com/api/login", headers=headers, data=data)
        r.raise_for_status()

        auth_data = r.json()
        self._user_token = auth_data["auth"]["authentication_token"]
        self._access_token = auth_data["auth_tokens"]["access_token"]
        self.log.info(f"Authenticated as {self._email}")

        try:
            expires_at_str = auth_data["auth_tokens"]["access_token_expires_at"]
            expires_at_dt = datetime.fromisoformat(expires_at_str)
            now_utc = datetime.now(timezone.utc)
            expiration_in_seconds = max(0, int((expires_at_dt - now_utc).total_seconds()))
            self.log.info(f"Token expires in {expiration_in_seconds / 60:.2f} minutes.")
        except (KeyError, ValueError) as e:
            self.log.warning(f"Could not parse token expiration: {e}. Defaulting to 1 hour.")
            expiration_in_seconds = 3600

        cache.set({
            "user_token": self._user_token,
            "access_token": self._access_token
        }, expiration=expiration_in_seconds)

    def _headers(self):
        if not self._user_token or not self._access_token:
            raise RuntimeError("Not authenticated. Call authenticate() first.")
        return {
            "referer": "android-app://com.vidio.android",
            "x-api-platform": "app-android",
            "x-api-auth": self.API_AUTH,
            "user-agent": self.USER_AGENT,
            "x-api-app-info": self.API_APP_INFO,
            "x-visitor-id": self.VISITOR_ID,
            "x-user-email": self._email,
            "x-user-token": self._user_token,
            "x-authorization": self._access_token,
            "accept-language": "en",
            "accept": "application/json",
            "accept-charset": "UTF-8",
            "content-type": "application/vnd.api+json",
        }

    def _extract_subtitles_from_mpd(self, mpd_url: str) -> list[Subtitle]:
        """
        Manually parse the MPD to extract subtitle tracks.
        Handles plain VTT format (for free content).
        """
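        # A minimal sketch of the MPD shape this parser assumes (hypothetical values):
        #   <AdaptationSet contentType="text" lang="id-auto">
        #     <Representation mimeType="text/vtt">
        #       <SegmentList><SegmentURL media="subtitle_id.vtt"/></SegmentList>
        #     </Representation>
        #   </AdaptationSet>
        # A "-auto" suffix on lang marks auto-generated captions and is stripped below.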
        subtitles = []

        try:
            r = self.session.get(mpd_url)
            r.raise_for_status()
            mpd_content = r.text

            # Get base URL for resolving relative paths
            base_url = mpd_url.rsplit('/', 1)[0] + '/'

            # Remove namespace for easier parsing
            mpd_content_clean = re.sub(r'\sxmlns="[^"]+"', '', mpd_content)
            root = ET.fromstring(mpd_content_clean)

            for adaptation_set in root.findall('.//AdaptationSet'):
                content_type = adaptation_set.get('contentType', '')

                if content_type != 'text':
                    continue

                lang = adaptation_set.get('lang', 'und')

                for rep in adaptation_set.findall('Representation'):
                    mime_type = rep.get('mimeType', '')

                    # Handle plain VTT (free content)
                    if mime_type == 'text/vtt':
                        segment_list = rep.find('SegmentList')
                        if segment_list is not None:
                            for segment_url in segment_list.findall('SegmentURL'):
                                media = segment_url.get('media')
                                if media:
                                    full_url = urljoin(base_url, media)

                                    # Determine if auto-generated
                                    is_auto = '-auto' in lang
                                    clean_lang = lang.replace('-auto', '')

                                    subtitle = Subtitle(
                                        id_=md5(full_url.encode()).hexdigest()[0:16],
                                        url=full_url,
                                        codec=Subtitle.Codec.WebVTT,
                                        language=Language.get(clean_lang),
                                        forced=False,
                                        sdh=False,
                                    )

                                    subtitles.append(subtitle)
                                    self.log.debug(f"Found VTT subtitle: {lang} -> {full_url}")

        except Exception as e:
            self.log.warning(f"Failed to extract subtitles from MPD: {e}")

        return subtitles

    def get_titles(self) -> Titles_T:
        headers = self._headers()

        if self.is_movie:
            r = self.session.get(f"https://api.vidio.com/api/videos/{self.content_id}/detail", headers=headers)
            r.raise_for_status()
            video_data = r.json()["video"]
            year = None
            if video_data.get("publish_date"):
                try:
                    year = int(video_data["publish_date"][:4])
                except (ValueError, TypeError):
                    pass
            return Movies([
                Movie(
                    id_=video_data["id"],
                    service=self.__class__,
                    name=video_data["title"],
                    description=video_data.get("description", ""),
                    year=year,
                    language=Language.get("id"),
                    data=video_data,
                )
            ])
        else:
            r = self.session.get(f"https://api.vidio.com/content_profiles/{self.content_id}", headers=headers)
            r.raise_for_status()
            root = r.json()["data"]
            series_title = root["attributes"]["title"]

            r_playlists = self.session.get(
                f"https://api.vidio.com/content_profiles/{self.content_id}/playlists",
                headers=headers
            )
            r_playlists.raise_for_status()
            playlists_data = r_playlists.json()

            # Use metadata to identify season playlists
            season_playlist_ids = set()
            if "meta" in playlists_data and "playlist_group" in playlists_data["meta"]:
                for group in playlists_data["meta"]["playlist_group"]:
                    if group.get("type") == "season":
                        season_playlist_ids.update(group.get("playlist_ids", []))

            season_playlists = []
            for pl in playlists_data["data"]:
                playlist_id = int(pl["id"])
                name = pl["attributes"]["name"].lower()

                if season_playlist_ids:
                    if playlist_id in season_playlist_ids:
                        season_playlists.append(pl)
                else:
                    if ("season" in name or name == "episode" or name == "episodes") and \
                            "trailer" not in name and "extra" not in name:
                        season_playlists.append(pl)

            if not season_playlists:
                raise ValueError("No season playlists found for this series.")

            def extract_season_number(pl):
                name = pl["attributes"]["name"]
                match = re.search(r"season\s*(\d+)", name, re.IGNORECASE)
                if match:
                    return int(match.group(1))
                elif name.lower() in ["season", "episodes", "episode"]:
                    return 1
                else:
                    return 0

            season_playlists.sort(key=extract_season_number)

            all_episodes = []

            for playlist in season_playlists:
                playlist_id = playlist["id"]
                season_number = extract_season_number(playlist)

                if season_number == 0:
                    season_number = 1

                self.log.debug(f"Processing playlist '{playlist['attributes']['name']}' as Season {season_number}")

                page = 1
                while True:
                    r_eps = self.session.get(
                        f"https://api.vidio.com/content_profiles/{self.content_id}/playlists/{playlist_id}/videos",
                        params={
                            "page[number]": page,
                            "page[size]": 20,
                            "sort": "order",
                            "included": "upcoming_videos"
                        },
                        headers=headers,
                    )
                    r_eps.raise_for_status()
                    page_data = r_eps.json()

                    for raw_ep in page_data["data"]:
                        attrs = raw_ep["attributes"]
                        ep_number = len([e for e in all_episodes if e.season == season_number]) + 1
                        all_episodes.append(
                            Episode(
                                id_=int(raw_ep["id"]),
                                service=self.__class__,
                                title=series_title,
                                season=season_number,
                                number=ep_number,
                                name=attrs["title"],
                                description=attrs.get("description", ""),
                                language=Language.get("id"),
                                data=raw_ep,
                            )
                        )

                    if not page_data["links"].get("next"):
                        break
                    page += 1

            if not all_episodes:
                raise ValueError("No episodes found in any season.")

            return Series(all_episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        headers = self._headers()
        headers.update({
            "x-device-brand": "samsung",
            "x-device-model": "SM-A525F",
            "x-device-form-factor": "phone",
            "x-device-soc": "Qualcomm SM7125",
            "x-device-os": "Android 15 (API 35)",
            "x-device-android-mpc": "0",
            "x-device-cpu-arch": "arm64-v8a",
            "x-device-platform": "android",
            "x-app-version": "7.14.6-e4d1de87f2-3191683",
        })

        video_id = str(title.id)
        url = f"https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"

        r = self.session.get(url, headers=headers)
        r.raise_for_status()
        stream = r.json()

        if not isinstance(stream, dict):
            raise ValueError("Vidio returned invalid stream data.")

        # Extract DRM info
        custom_data = stream.get("custom_data") or {}
        license_servers = stream.get("license_servers") or {}
        widevine_data = custom_data.get("widevine") if isinstance(custom_data, dict) else None
        license_url = license_servers.get("drm_license_url") if isinstance(license_servers, dict) else None
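        # The stream payload is assumed to look roughly like (hypothetical values):
        #   {"custom_data": {"widevine": "<pallycon-customdata-v2 token>"},
        #    "license_servers": {"drm_license_url": "https://license.pallycon.com/..."},
        #    "stream_hls_url": "...m3u8", "stream_dash_url": "...mpd"}
        # so DRM is only treated as present when the token, license URL, and a DASH URL all exist.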
        # Get stream URLs, check all possible HLS and DASH fields
        # HLS URLs (prefer in this order)
        hls_url = (
            stream.get("stream_hls_url") or
            stream.get("stream_token_hls_url") or
            stream.get("stream_token_url")  # This is also HLS (m3u8)
        )

        # DASH URLs
        dash_url = stream.get("stream_dash_url") or stream.get("stream_token_dash_url")

        has_drm = widevine_data and license_url and dash_url and isinstance(widevine_data, str)

        if has_drm:
            # DRM content: must use DASH
            self.log.info("Widevine DRM detected, using DASH")
            self.custom_data = widevine_data
            self.license_url = license_url
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

        elif hls_url:
            # Non-DRM: prefer HLS (H.264, proper frame_rate metadata)
            self.log.info("No DRM detected, using HLS")
            self.custom_data = None
            self.license_url = None
            tracks = HLS.from_url(hls_url, session=self.session).to_tracks(language=title.language)

            # Clear HLS subtitles (they're segmented and incompatible)
            if tracks.subtitles:
                self.log.debug("Clearing HLS subtitles (incompatible format)")
                tracks.subtitles.clear()

            # Get subtitles from the DASH manifest (plain VTT) if available
            if dash_url:
                self.log.debug("Extracting subtitles from DASH manifest")
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)
                    self.log.info(f"Added {len(manual_subs)} subtitle tracks from DASH")

        elif dash_url:
            # Fallback to DASH only if no HLS is available
            self.log.warning("No HLS available, using DASH (VP9 codec - may have issues)")
            self.custom_data = None
            self.license_url = None
            tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)

            # Try manual subtitle extraction for non-DRM DASH
            if not tracks.subtitles:
                manual_subs = self._extract_subtitles_from_mpd(dash_url)
                if manual_subs:
                    for sub in manual_subs:
                        tracks.add(sub)
        else:
            raise ValueError("No playable stream (DASH or HLS) available.")

        self.log.info(f"Found {len(tracks.videos)} video tracks, {len(tracks.audio)} audio tracks, {len(tracks.subtitles)} subtitle tracks")

        return tracks

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []

    def search(self):
        raise NotImplementedError("Search not implemented for Vidio.")

    def get_widevine_service_certificate(self, **_) -> Union[bytes, str, None]:
        return None

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not self.license_url or not self.custom_data:
            raise ValueError("DRM license info missing.")

        headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Referer": "https://www.vidio.com/",
            "Origin": "https://www.vidio.com",
            "pallycon-customdata-v2": self.custom_data,
            "Content-Type": "application/octet-stream",
        }

        self.log.debug(f"Requesting Widevine license from: {self.license_url}")
        response = self.session.post(
            self.license_url,
            data=challenge,
            headers=headers
        )

        if not response.ok:
            error_summary = response.text[:200] if response.text else "No response body"
            raise Exception(f"License request failed ({response.status_code}): {error_summary}")

        return response.content
5
VIDO/config.yaml
Normal file
@@ -0,0 +1,5 @@
endpoints:
  content_profile: "https://api.vidio.com/content_profiles/{content_id}"
  playlists: "https://api.vidio.com/content_profiles/{content_id}/playlists"
  playlist_videos: "https://api.vidio.com/content_profiles/{content_id}/playlists/{playlist_id}/videos"
  stream: "https://api.vidio.com/api/stream/v1/video_data/{video_id}?initialize=true"
328
VIKI/__init__.py
Normal file
@@ -0,0 +1,328 @@
import base64
import json
import os
import re
from http.cookiejar import CookieJar
from typing import Optional, Generator

import click
from unshackle.core.search_result import SearchResult
from unshackle.core.constants import AnyTrack
from unshackle.core.credential import Credential
from unshackle.core.manifests import DASH
from unshackle.core.service import Service
from unshackle.core.titles import Movie, Movies, Series, Episode, Title_T, Titles_T
from unshackle.core.tracks import Chapter, Tracks, Subtitle
from unshackle.core.drm import Widevine
from langcodes import Language


class VIKI(Service):
    """
    Service code for Rakuten Viki (viki.com)
    Version: 1.4.0

    Authorization: Required cookies (_viki_session, device_id).
    Security: FHD @ L3 (Widevine)

    Supports:
      • Movies and TV Series
    """

    TITLE_RE = r"^(?:https?://(?:www\.)?viki\.com)?/(?:movies|tv)/(?P<id>\d+c)-.+$"
    GEOFENCE = ()
    NO_SUBTITLES = False

    @staticmethod
    @click.command(name="VIKI", short_help="https://viki.com")
    @click.argument("title", type=str)
    @click.pass_context
    def cli(ctx, **kwargs):
        return VIKI(ctx, **kwargs)

    def __init__(self, ctx, title: str):
        super().__init__(ctx)

        m = re.match(self.TITLE_RE, title)
        if not m:
            self.search_term = title
            self.title_url = None
            return

        self.container_id = m.group("id")
        self.title_url = title
        self.video_id: Optional[str] = None
        self.api_access_key: Optional[str] = None
        self.drm_license_url: Optional[str] = None

        self.cdm = ctx.obj.cdm
        if self.config is None:
            raise EnvironmentError("Missing service config for VIKI.")

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        super().authenticate(cookies, credential)

        if not cookies:
            raise PermissionError("VIKI requires a cookie file for authentication.")

        session_cookie = next((c for c in cookies if c.name == "_viki_session"), None)
        device_cookie = next((c for c in cookies if c.name == "device_id"), None)

        if not session_cookie or not device_cookie:
            raise PermissionError("Your cookie file is missing '_viki_session' or 'device_id'.")

        self.session.headers.update({
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "X-Viki-App-Ver": "14.64.0",
            "X-Viki-Device-ID": device_cookie.value,
            "Origin": "https://www.viki.com",
            "Referer": "https://www.viki.com/",
        })
        self.log.info("VIKI authentication cookies loaded successfully.")

    def get_titles(self) -> Titles_T:
        if not self.title_url:
            raise ValueError("No URL provided to process.")

        self.log.debug(f"Scraping page for API access key: {self.title_url}")
        r_page = self.session.get(self.title_url)
        r_page.raise_for_status()

        match = re.search(r'"token":"([^"]+)"', r_page.text)
        if not match:
            raise RuntimeError("Failed to extract API access key from page source.")

        self.api_access_key = match.group(1)
        self.log.debug(f"Extracted API access key: {self.api_access_key[:10]}...")
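        # The page source is assumed to embed the key in inline JSON, roughly:
        #   ..."token":"eyJhbGciOi..."...  (hypothetical value)
        # which the regex above captures as the API access key.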
        url = self.config["endpoints"]["container"].format(container_id=self.container_id)
        params = {
            "app": self.config["params"]["app"],
            "token": self.api_access_key,
        }
        r = self.session.get(url, params=params)
        r.raise_for_status()
        data = r.json()

        content_type = data.get("type")
        if content_type == "film":
            return self._parse_movie(data)
        elif content_type == "series":
            return self._parse_series(data)
        else:
            self.log.error(f"Unknown content type '{content_type}' found.")
            return Movies([])

    def _parse_movie(self, data: dict) -> Movies:
        name = data.get("titles", {}).get("en", "Unknown Title")
        year = int(data["created_at"][:4]) if "created_at" in data else None
        description = data.get("descriptions", {}).get("en", "")
        original_lang_code = data.get("origin", {}).get("language", "en")
        self.video_id = data.get("watch_now", {}).get("id")

        if not self.video_id:
            raise ValueError(f"Could not find a playable video ID for container {self.container_id}.")

        return Movies([
            Movie(
                id_=self.container_id,
                service=self.__class__,
                name=name,
                year=year,
                description=description,
                language=Language.get(original_lang_code),
                data=data,
            )
        ])

    def _parse_series(self, data: dict) -> Series:
        """Parse series metadata and fetch episodes."""
        series_name = data.get("titles", {}).get("en", "Unknown Title")
        year = int(data["created_at"][:4]) if "created_at" in data else None
        description = data.get("descriptions", {}).get("en", "")
        original_lang_code = data.get("origin", {}).get("language", "en")

        self.log.info(f"Parsing series: {series_name}")

        # Fetch episode list IDs
        episodes_url = self.config["endpoints"]["episodes"].format(container_id=self.container_id)
        params = {
            "app": self.config["params"]["app"],
            "token": self.api_access_key,
            "direction": "asc",
            "with_upcoming": "true",
            "sort": "number",
            "blocked": "true",
            "only_ids": "true"
        }

        r = self.session.get(episodes_url, params=params)
        r.raise_for_status()
        episodes_data = r.json()

        episode_ids = episodes_data.get("response", [])
        self.log.info(f"Found {len(episode_ids)} episodes")

        episodes = []
        for idx, ep_id in enumerate(episode_ids, 1):
            # Fetch individual episode metadata
            ep_url = self.config["endpoints"]["episode_meta"].format(video_id=ep_id)
            ep_params = {
                "app": self.config["params"]["app"],
                "token": self.api_access_key,
            }

            try:
                r_ep = self.session.get(ep_url, params=ep_params)
                r_ep.raise_for_status()
                ep_data = r_ep.json()

                ep_number = ep_data.get("number", idx)
                ep_title = ep_data.get("titles", {}).get("en", "")
                ep_description = ep_data.get("descriptions", {}).get("en", "")

                # If no episode title, use a generic name
                if not ep_title:
                    ep_title = f"Episode {ep_number}"

                # Store the video_id in the data dict
                ep_data["video_id"] = ep_id

                self.log.debug(f"Episode {ep_number}: {ep_title} ({ep_id})")

                episodes.append(
                    Episode(
                        id_=ep_id,
                        service=self.__class__,
                        title=series_name,  # Series title
                        season=1,  # VIKI typically doesn't separate seasons clearly
                        number=ep_number,
                        name=ep_title,  # Episode title
                        description=ep_description,
                        language=Language.get(original_lang_code),
                        data=ep_data
                    )
                )
            except Exception as e:
                self.log.warning(f"Failed to fetch episode {ep_id}: {e}")
                # Create a basic episode entry even if the metadata fetch fails
                episodes.append(
                    Episode(
                        id_=ep_id,
                        service=self.__class__,
                        title=series_name,
                        season=1,
                        number=idx,
                        name=f"Episode {idx}",
                        description="",
                        language=Language.get(original_lang_code),
                        data={"video_id": ep_id}  # Store video_id in data
                    )
                )

        # Return Series with just the episodes list
        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        # For episodes, get the video_id from the data dict
        if isinstance(title, Episode):
            self.video_id = title.data.get("video_id")
            if not self.video_id:
                # Fallback to the episode id if video_id is not in data
                self.video_id = title.data.get("id")
        elif not self.video_id:
            raise RuntimeError("video_id not set. Call get_titles() first.")

        if not self.video_id:
            raise ValueError("Could not determine video_id for this title")

        self.log.info(f"Getting tracks for video ID: {self.video_id}")

        url = self.config["endpoints"]["playback"].format(video_id=self.video_id)
        r = self.session.get(url)
        r.raise_for_status()
        data = r.json()

        # Get the DRM-protected manifest from the queue
        manifest_url = None
        for item in data.get("queue", []):
            if item.get("type") == "video" and item.get("format") == "mpd":
                manifest_url = item.get("url")
                break

        if not manifest_url:
            raise ValueError("No DRM-protected manifest URL found in queue")

        self.log.debug(f"Found DRM-protected manifest URL: {manifest_url}")

        # Create headers for manifest download
        manifest_headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64; rv:143.0) Gecko/20100101 Firefox/143.0",
            "Accept": "*/*",
            "Accept-Language": "en",
            "Accept-Encoding": "gzip, deflate, br, zstd",
            "X-Viki-App-Ver": "14.64.0",
            "X-Viki-Device-ID": self.session.headers.get("X-Viki-Device-ID", ""),
            "Origin": "https://www.viki.com",
            "Referer": "https://www.viki.com/",
            "Connection": "keep-alive",
            "Sec-Fetch-Dest": "empty",
            "Sec-Fetch-Mode": "cors",
            "Sec-Fetch-Site": "cross-site",
            "Pragma": "no-cache",
            "Cache-Control": "no-cache",
        }

        # Parse tracks from the DRM-protected manifest
        tracks = DASH.from_url(manifest_url, session=self.session).to_tracks(language=title.language)

        # Subtitles
        title_language = title.language.language
        subtitles = []
        for sub in data.get("subtitles", []):
            sub_url = sub.get("src")
            lang_code = sub.get("srclang")
            if not sub_url or not lang_code:
                continue

            subtitles.append(
                Subtitle(
                    id_=lang_code,
                    url=sub_url,
                    language=Language.get(lang_code),
                    is_original_lang=lang_code == title_language,
                    codec=Subtitle.Codec.WebVTT,
                    name=sub.get("label", lang_code.upper()).split(" (")[0]
                )
            )
        tracks.subtitles = subtitles

        # Store DRM license URL (only dt3) at service level
        drm_b64 = data.get("drm")
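        # The "drm" field is assumed to be base64-encoded JSON along the lines of
        #   {"dt1": "...", "dt2": "...", "dt3": "https://.../widevine"}  (hypothetical),
        # where dt3 is the Widevine license endpoint used below.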
        if drm_b64:
            drm_data = json.loads(base64.b64decode(drm_b64))
            self.drm_license_url = drm_data.get("dt3")  # Use dt3 as requested
        else:
            self.log.warning("No DRM info found, assuming unencrypted stream.")

        return tracks

    def get_widevine_license(self, *, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        if not hasattr(self, 'drm_license_url') or not self.drm_license_url:
            raise ValueError("DRM license URL not available.")

        r = self.session.post(
            self.drm_license_url,
            data=challenge,
            headers={"Content-type": "application/octet-stream"}
        )
        r.raise_for_status()
        return r.content

    def search(self) -> Generator[SearchResult, None, None]:
        self.log.warning("Search not yet implemented for VIKI.")
        return
        yield

    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []
8
VIKI/config.yaml
Normal file
@@ -0,0 +1,8 @@
params:
  app: "100000a"
endpoints:
  container: "https://api.viki.io/v4/containers/{container_id}.json"
  episodes: "https://api.viki.io/v4/series/{container_id}/episodes.json"  # New
  episode_meta: "https://api.viki.io/v4/videos/{video_id}.json"  # New
  playback: "https://www.viki.com/api/videos/{video_id}"
  search: "https://api.viki.io/v4/search/all.json"
264
VRT/__init__.py
Normal file
@@ -0,0 +1,264 @@
import json
|
||||||
|
import re
|
||||||
|
import time
|
||||||
|
import base64
|
||||||
|
import warnings # Added
|
||||||
|
from http.cookiejar import CookieJar
|
||||||
|
from typing import Optional, List
|
||||||
|
from langcodes import Language
|
||||||
|
|
||||||
|
import click
|
||||||
|
import jwt
|
||||||
|
from bs4 import XMLParsedAsHTMLWarning # Added
|
||||||
|
from collections.abc import Generator
|
||||||
|
from unshackle.core.search_result import SearchResult
|
||||||
|
from unshackle.core.constants import AnyTrack
|
||||||
|
from unshackle.core.credential import Credential
|
||||||
|
from unshackle.core.manifests import DASH
|
||||||
|
from unshackle.core.service import Service
|
||||||
|
from unshackle.core.titles import Episode, Movie, Movies, Series, Title_T, Titles_T
|
||||||
|
from unshackle.core.tracks import Chapter, Tracks, Subtitle
|
||||||
|
|
||||||
|
# Ignore the BeautifulSoup XML warning caused by STPP subtitles
|
||||||
|
warnings.filterwarnings("ignore", category=XMLParsedAsHTMLWarning)
|
||||||
|
|
||||||
|
# GraphQL Fragments and Queries
|
||||||
|
FRAGMENTS = """
|
||||||
|
fragment tileFragment on Tile {
|
||||||
|
... on ITile {
|
||||||
|
title
|
||||||
|
action { ... on LinkAction { link } }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
QUERY_PROGRAM = """
|
||||||
|
query VideoProgramPage($pageId: ID!) {
|
||||||
|
page(id: $pageId) {
|
||||||
|
... on ProgramPage {
|
||||||
|
title
|
||||||
|
components {
|
||||||
|
__typename
|
||||||
|
... on PaginatedTileList { listId title }
|
||||||
|
... on StaticTileList { listId title }
|
||||||
|
... on ContainerNavigation {
|
||||||
|
items {
|
||||||
|
title
|
||||||
|
components {
|
||||||
|
__typename
|
||||||
|
... on PaginatedTileList { listId }
|
||||||
|
... on StaticTileList { listId }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
QUERY_PAGINATED_LIST = FRAGMENTS + """
|
||||||
|
query PaginatedTileListPage($listId: ID!, $after: ID) {
|
||||||
|
list(listId: $listId) {
|
||||||
|
... on PaginatedTileList {
|
||||||
|
paginatedItems(first: 50, after: $after) {
|
||||||
|
edges { node { ...tileFragment } }
|
||||||
|
pageInfo { endCursor hasNextPage }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
... on StaticTileList {
|
||||||
|
items { ...tileFragment }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
QUERY_PLAYBACK = """
|
||||||
|
query EpisodePage($pageId: ID!) {
|
||||||
|
page(id: $pageId) {
|
||||||
|
... on PlaybackPage {
|
||||||
|
title
|
||||||
|
player { modes { streamId } }
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
"""
|
||||||
|
|
||||||
|
class VRT(Service):
|
||||||
|
"""
|
||||||
|
Service code for VRT MAX (vrt.be)
|
||||||
|
Version: 2.1.1
|
||||||
|
Auth: Gigya + OIDC flow
|
||||||
|
Security: FHD @ L3 (Widevine)
|
||||||
|
Supports:
|
||||||
|
- Movies: https://www.vrt.be/vrtmax/a-z/rikkie-de-ooievaar-2/
|
||||||
|
Series: https://www.vrt.be/vrtmax/a-z/schaar-steen-papier/
|
||||||
|
"""
|
||||||
|
|
||||||
|
TITLE_RE = r"^(?:https?://(?:www\.)?vrt\.be/vrtmax/a-z/)?(?P<slug>[^/]+)(?:/(?P<season_num>\d+)/(?P<episode_slug>[^/]+))?/?$"
|
||||||
|
|
||||||
|
@staticmethod
|
||||||
|
@click.command(name="VRT", short_help="https://www.vrt.be/vrtmax/")
|
||||||
|
@click.argument("title", type=str)
|
||||||
|
@click.pass_context
|
||||||
|
def cli(ctx, **kwargs):
|
||||||
|
return VRT(ctx, **kwargs)
|
||||||
|
|
||||||
|
    def __init__(self, ctx, title: str):
        super().__init__(ctx)
        self.cdm = ctx.obj.cdm

        m = re.match(self.TITLE_RE, title)
        if m:
            self.slug = m.group("slug")
            self.is_series_root = m.group("episode_slug") is None
            if "vrtmax/a-z" in title:
                # Full URL: reuse its path (minus any query string) as the page id.
                self.page_id = "/" + title.split("vrt.be/")[1].split("?")[0]
            else:
                # Bare slug: build the canonical a-z page path.
                self.page_id = f"/vrtmax/a-z/{self.slug}/"
        else:
            # Not a recognized URL: treat the input as a search term.
            self.search_term = title

        self.access_token = None
        self.video_token = None

    def authenticate(self, cookies: Optional[CookieJar] = None, credential: Optional[Credential] = None) -> None:
        cache = self.cache.get("auth_data")
        if cache and not cache.expired:
            self.log.info("Using cached VRT session.")
            self.access_token = cache.data["access_token"]
            self.video_token = cache.data["video_token"]
            return

        if not credential or not credential.username or not credential.password:
            return

        self.log.info(f"Logging in to VRT as {credential.username}...")
        login_params = {
            "apiKey": self.config["settings"]["api_key"],
            "loginID": credential.username,
            "password": credential.password,
            "format": "json",
            "sdk": "Android_6.1.0",
        }
        r = self.session.post(self.config["endpoints"]["gigya_login"], data=login_params)
        r.raise_for_status()
        gigya_data = r.json()
        if gigya_data.get("errorCode") != 0:
            raise PermissionError("Gigya login failed")

        sso_params = {
            "UID": gigya_data["UID"],
            "UIDSignature": gigya_data["UIDSignature"],
            "signatureTimestamp": gigya_data["signatureTimestamp"],
        }
        r = self.session.get(self.config["endpoints"]["vrt_sso"], params=sso_params)
        r.raise_for_status()

        # The SSO page embeds the token payload as an escaped JSON string
        # inside a JavaScript variable; scrape and unescape it.
        match = re.search(r'var response = "(.*?)";', r.text)
        if not match:
            raise PermissionError("Could not extract tokens from the VRT SSO response.")
        token_data = json.loads(match.group(1).replace('\\"', '"'))
        self.access_token = token_data["tokens"]["access_token"]
        self.video_token = token_data["tokens"]["video_token"]

        # Cache until 5 minutes before the access token's JWT expiry.
        decoded = jwt.decode(self.access_token, options={"verify_signature": False})
        cache.set(
            data={"access_token": self.access_token, "video_token": self.video_token},
            expiration=int(decoded["exp"] - time.time()) - 300,
        )

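    # Token roles, as used below: access_token authorizes GraphQL requests via
    # a Bearer header; video_token is exchanged in get_tracks for a
    # vrtPlayerToken before the stream aggregator is called.
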
    def _get_gql_headers(self):
        return {
            "x-vrt-client-name": self.config["settings"]["client_name"],
            "x-vrt-client-version": self.config["settings"]["client_version"],
            "x-vrt-zone": "default",
            # requests omits headers whose value is None, so anonymous
            # requests simply carry no authorization header.
            "authorization": f"Bearer {self.access_token}" if self.access_token else None,
            "Content-Type": "application/json",
        }

    def get_titles(self) -> Titles_T:
        if not self.is_series_root:
            # Episode/movie URL: resolve the playback page directly.
            r = self.session.post(
                self.config["endpoints"]["graphql"],
                json={"query": QUERY_PLAYBACK, "variables": {"pageId": self.page_id}},
                headers=self._get_gql_headers(),
            )
            data = r.json()["data"]["page"]
            return Movies([Movie(
                id_=data["player"]["modes"][0]["streamId"],
                service=self.__class__,
                name=data["title"],
                language=Language.get("nl"),
                data={"page_id": self.page_id},
            )])

        r = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PROGRAM, "variables": {"pageId": self.page_id}},
            headers=self._get_gql_headers(),
        )
        program_data = r.json().get("data", {}).get("page")
        if not program_data:
            raise ValueError(f"Series page not found: {self.page_id}")

        series_name = program_data["title"]
        episodes = []
        list_ids = []

        # Collect every tile list on the program page, including lists nested
        # inside season navigation tabs.
        for comp in program_data.get("components", []):
            typename = comp.get("__typename")
            if typename in ("PaginatedTileList", "StaticTileList") and "listId" in comp:
                list_ids.append((comp.get("title") or "Episodes", comp["listId"]))
            elif typename == "ContainerNavigation":
                for item in comp.get("items", []):
                    item_title = item.get("title", "Episodes")
                    for sub in item.get("components", []):
                        if "listId" in sub:
                            list_ids.append((item_title, sub["listId"]))

        # Deduplicate list ids while preserving order.
        seen_lists = set()
        unique_list_ids = []
        for title, lid in list_ids:
            if lid not in seen_lists:
                unique_list_ids.append((title, lid))
                seen_lists.add(lid)

        for season_title, list_id in unique_list_ids:
            after = None
            while True:
                r_list = self.session.post(
                    self.config["endpoints"]["graphql"],
                    json={"query": QUERY_PAGINATED_LIST, "variables": {"listId": list_id, "after": after}},
                    headers=self._get_gql_headers(),
                )
                list_resp = r_list.json().get("data", {}).get("list")
                if not list_resp:
                    break

                items_container = list_resp.get("paginatedItems")
                nodes = [e["node"] for e in items_container["edges"]] if items_container else list_resp.get("items", [])

                for node in nodes:
                    if not node.get("action"):
                        continue
                    link = node["action"]["link"]
                    # Season/episode numbers are encoded in the link, e.g. ".../2/taboe-s2a5".
                    s_match = re.search(r'/(\d+)/.+s(\d+)a(\d+)', link)
                    episodes.append(Episode(
                        id_=link,
                        service=self.__class__,
                        title=series_name,
                        season=int(s_match.group(2)) if s_match else 1,
                        number=int(s_match.group(3)) if s_match else 0,
                        name=node["title"],
                        language=Language.get("nl"),
                        data={"page_id": link},
                    ))

                if items_container and items_container["pageInfo"]["hasNextPage"]:
                    after = items_container["pageInfo"]["endCursor"]
                else:
                    break

        if not episodes:
            raise ValueError("No episodes found for this series.")

        return Series(episodes)

    def get_tracks(self, title: Title_T) -> Tracks:
        page_id = title.data["page_id"]
        r_meta = self.session.post(
            self.config["endpoints"]["graphql"],
            json={"query": QUERY_PLAYBACK, "variables": {"pageId": page_id}},
            headers=self._get_gql_headers(),
        )
        stream_id = r_meta.json()["data"]["page"]["player"]["modes"][0]["streamId"]

        # Build an unsigned JWT for the player-token exchange: the fixed
        # prefix is base64url for {"alg":"HS256"}, the payload is the
        # configured player_info, and the signature segment is left empty.
        p_info = base64.urlsafe_b64encode(json.dumps(self.config["player_info"]).encode()).decode().replace("=", "")
        r_tok = self.session.post(
            self.config["endpoints"]["player_token"],
            json={"identityToken": self.video_token, "playerInfo": f"eyJhbGciOiJIUzI1NiJ9.{p_info}."},
        )
        vrt_player_token = r_tok.json()["vrtPlayerToken"]

        r_agg = self.session.get(
            self.config["endpoints"]["aggregator"].format(stream_id=stream_id),
            params={"client": self.config["settings"]["client_id"], "vrtPlayerToken": vrt_player_token},
        )
        agg_data = r_agg.json()

        dash_url = next(u["url"] for u in agg_data["targetUrls"] if u["type"] == "mpeg_dash")
        tracks = DASH.from_url(dash_url, session=self.session).to_tracks(language=title.language)
        self.drm_token = agg_data["drm"]

        for sub in agg_data.get("subtitleUrls", []):
            tracks.add(Subtitle(
                id_=sub.get("label", "nl"),
                url=sub["url"],
                codec=Subtitle.Codec.WebVTT,
                language=Language.get(sub.get("language", "nl")),
            ))

        for tr in tracks.videos + tracks.audio:
            if tr.drm:
                # Bind tr per iteration via a default argument; a bare closure
                # would make every track license against the last track.
                tr.drm.license = lambda challenge, track=tr, **kw: self.get_widevine_license(challenge, title, track)

        return tracks

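    # Sanity check for the fixed JWT header used above (illustrative):
    #   >>> import base64
    #   >>> base64.urlsafe_b64decode("eyJhbGciOiJIUzI1NiJ9")
    #   b'{"alg":"HS256"}'
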
    def get_widevine_license(self, challenge: bytes, title: Title_T, track: AnyTrack) -> bytes:
        r = self.session.post(
            self.config["endpoints"]["license"],
            data=challenge,
            headers={"x-vudrm-token": self.drm_token, "Origin": "https://www.vrt.be", "Referer": "https://www.vrt.be/"},
        )
        r.raise_for_status()
        return r.content

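    # The vudrm proxy consumes the raw Widevine challenge plus the token the
    # aggregator returned (agg_data["drm"]) and replies with the license bytes
    # directly, hence the bare r.content return above.
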
    def get_chapters(self, title: Title_T) -> list[Chapter]:
        return []
18
VRT/config.yaml
Normal file
@ -0,0 +1,18 @@
endpoints:
  gigya_login: "https://accounts.eu1.gigya.com/accounts.login"
  vrt_sso: "https://www.vrt.be/vrtmax/sso/login"
  graphql: "https://www.vrt.be/vrtnu-api/graphql/v1"
  player_token: "https://media-services-public.vrt.be/vualto-video-aggregator-web/rest/external/v2/tokens"
  aggregator: "https://media-services-public.vrt.be/media-aggregator/v2/media-items/{stream_id}"
  license: "https://widevine-proxy.drm.technology/proxy"

settings:
  api_key: "3_qhEcPa5JGFROVwu5SWKqJ4mVOIkwlFNMSKwzPDAh8QZOtHqu6L4nD5Q7lk0eXOOG"
  client_name: "WEB"
  client_id: "vrtnu-web@PROD"
  client_version: "1.5.15"

player_info:
  drm: { widevine: "L3" }
  platform: "desktop"
  app: { type: "browser", name: "Firefox", version: "146.0" }
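
# Note: player_info is serialized verbatim into the unsigned playerInfo token
# that VRT.get_tracks sends to the player_token endpoint.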