diff --git a/lib/yt_dlp/extractor/_extractors.py b/lib/yt_dlp/extractor/_extractors.py
index e9cd38a65..0f599c9db 100644
--- a/lib/yt_dlp/extractor/_extractors.py
+++ b/lib/yt_dlp/extractor/_extractors.py
@@ -1755,7 +1755,10 @@
     RTVETelevisionIE,
 )
 from .rtvs import RTVSIE
-from .rtvslo import RTVSLOIE
+from .rtvslo import (
+    RTVSLOIE,
+    RTVSLOShowIE,
+)
 from .rudovideo import RudoVideoIE
 from .rule34video import Rule34VideoIE
 from .rumble import (
diff --git a/lib/yt_dlp/extractor/francetv.py b/lib/yt_dlp/extractor/francetv.py
index f732d5677..ab08f1c6b 100644
--- a/lib/yt_dlp/extractor/francetv.py
+++ b/lib/yt_dlp/extractor/francetv.py
@@ -5,6 +5,7 @@
 from .dailymotion import DailymotionIE
 from ..networking import HEADRequest
 from ..utils import (
+    clean_html,
     determine_ext,
     filter_dict,
     format_field,
@@ -33,6 +34,7 @@ class FranceTVIE(InfoExtractor):
     _GEO_BYPASS = False

     _TESTS = [{
+        # tokenized url is in dinfo['video']['token']
         'url': 'francetv:ec217ecc-0733-48cf-ac06-af1347b849d1',
         'info_dict': {
             'id': 'ec217ecc-0733-48cf-ac06-af1347b849d1',
@@ -44,6 +46,19 @@ class FranceTVIE(InfoExtractor):
             'upload_date': '20170813',
         },
         'params': {'skip_download': 'm3u8'},
+    }, {
+        # tokenized url is in dinfo['video']['token']['akamai']
+        'url': 'francetv:c5bda21d-2c6f-4470-8849-3d8327adb2ba',
+        'info_dict': {
+            'id': 'c5bda21d-2c6f-4470-8849-3d8327adb2ba',
+            'ext': 'mp4',
+            'title': '13h15, le dimanche... - Les mystères de Jésus',
+            'timestamp': 1514118300,
+            'duration': 2880,
+            'thumbnail': r're:^https?://.*\.jpg$',
+            'upload_date': '20171224',
+        },
+        'params': {'skip_download': 'm3u8'},
     }, {
         'url': 'francetv:162311093',
         'only_matching': True,
@@ -68,6 +83,7 @@ class FranceTVIE(InfoExtractor):
     def _extract_video(self, video_id, hostname=None):
         is_live = None
         videos = []
+        drm_formats = False
         title = None
         subtitle = None
         episode_number = None
@@ -85,13 +101,12 @@ def _extract_video(self, video_id, hostname=None):
                     'device_type': device_type,
                     'browser': browser,
                     'domain': hostname,
-                }), fatal=False)
+                }), fatal=False, expected_status=422)  # 422 json gives detailed error code/message

             if not dinfo:
                 continue

-            video = traverse_obj(dinfo, ('video', {dict}))
-            if video:
+            if video := traverse_obj(dinfo, ('video', {dict})):
                 videos.append(video)
                 if duration is None:
                     duration = video.get('duration')
@@ -99,9 +114,19 @@ def _extract_video(self, video_id, hostname=None):
                     is_live = video.get('is_live')
                 if spritesheets is None:
                     spritesheets = video.get('spritesheets')
+            elif code := traverse_obj(dinfo, ('code', {int})):
+                if code == 2009:
+                    self.raise_geo_restricted(countries=self._GEO_COUNTRIES)
+                elif code in (2015, 2017):
+                    # 2015: L'accès à cette vidéo est impossible. (DRM-only)
+                    # 2017: Cette vidéo n'est pas disponible depuis le site web mobile (b/c DRM)
+                    drm_formats = True
+                    continue
+                self.report_warning(
+                    f'{self.IE_NAME} said: {code} "{clean_html(dinfo.get("message"))}"')
+                continue

-            meta = traverse_obj(dinfo, ('meta', {dict}))
-            if meta:
+            if meta := traverse_obj(dinfo, ('meta', {dict})):
                 if title is None:
                     title = meta.get('title')
                 # meta['pre_title'] contains season and episode number for series in format "S<ID> E<ID>"
@@ -114,12 +139,15 @@ def _extract_video(self, video_id, hostname=None):
                 if timestamp is None:
                     timestamp = parse_iso8601(meta.get('broadcasted_at'))

+        if not videos and drm_formats:
+            self.report_drm(video_id)
+
         formats, subtitles, video_url = [], {}, None
         for video in traverse_obj(videos, lambda _, v: url_or_none(v['url'])):
             video_url = video['url']
             format_id = video.get('format')

-            if token_url := url_or_none(video.get('token')):
+            if token_url := traverse_obj(video, ('token', (None, 'akamai'), {url_or_none}, any)):
                 tokenized_url = traverse_obj(self._download_json(
                     token_url, video_id, f'Downloading signed {format_id} manifest URL',
                     fatal=False, query={
@@ -225,13 +253,13 @@ class FranceTVSiteIE(FranceTVBaseInfoExtractor):
     _TESTS = [{
         'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html',
         'info_dict': {
-            'id': 'ec217ecc-0733-48cf-ac06-af1347b849d1',
+            'id': 'c5bda21d-2c6f-4470-8849-3d8327adb2ba',
             'ext': 'mp4',
             'title': '13h15, le dimanche... - Les mystères de Jésus',
-            'timestamp': 1502623500,
-            'duration': 2580,
+            'timestamp': 1514118300,
+            'duration': 2880,
             'thumbnail': r're:^https?://.*\.jpg$',
-            'upload_date': '20170813',
+            'upload_date': '20171224',
         },
         'params': {
             'skip_download': True,
diff --git a/lib/yt_dlp/extractor/nhk.py b/lib/yt_dlp/extractor/nhk.py
index 0ff25a690..0bd6edfcb 100644
--- a/lib/yt_dlp/extractor/nhk.py
+++ b/lib/yt_dlp/extractor/nhk.py
@@ -4,6 +4,7 @@
 from ..utils import (
     ExtractorError,
     clean_html,
+    filter_dict,
     get_element_by_class,
     int_or_none,
     join_nonempty,
@@ -590,21 +591,22 @@ class NhkRadiruIE(InfoExtractor):
     IE_DESC = 'NHK らじる (Radiru/Rajiru)'
     _VALID_URL = r'https?://www\.nhk\.or\.jp/radio/(?:player/ondemand|ondemand/detail)\.html\?p=(?P<site>[\da-zA-Z]+)_(?P<corner>[\da-zA-Z]+)(?:_(?P<headline>[\da-zA-Z]+))?'
_TESTS = [{ - 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=0449_01_3926210', - 'skip': 'Episode expired on 2024-02-24', + 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=0449_01_4003239', + 'skip': 'Episode expired on 2024-06-09', 'info_dict': { - 'title': 'ジャズ・トゥナイト シリーズJAZZジャイアンツ 56 ジョニー・ホッジス', - 'id': '0449_01_3926210', + 'title': 'ジャズ・トゥナイト ジャズ「Night and Day」特集', + 'id': '0449_01_4003239', 'ext': 'm4a', + 'uploader': 'NHK FM 東京', + 'description': 'md5:ad05f3c3f3f6e99b2e69f9b5e49551dc', 'series': 'ジャズ・トゥナイト', - 'uploader': 'NHK-FM', - 'channel': 'NHK-FM', + 'channel': 'NHK FM 東京', 'thumbnail': 'https://www.nhk.or.jp/prog/img/449/g449.jpg', - 'release_date': '20240217', - 'description': 'md5:a456ee8e5e59e6dd2a7d32e62386e811', - 'timestamp': 1708185600, - 'release_timestamp': 1708178400, - 'upload_date': '20240217', + 'upload_date': '20240601', + 'series_id': '0449_01', + 'release_date': '20240601', + 'timestamp': 1717257600, + 'release_timestamp': 1717250400, }, }, { # playlist, airs every weekday so it should _hopefully_ be okay forever @@ -613,71 +615,145 @@ class NhkRadiruIE(InfoExtractor): 'id': '0458_01', 'title': 'ベストオブクラシック', 'description': '世界中の上質な演奏会をじっくり堪能する本格派クラシック番組。', - 'channel': 'NHK-FM', - 'uploader': 'NHK-FM', 'thumbnail': 'https://www.nhk.or.jp/prog/img/458/g458.jpg', + 'series_id': '0458_01', + 'uploader': 'NHK FM', + 'channel': 'NHK FM', + 'series': 'ベストオブクラシック', }, 'playlist_mincount': 3, }, { # one with letters in the id - 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F300_06_3738470', - 'note': 'Expires on 2024-03-31', + 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F683_01_3910688', + 'note': 'Expires on 2025-03-31', 'info_dict': { - 'id': 'F300_06_3738470', + 'id': 'F683_01_3910688', 'ext': 'm4a', - 'title': '有島武郎「一房のぶどう」', - 'description': '朗読:川野一宇(ラジオ深夜便アンカー)\r\n\r\n(2016年12月8日放送「ラジオ深夜便『アンカー朗読シリーズ』」より)', - 'channel': 'NHKラジオ第1、NHK-FM', - 'uploader': 'NHKラジオ第1、NHK-FM', - 'timestamp': 1635757200, - 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F300/img/corner/box_109_thumbnail.jpg', - 'release_date': '20161207', - 'series': 'らじる文庫 by ラジオ深夜便 ', - 'release_timestamp': 1481126700, - 'upload_date': '20211101', + 'title': '夏目漱石「文鳥」第1回', + 'series': '【らじる文庫】夏目漱石「文鳥」(全4回)', + 'series_id': 'F683_01', + 'description': '朗読:浅井理アナウンサー', + 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F683/img/roudoku_05_rod_640.jpg', + 'upload_date': '20240106', + 'release_date': '20240106', + 'uploader': 'NHK R1', + 'release_timestamp': 1704511800, + 'channel': 'NHK R1', + 'timestamp': 1704512700, }, - 'expected_warnings': ['Unable to download JSON metadata', 'Failed to get extended description'], + 'expected_warnings': ['Unable to download JSON metadata', + 'Failed to get extended metadata. 
API returned Error 1: Invalid parameters'], }, { # news - 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F261_01_3855109', - 'skip': 'Expires on 2023-04-17', + 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=F261_01_4012173', 'info_dict': { - 'id': 'F261_01_3855109', + 'id': 'F261_01_4012173', 'ext': 'm4a', 'channel': 'NHKラジオ第1', 'uploader': 'NHKラジオ第1', - 'timestamp': 1681635900, - 'release_date': '20230416', 'series': 'NHKラジオニュース', - 'title': '午後6時のNHKニュース', + 'title': '午前0時のNHKニュース', 'thumbnail': 'https://www.nhk.or.jp/radioondemand/json/F261/img/RADIONEWS_640.jpg', - 'upload_date': '20230416', - 'release_timestamp': 1681635600, + 'release_timestamp': 1718290800, + 'release_date': '20240613', + 'timestamp': 1718291400, + 'upload_date': '20240613', }, + }, { + # fallback when extended metadata fails + 'url': 'https://www.nhk.or.jp/radio/player/ondemand.html?p=2834_01_4009298', + 'skip': 'Expires on 2024-06-07', + 'info_dict': { + 'id': '2834_01_4009298', + 'title': 'まち☆キラ!開成町特集', + 'ext': 'm4a', + 'release_date': '20240531', + 'upload_date': '20240531', + 'series': 'はま☆キラ!', + 'thumbnail': 'https://www.nhk.or.jp/prog/img/2834/g2834.jpg', + 'channel': 'NHK R1,FM', + 'description': '', + 'timestamp': 1717123800, + 'uploader': 'NHK R1,FM', + 'release_timestamp': 1717120800, + 'series_id': '2834_01', + }, + 'expected_warnings': ['Failed to get extended metadata. API returned empty list.'], }] _API_URL_TMPL = None - def _extract_extended_description(self, episode_id, episode): - service, _, area = traverse_obj(episode, ('aa_vinfo2', {str}, {lambda x: (x or '').partition(',')})) - aa_vinfo3 = traverse_obj(episode, ('aa_vinfo3', {str})) + def _extract_extended_metadata(self, episode_id, aa_vinfo): + service, _, area = traverse_obj(aa_vinfo, (2, {str}, {lambda x: (x or '').partition(',')})) detail_url = try_call( - lambda: self._API_URL_TMPL.format(service=service, area=area, dateid=aa_vinfo3)) + lambda: self._API_URL_TMPL.format(area=area, service=service, dateid=aa_vinfo[3])) if not detail_url: - return + return {} + + response = self._download_json( + detail_url, episode_id, 'Downloading extended metadata', + 'Failed to download extended metadata', fatal=False, expected_status=400) + if not response: + return {} + + if error := traverse_obj(response, ('error', {dict})): + self.report_warning( + 'Failed to get extended metadata. API returned ' + f'Error {join_nonempty("code", "message", from_dict=error, delim=": ")}') + return {} + + full_meta = traverse_obj(response, ('list', service, 0, {dict})) + if not full_meta: + self.report_warning('Failed to get extended metadata. 
API returned empty list.') + return {} + + station = ' '.join(traverse_obj(full_meta, (('service', 'area'), 'name', {str}))) or None + thumbnails = [{ + 'id': str(id_), + 'preference': 1 if id_.startswith('thumbnail') else -2 if id_.startswith('logo') else -1, + **traverse_obj(thumb, { + 'url': 'url', + 'width': ('width', {int_or_none}), + 'height': ('height', {int_or_none}), + }), + } for id_, thumb in traverse_obj(full_meta, ('images', {dict.items}, lambda _, v: v[1]['url']))] + + return filter_dict({ + 'channel': station, + 'uploader': station, + 'description': join_nonempty( + 'subtitle', 'content', 'act', 'music', delim='\n\n', from_dict=full_meta), + 'thumbnails': thumbnails, + **traverse_obj(full_meta, { + 'title': ('title', {str}), + 'timestamp': ('end_time', {unified_timestamp}), + 'release_timestamp': ('start_time', {unified_timestamp}), + }), + }) - full_meta = traverse_obj( - self._download_json(detail_url, episode_id, 'Downloading extended metadata', fatal=False), - ('list', service, 0, {dict})) or {} - return join_nonempty('subtitle', 'content', 'act', 'music', delim='\n\n', from_dict=full_meta) + def _extract_episode_info(self, episode, programme_id, series_meta): + episode_id = f'{programme_id}_{episode["id"]}' + aa_vinfo = traverse_obj(episode, ('aa_contents_id', {lambda x: x.split(';')})) + extended_metadata = self._extract_extended_metadata(episode_id, aa_vinfo) + fallback_start_time, _, fallback_end_time = traverse_obj( + aa_vinfo, (4, {str}, {lambda x: (x or '').partition('_')})) - def _extract_episode_info(self, headline, programme_id, series_meta): + return { + **series_meta, + 'id': episode_id, + 'formats': self._extract_m3u8_formats(episode.get('stream_url'), episode_id, fatal=False), + 'container': 'm4a_dash', # force fixup, AAC-only HLS + 'was_live': True, + 'title': episode.get('program_title'), + 'description': episode.get('program_sub_title'), # fallback + 'timestamp': unified_timestamp(fallback_end_time), + 'release_timestamp': unified_timestamp(fallback_start_time), + **extended_metadata, + } + + def _extract_news_info(self, headline, programme_id, series_meta): episode_id = f'{programme_id}_{headline["headline_id"]}' episode = traverse_obj(headline, ('file_list', 0, {dict})) - description = self._extract_extended_description(episode_id, episode) - if not description: - self.report_warning('Failed to get extended description, falling back to summary') - description = traverse_obj(episode, ('file_title_sub', {str})) return { **series_meta, @@ -687,9 +763,9 @@ def _extract_episode_info(self, headline, programme_id, series_meta): 'was_live': True, 'series': series_meta.get('title'), 'thumbnail': url_or_none(headline.get('headline_image')) or series_meta.get('thumbnail'), - 'description': description, **traverse_obj(episode, { - 'title': 'file_title', + 'title': ('file_title', {str}), + 'description': ('file_title_sub', {str}), 'timestamp': ('open_time', {unified_timestamp}), 'release_timestamp': ('aa_vinfo4', {lambda x: x.split('_')[0]}, {unified_timestamp}), }), @@ -706,32 +782,58 @@ def _real_extract(self, url): site_id, corner_id, headline_id = self._match_valid_url(url).group('site', 'corner', 'headline') programme_id = f'{site_id}_{corner_id}' - if site_id == 'F261': - json_url = 'https://www.nhk.or.jp/s-media/news/news-site/list/v1/all.json' - else: - json_url = f'https://www.nhk.or.jp/radioondemand/json/{site_id}/bangumi_{programme_id}.json' - - meta = self._download_json(json_url, programme_id)['main'] + if site_id == 'F261': # XXX: News programmes use 
old API (for now?) + meta = self._download_json( + 'https://www.nhk.or.jp/s-media/news/news-site/list/v1/all.json', programme_id)['main'] + series_meta = traverse_obj(meta, { + 'title': ('program_name', {str}), + 'channel': ('media_name', {str}), + 'uploader': ('media_name', {str}), + 'thumbnail': (('thumbnail_c', 'thumbnail_p'), {url_or_none}), + }, get_all=False) + + if headline_id: + headline = traverse_obj( + meta, ('detail_list', lambda _, v: v['headline_id'] == headline_id, any)) + if not headline: + raise ExtractorError('Content not found; it has most likely expired', expected=True) + return self._extract_news_info(headline, programme_id, series_meta) + + def news_entries(): + for headline in traverse_obj(meta, ('detail_list', ..., {dict})): + yield self._extract_news_info(headline, programme_id, series_meta) + + return self.playlist_result( + news_entries(), programme_id, description=meta.get('site_detail'), **series_meta) + + meta = self._download_json( + 'https://www.nhk.or.jp/radio-api/app/v1/web/ondemand/series', programme_id, query={ + 'site_id': site_id, + 'corner_site_id': corner_id, + }) - series_meta = traverse_obj(meta, { - 'title': 'program_name', - 'channel': 'media_name', - 'uploader': 'media_name', - 'thumbnail': (('thumbnail_c', 'thumbnail_p'), {url_or_none}), - }, get_all=False) + fallback_station = join_nonempty('NHK', traverse_obj(meta, ('radio_broadcast', {str})), delim=' ') + series_meta = { + 'series': join_nonempty('title', 'corner_name', delim=' ', from_dict=meta), + 'series_id': programme_id, + 'thumbnail': traverse_obj(meta, ('thumbnail_url', {url_or_none})), + 'channel': fallback_station, + 'uploader': fallback_station, + } if headline_id: - return self._extract_episode_info( - traverse_obj(meta, ( - 'detail_list', lambda _, v: v['headline_id'] == headline_id), get_all=False), - programme_id, series_meta) + episode = traverse_obj(meta, ('episodes', lambda _, v: v['id'] == int(headline_id), any)) + if not episode: + raise ExtractorError('Content not found; it has most likely expired', expected=True) + return self._extract_episode_info(episode, programme_id, series_meta) def entries(): - for headline in traverse_obj(meta, ('detail_list', ..., {dict})): - yield self._extract_episode_info(headline, programme_id, series_meta) + for episode in traverse_obj(meta, ('episodes', ..., {dict})): + yield self._extract_episode_info(episode, programme_id, series_meta) return self.playlist_result( - entries(), programme_id, playlist_description=meta.get('site_detail'), **series_meta) + entries(), programme_id, title=series_meta.get('series'), + description=meta.get('series_description'), **series_meta) class NhkRadioNewsPageIE(InfoExtractor): diff --git a/lib/yt_dlp/extractor/rtvslo.py b/lib/yt_dlp/extractor/rtvslo.py index e71d01d1e..9c2e6fb6b 100644 --- a/lib/yt_dlp/extractor/rtvslo.py +++ b/lib/yt_dlp/extractor/rtvslo.py @@ -1,3 +1,5 @@ +import re + from .common import InfoExtractor from ..utils import ( ExtractorError, @@ -6,6 +8,7 @@ traverse_obj, unified_timestamp, url_or_none, + urljoin, ) @@ -21,75 +24,73 @@ class RTVSLOIE(InfoExtractor): _API_BASE = 'https://api.rtvslo.si/ava/{}/{}?client_id=82013fb3a531d5414f478747c1aca622' SUB_LANGS_MAP = {'Slovenski': 'sl'} - _TESTS = [ - { - 'url': 'https://www.rtvslo.si/rtv365/arhiv/174842550?s=tv', - 'info_dict': { - 'id': '174842550', - 'ext': 'mp4', - 'release_timestamp': 1643140032, - 'upload_date': '20220125', - 'series': 'Dnevnik', - 'thumbnail': 
'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/92/dnevnik_3_wide2.jpg', - 'description': 'md5:76a18692757aeb8f0f51221106277dd2', - 'timestamp': 1643137046, - 'title': 'Dnevnik', - 'series_id': '92', - 'release_date': '20220125', - 'duration': 1789, - }, - }, { - 'url': 'https://365.rtvslo.si/arhiv/utrip/174843754', - 'info_dict': { - 'id': '174843754', - 'ext': 'mp4', - 'series_id': '94', - 'release_date': '20220129', - 'timestamp': 1643484455, - 'title': 'Utrip', - 'duration': 813, - 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/94/utrip_1_wide2.jpg', - 'description': 'md5:77f2892630c7b17bb7a5bb84319020c9', - 'release_timestamp': 1643485825, - 'upload_date': '20220129', - 'series': 'Utrip', - }, - }, { - 'url': 'https://365.rtvslo.si/arhiv/il-giornale-della-sera/174844609', - 'info_dict': { - 'id': '174844609', - 'ext': 'mp3', - 'series_id': '106615841', - 'title': 'Il giornale della sera', - 'duration': 1328, - 'series': 'Il giornale della sera', - 'timestamp': 1643743800, - 'release_timestamp': 1643745424, - 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/il-giornale-della-sera_wide2.jpg', - 'upload_date': '20220201', - 'tbr': 128000, - 'release_date': '20220201', - }, - }, { - 'url': 'https://365.rtvslo.si/arhiv/razred-zase/148350750', - 'info_dict': { - 'id': '148350750', - 'ext': 'mp4', - 'title': 'Prvi šolski dan, mozaična oddaja za mlade', - 'series': 'Razred zase', - 'series_id': '148185730', - 'duration': 1481, - 'upload_date': '20121019', - 'timestamp': 1350672122, - 'release_date': '20121019', - 'release_timestamp': 1350672122, - 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/148185730/razred_zase_2014_logo_4d_wide2.jpg', - }, - }, { - 'url': 'https://4d.rtvslo.si/arhiv/dnevnik/174842550', - 'only_matching': True, + _TESTS = [{ + 'url': 'https://www.rtvslo.si/rtv365/arhiv/174842550?s=tv', + 'info_dict': { + 'id': '174842550', + 'ext': 'mp4', + 'release_timestamp': 1643140032, + 'upload_date': '20220125', + 'series': 'Dnevnik', + 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/92/dnevnik_3_wide2.jpg', + 'description': 'md5:76a18692757aeb8f0f51221106277dd2', + 'timestamp': 1643137046, + 'title': 'Dnevnik', + 'series_id': '92', + 'release_date': '20220125', + 'duration': 1789, + }, + }, { + 'url': 'https://365.rtvslo.si/arhiv/utrip/174843754', + 'info_dict': { + 'id': '174843754', + 'ext': 'mp4', + 'series_id': '94', + 'release_date': '20220129', + 'timestamp': 1643484455, + 'title': 'Utrip', + 'duration': 813, + 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/94/utrip_1_wide2.jpg', + 'description': 'md5:77f2892630c7b17bb7a5bb84319020c9', + 'release_timestamp': 1643485825, + 'upload_date': '20220129', + 'series': 'Utrip', + }, + }, { + 'url': 'https://365.rtvslo.si/arhiv/il-giornale-della-sera/174844609', + 'info_dict': { + 'id': '174844609', + 'ext': 'mp3', + 'series_id': '106615841', + 'title': 'Il giornale della sera', + 'duration': 1328, + 'series': 'Il giornale della sera', + 'timestamp': 1643743800, + 'release_timestamp': 1643745424, + 'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/il-giornale-della-sera_wide2.jpg', + 'upload_date': '20220201', + 'tbr': 128000, + 'release_date': '20220201', }, - ] + }, { + 'url': 'https://365.rtvslo.si/arhiv/razred-zase/148350750', + 'info_dict': { + 'id': '148350750', + 'ext': 'mp4', + 'title': 'Prvi šolski dan, mozaična oddaja za mlade', + 'series': 'Razred zase', + 'series_id': '148185730', + 'duration': 1481, + 'upload_date': 
'20121019',
+            'timestamp': 1350672122,
+            'release_date': '20121019',
+            'release_timestamp': 1350672122,
+            'thumbnail': 'https://img.rtvcdn.si/_up/ava/ava_misc/show_logos/148185730/razred_zase_2014_logo_4d_wide2.jpg',
+        },
+    }, {
+        'url': 'https://4d.rtvslo.si/arhiv/dnevnik/174842550',
+        'only_matching': True,
+    }]

     def _real_extract(self, url):
         v_id = self._match_id(url)
@@ -164,3 +165,26 @@ def _real_extract(self, url):
             'series': meta.get('showName'),
             'series_id': meta.get('showId'),
         }
+
+
+class RTVSLOShowIE(InfoExtractor):
+    IE_NAME = 'rtvslo.si:show'
+    _VALID_URL = r'https?://(?:365|4d)\.rtvslo.si/oddaja/[^/?#&]+/(?P<id>\d+)'
+
+    _TESTS = [{
+        'url': 'https://365.rtvslo.si/oddaja/ekipa-bled/173250997',
+        'info_dict': {
+            'id': '173250997',
+            'title': 'Ekipa Bled',
+        },
+        'playlist_count': 18,
+    }]
+
+    def _real_extract(self, url):
+        playlist_id = self._match_id(url)
+        webpage = self._download_webpage(url, playlist_id)
+
+        return self.playlist_from_matches(
+            re.findall(r'<a [^>]*\bhref="(/arhiv/[^"]+)"', webpage),
+            playlist_id, self._html_extract_title(webpage),
+            getter=lambda x: urljoin('https://365.rtvslo.si', x), ie=RTVSLOIE)
diff --git a/lib/yt_dlp/extractor/soundcloud.py b/lib/yt_dlp/extractor/soundcloud.py
index 0f7368435..0c6f0b070 100644
--- a/lib/yt_dlp/extractor/soundcloud.py
+++ b/lib/yt_dlp/extractor/soundcloud.py
@@ -95,7 +95,7 @@ def _update_client_id(self):
                     return
         raise ExtractorError('Unable to extract client id')

-    def _download_json(self, *args, **kwargs):
+    def _call_api(self, *args, **kwargs):
         non_fatal = kwargs.get('fatal') is False
         if non_fatal:
             del kwargs['fatal']
@@ -104,7 +104,7 @@ def _download_json(self, *args, **kwargs):
         query['client_id'] = self._CLIENT_ID
         kwargs['query'] = query
         try:
-            return super()._download_json(*args, **kwargs)
+            return self._download_json(*args, **kwargs)
         except ExtractorError as e:
             if isinstance(e.cause, HTTPError) and e.cause.status in (401, 403):
                 self._store_client_id(None)
@@ -163,7 +163,7 @@ def genNumBlock():
             'user_agent': self._USER_AGENT
         }

-        response = self._download_json(
+        response = self._call_api(
             self._API_AUTH_URL_PW % (self._API_AUTH_QUERY_TEMPLATE % self._CLIENT_ID),
             None, note='Verifying login token...', fatal=False,
             data=json.dumps(payload).encode())
@@ -217,12 +217,26 @@ def _extract_info_dict(self, info, full_title=None, secret_token=None, extract_f
             query['secret_token'] = secret_token

         if not extract_flat and info.get('downloadable') and info.get('has_downloads_left'):
-            download_url = update_url_query(
-                self._API_V2_BASE + 'tracks/' + track_id + '/download', query)
-            redirect_url = (self._download_json(download_url, track_id, fatal=False) or {}).get('redirectUri')
-            if redirect_url:
+            try:
+                # Do not use _call_api(); HTTP Error codes have different meanings for this request
+                download_data = self._download_json(
+                    f'{self._API_V2_BASE}tracks/{track_id}/download', track_id,
+                    'Downloading original download format info JSON', query=query, headers=self._HEADERS)
+            except ExtractorError as e:
+                if isinstance(e.cause, HTTPError) and e.cause.status == 401:
+                    self.report_warning(
+                        'Original download format is only available '
+                        f'for registered users. 
{self._login_hint()}') + elif isinstance(e.cause, HTTPError) and e.cause.status == 403: + self.write_debug('Original download format is not available for this client') + else: + self.report_warning(e.msg) + download_data = None + + if redirect_url := traverse_obj(download_data, ('redirectUri', {url_or_none})): urlh = self._request_webpage( - HEADRequest(redirect_url), track_id, 'Checking for original download format', fatal=False) + HEADRequest(redirect_url), track_id, 'Checking original download format availability', + 'Original download format is not available', fatal=False) if urlh: format_url = urlh.url format_urls.add(format_url) @@ -303,7 +317,7 @@ def add_format(f, protocol, is_preview=False): stream = None for retry in self.RetryManager(fatal=False): try: - stream = self._download_json( + stream = self._call_api( format_url, track_id, f'Downloading {identifier} format info JSON', query=query, headers=self._HEADERS) except ExtractorError as e: @@ -630,7 +644,7 @@ def _real_extract(self, url): resolve_title += f'/{token}' info_json_url = self._resolv_url(self._BASE_URL + resolve_title) - info = self._download_json( + info = self._call_api( info_json_url, full_title, 'Downloading info JSON', query=query, headers=self._HEADERS) return self._extract_info_dict(info, full_title, token) @@ -641,7 +655,7 @@ def _extract_set(self, playlist, token=None): playlist_id = str(playlist['id']) tracks = playlist.get('tracks') or [] if not all(t.get('permalink_url') for t in tracks) and token: - tracks = self._download_json( + tracks = self._call_api( self._API_V2_BASE + 'tracks', playlist_id, 'Downloading tracks', query={ 'ids': ','.join([str(t['id']) for t in tracks]), @@ -699,7 +713,7 @@ def _real_extract(self, url): if token: full_title += '/' + token - info = self._download_json(self._resolv_url( + info = self._call_api(self._resolv_url( self._BASE_URL + full_title), full_title, headers=self._HEADERS) if 'errors' in info: @@ -730,7 +744,7 @@ def _entries(self, url, playlist_id): for i in itertools.count(): for retry in self.RetryManager(): try: - response = self._download_json( + response = self._call_api( url, playlist_id, query=query, headers=self._HEADERS, note=f'Downloading track page {i + 1}') break @@ -838,7 +852,7 @@ def _real_extract(self, url): mobj = self._match_valid_url(url) uploader = mobj.group('user') - user = self._download_json( + user = self._call_api( self._resolv_url(self._BASE_URL + uploader), uploader, 'Downloading user info', headers=self._HEADERS) @@ -864,7 +878,7 @@ class SoundcloudUserPermalinkIE(SoundcloudPagedPlaylistBaseIE): def _real_extract(self, url): user_id = self._match_id(url) - user = self._download_json( + user = self._call_api( self._resolv_url(url), user_id, 'Downloading user info', headers=self._HEADERS) return self._extract_playlist( @@ -886,7 +900,7 @@ class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE): def _real_extract(self, url): track_name = self._match_id(url) - track = self._download_json(self._resolv_url(url), track_name, headers=self._HEADERS) + track = self._call_api(self._resolv_url(url), track_name, headers=self._HEADERS) track_id = self._search_regex( r'soundcloud:track-stations:(\d+)', track['id'], 'track id') @@ -930,7 +944,7 @@ class SoundcloudRelatedIE(SoundcloudPagedPlaylistBaseIE): def _real_extract(self, url): slug, relation = self._match_valid_url(url).group('slug', 'relation') - track = self._download_json( + track = self._call_api( self._resolv_url(self._BASE_URL + slug), slug, 'Downloading track info', 
             headers=self._HEADERS)

@@ -965,7 +979,7 @@ def _real_extract(self, url):
         if token:
             query['secret_token'] = token

-        data = self._download_json(
+        data = self._call_api(
             self._API_V2_BASE + 'playlists/' + playlist_id,
             playlist_id, 'Downloading playlist', query=query, headers=self._HEADERS)

@@ -1000,7 +1014,7 @@ def _get_collection(self, endpoint, collection_id, **query):
         next_url = update_url_query(self._API_V2_BASE + endpoint, query)

         for i in itertools.count(1):
-            response = self._download_json(
+            response = self._call_api(
                 next_url, collection_id, f'Downloading page {i}',
                 'Unable to download API page', headers=self._HEADERS)

diff --git a/lib/yt_dlp/extractor/tiktok.py b/lib/yt_dlp/extractor/tiktok.py
index dc74d4a1f..48934fc6b 100644
--- a/lib/yt_dlp/extractor/tiktok.py
+++ b/lib/yt_dlp/extractor/tiktok.py
@@ -213,8 +213,19 @@ def _extract_aweme_app(self, aweme_id):
         return self._parse_aweme_video_app(aweme_detail)

     def _extract_web_data_and_status(self, url, video_id, fatal=True):
-        webpage = self._download_webpage(url, video_id, headers={'User-Agent': 'Mozilla/5.0'}, fatal=fatal) or ''
-        video_data, status = {}, None
+        video_data, status = {}, -1
+
+        res = self._download_webpage_handle(url, video_id, fatal=fatal, headers={'User-Agent': 'Mozilla/5.0'})
+        if res is False:
+            return video_data, status
+
+        webpage, urlh = res
+        if urllib.parse.urlparse(urlh.url).path == '/login':
+            message = 'TikTok is requiring login for access to this content'
+            if fatal:
+                self.raise_login_required(message)
+            self.report_warning(f'{message}. {self._login_hint()}')
+            return video_data, status

         if universal_data := self._get_universal_data(webpage, video_id):
             self.write_debug('Found universal data for rehydration')
diff --git a/lib/yt_dlp/extractor/tubitv.py b/lib/yt_dlp/extractor/tubitv.py
index 9d9ddae72..85eb3a211 100644
--- a/lib/yt_dlp/extractor/tubitv.py
+++ b/lib/yt_dlp/extractor/tubitv.py
@@ -13,6 +13,7 @@


 class TubiTvIE(InfoExtractor):
+    IE_NAME = 'tubitv'
     _VALID_URL = r'https?://(?:www\.)?tubitv\.com/(?P<type>video|movies|tv-shows)/(?P<id>\d+)'
     _LOGIN_URL = 'http://tubitv.com/login'
     _NETRC_MACHINE = 'tubitv'
@@ -148,30 +149,54 @@ def _real_extract(self, url):


 class TubiTvShowIE(InfoExtractor):
-    _WORKING = False
-    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/series/[0-9]+/(?P<show_name>[^/?#]+)'
+    IE_NAME = 'tubitv:series'
+    _VALID_URL = r'https?://(?:www\.)?tubitv\.com/series/\d+/(?P<show_name>[^/?#]+)(?:/season-(?P<season>\d+))?'
_TESTS = [{ 'url': 'https://tubitv.com/series/3936/the-joy-of-painting-with-bob-ross?start=true', - 'playlist_mincount': 390, + 'playlist_mincount': 389, 'info_dict': { 'id': 'the-joy-of-painting-with-bob-ross', }, + }, { + 'url': 'https://tubitv.com/series/2311/the-saddle-club/season-1', + 'playlist_count': 26, + 'info_dict': { + 'id': 'the-saddle-club-season-1', + }, + }, { + 'url': 'https://tubitv.com/series/2311/the-saddle-club/season-3', + 'playlist_count': 19, + 'info_dict': { + 'id': 'the-saddle-club-season-3', + }, + }, { + 'url': 'https://tubitv.com/series/2311/the-saddle-club/', + 'playlist_mincount': 71, + 'info_dict': { + 'id': 'the-saddle-club', + }, }] - def _entries(self, show_url, show_name): - show_webpage = self._download_webpage(show_url, show_name) + def _entries(self, show_url, playlist_id, selected_season): + webpage = self._download_webpage(show_url, playlist_id) + + data = self._search_json( + r'window\.__data\s*=', webpage, 'data', playlist_id, + transform_source=js_to_json)['video'] - show_json = self._parse_json(self._search_regex( - r'window\.__data\s*=\s*({[^<]+});\s*', - show_webpage, 'data'), show_name, transform_source=js_to_json)['video'] + # v['number'] is already a decimal string, but stringify to protect against API changes + path = [lambda _, v: str(v['number']) == selected_season] if selected_season else [..., {dict}] - for episode_id in show_json['fullContentById']: - if traverse_obj(show_json, ('byId', episode_id, 'type')) == 's': - continue - yield self.url_result( - f'https://tubitv.com/tv-shows/{episode_id}/', - ie=TubiTvIE.ie_key(), video_id=episode_id) + for season in traverse_obj(data, ('byId', lambda _, v: v['type'] == 's', 'seasons', *path)): + season_number = int_or_none(season.get('number')) + for episode in traverse_obj(season, ('episodes', lambda _, v: v['id'])): + episode_id = episode['id'] + yield self.url_result( + f'https://tubitv.com/tv-shows/{episode_id}/', TubiTvIE, episode_id, + season_number=season_number, episode_number=int_or_none(episode.get('num'))) def _real_extract(self, url): - show_name = self._match_valid_url(url).group('show_name') - return self.playlist_result(self._entries(url, show_name), playlist_id=show_name) + playlist_id, selected_season = self._match_valid_url(url).group('show_name', 'season') + if selected_season: + playlist_id = f'{playlist_id}-season-{selected_season}' + return self.playlist_result(self._entries(url, playlist_id, selected_season), playlist_id) diff --git a/lib/yt_dlp/extractor/youtube.py b/lib/yt_dlp/extractor/youtube.py index a227f2425..a89744eb1 100644 --- a/lib/yt_dlp/extractor/youtube.py +++ b/lib/yt_dlp/extractor/youtube.py @@ -885,14 +885,14 @@ def _get_count(self, data, *path_list): return count @staticmethod - def _extract_thumbnails(data, *path_list): + def _extract_thumbnails(data, *path_list, final_key='thumbnails'): """ Extract thumbnails from thumbnails dict @param path_list: path list to level that contains 'thumbnails' key """ thumbnails = [] for path in path_list or [()]: - for thumbnail in traverse_obj(data, (*variadic(path), 'thumbnails', ...)): + for thumbnail in traverse_obj(data, (*variadic(path), final_key, ...)): thumbnail_url = url_or_none(thumbnail.get('url')) if not thumbnail_url: continue @@ -5124,6 +5124,10 @@ def _extract_metadata_from_tabs(self, item_id, data): else: metadata_renderer = traverse_obj(data, ('metadata', 'playlistMetadataRenderer'), expected_type=dict) + # pageHeaderViewModel slow rollout began April 2024 + page_header_view_model = 
traverse_obj(data, ( + 'header', 'pageHeaderRenderer', 'content', 'pageHeaderViewModel', {dict})) + # We can get the uncropped banner/avatar by replacing the crop params with '=s0' # See: https://github.com/yt-dlp/yt-dlp/issues/2237#issuecomment-1013694714 def _get_uncropped(url): @@ -5139,8 +5143,10 @@ def _get_uncropped(url): 'preference': 1, }) - channel_banners = self._extract_thumbnails( - data, ('header', ..., ('banner', 'mobileBanner', 'tvBanner'))) + channel_banners = ( + self._extract_thumbnails(data, ('header', ..., ('banner', 'mobileBanner', 'tvBanner'))) + or self._extract_thumbnails( + page_header_view_model, ('banner', 'imageBannerViewModel', 'image'), final_key='sources')) for banner in channel_banners: banner['preference'] = -10 @@ -5167,7 +5173,11 @@ def _get_uncropped(url): or self._get_text(data, ('header', 'hashtagHeaderRenderer', 'hashtag')) or info['id']), 'availability': self._extract_availability(data), - 'channel_follower_count': self._get_count(data, ('header', ..., 'subscriberCountText')), + 'channel_follower_count': ( + self._get_count(data, ('header', ..., 'subscriberCountText')) + or traverse_obj(page_header_view_model, ( + 'metadata', 'contentMetadataViewModel', 'metadataRows', ..., 'metadataParts', + lambda _, v: 'subscribers' in v['text']['content'], 'text', 'content', {parse_count}, any))), 'description': try_get(metadata_renderer, lambda x: x.get('description', '')), 'tags': (traverse_obj(data, ('microformat', 'microformatDataRenderer', 'tags', ..., {str})) or traverse_obj(metadata_renderer, ('keywords', {lambda x: x and shlex.split(x)}, ...))), diff --git a/lib/yt_dlp_version b/lib/yt_dlp_version index 1b4e8837f..202cae62f 100644 --- a/lib/yt_dlp_version +++ b/lib/yt_dlp_version @@ -1 +1 @@ -add96eb9f84cfffe85682bf2fb85135746994ee8 \ No newline at end of file +a0d9967f6822fc279e86bce33464194985148727 \ No newline at end of file