
Commit

[CI] auto update yt_dlp to upstream commit d8fb3490863653182864d2a53522f350d67a9ff8
github-actions[bot] committed Dec 2, 2024
1 parent e206945 commit 4d5f7de
Showing 17 changed files with 113 additions and 81 deletions.
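
Many of the hunks below are mechanical: chained boolean conditions are wrapped in explicit parentheses. Behavior is unchanged, since Python's `and` already binds more tightly than `or`; the parentheses only make the grouping explicit. A minimal sketch (illustration only, not part of the diff) based on the `raise_no_formats` hunk below:

    # Python's `and` binds tighter than `or`, so both spellings evaluate identically
    msg, has_drm = None, True
    implicit = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
    explicit = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
    assert implicit == explicit == 'This video is DRM protected'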
10 changes: 5 additions & 5 deletions lib/yt_dlp/YoutubeDL.py
@@ -1116,7 +1116,7 @@ def report_file_delete(self, file_name):
     def raise_no_formats(self, info, forced=False, *, msg=None):
         has_drm = info.get('_has_drm')
         ignored, expected = self.params.get('ignore_no_formats_error'), bool(msg)
-        msg = msg or has_drm and 'This video is DRM protected' or 'No video formats found!'
+        msg = msg or (has_drm and 'This video is DRM protected') or 'No video formats found!'
         if forced or not ignored:
             raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                                  expected=has_drm or ignored or expected)
@@ -2196,7 +2196,7 @@ def _select_formats(self, formats, selector):
     def _default_format_spec(self, info_dict):
         prefer_best = (
             self.params['outtmpl']['default'] == '-'
-            or info_dict.get('is_live') and not self.params.get('live_from_start'))
+            or (info_dict.get('is_live') and not self.params.get('live_from_start')))

         def can_merge():
             merger = FFmpegMergerPP(self)
@@ -2365,7 +2365,7 @@ def _merge(formats_pair):
                 vexts=[f['ext'] for f in video_fmts],
                 aexts=[f['ext'] for f in audio_fmts],
                 preferences=(try_call(lambda: self.params['merge_output_format'].split('/'))
-                             or self.params.get('prefer_free_formats') and ('webm', 'mkv')))
+                             or (self.params.get('prefer_free_formats') and ('webm', 'mkv'))))

             filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))

@@ -3541,8 +3541,8 @@ def ffmpeg_fixup(cndn, msg, cls):
                          and info_dict.get('container') == 'm4a_dash',
                          'writing DASH m4a. Only some players support this container',
                          FFmpegFixupM4aPP)
-            ffmpeg_fixup(downloader == 'hlsnative' and not self.params.get('hls_use_mpegts')
-                         or info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None,
+            ffmpeg_fixup((downloader == 'hlsnative' and not self.params.get('hls_use_mpegts'))
+                         or (info_dict.get('is_live') and self.params.get('hls_use_mpegts') is None),
                          'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                          FFmpegFixupM3u8PP)
             ffmpeg_fixup(downloader == 'dashsegments'
6 changes: 3 additions & 3 deletions lib/yt_dlp/__init__.py
@@ -1062,7 +1062,7 @@ def make_row(target, handler):
         # If we only have a single process attached, then the executable was double clicked
         # When using `pyinstaller` with `--onefile`, two processes get attached
         is_onefile = hasattr(sys, '_MEIPASS') and os.path.basename(sys._MEIPASS).startswith('_MEI')
-        if attached_processes == 1 or is_onefile and attached_processes == 2:
+        if attached_processes == 1 or (is_onefile and attached_processes == 2):
             print(parser._generate_error_message(
                 'Do not double-click the executable, instead call it from a command line.\n'
                 'Please read the README for further information on how to use yt-dlp: '
@@ -1109,9 +1109,9 @@ def main(argv=None):
 from .extractor import gen_extractors, list_extractors

 __all__ = [
-    'main',
     'YoutubeDL',
-    'parse_options',
     'gen_extractors',
     'list_extractors',
+    'main',
+    'parse_options',
 ]
14 changes: 6 additions & 8 deletions lib/yt_dlp/aes.py
@@ -534,19 +534,17 @@ def ghash(subkey, data):
 __all__ = [
     'aes_cbc_decrypt',
     'aes_cbc_decrypt_bytes',
-    'aes_ctr_decrypt',
-    'aes_decrypt_text',
-    'aes_decrypt',
-    'aes_ecb_decrypt',
-    'aes_gcm_decrypt_and_verify',
-    'aes_gcm_decrypt_and_verify_bytes',
-
     'aes_cbc_encrypt',
     'aes_cbc_encrypt_bytes',
+    'aes_ctr_decrypt',
     'aes_ctr_encrypt',
+    'aes_decrypt',
+    'aes_decrypt_text',
+    'aes_ecb_decrypt',
     'aes_ecb_encrypt',
     'aes_encrypt',
-
+    'aes_gcm_decrypt_and_verify',
+    'aes_gcm_decrypt_and_verify_bytes',
     'key_expansion',
     'pad_block',
     'pkcs7_padding',
4 changes: 2 additions & 2 deletions lib/yt_dlp/cookies.py
@@ -1276,8 +1276,8 @@ def open(self, file, *, write=False):
     def _really_save(self, f, ignore_discard, ignore_expires):
         now = time.time()
         for cookie in self:
-            if (not ignore_discard and cookie.discard
-                    or not ignore_expires and cookie.is_expired(now)):
+            if ((not ignore_discard and cookie.discard)
+                    or (not ignore_expires and cookie.is_expired(now))):
                 continue
             name, value = cookie.name, cookie.value
             if value is None:
8 changes: 4 additions & 4 deletions lib/yt_dlp/downloader/hls.py
@@ -119,12 +119,12 @@ def real_download(self, filename, info_dict):
             self.to_screen(f'[{self.FD_NAME}] Fragment downloads will be delegated to {real_downloader.get_basename()}')

         def is_ad_fragment_start(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=ad' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',ad')))

         def is_ad_fragment_end(s):
-            return (s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s
-                    or s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment'))
+            return ((s.startswith('#ANVATO-SEGMENT-INFO') and 'type=master' in s)
+                    or (s.startswith('#UPLYNK-SEGMENT') and s.endswith(',segment')))

         fragments = []

4 changes: 2 additions & 2 deletions lib/yt_dlp/downloader/youtube_live_chat.py
@@ -123,8 +123,8 @@ def download_and_parse_fragment(url, frag_index, request_data=None, headers=None
                         data,
                         lambda x: x['continuationContents']['liveChatContinuation'], dict) or {}

-                    func = (info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live
-                            or frag_index == 1 and try_refresh_replay_beginning
+                    func = ((info_dict['protocol'] == 'youtube_live_chat' and parse_actions_live)
+                            or (frag_index == 1 and try_refresh_replay_beginning)
                             or parse_actions_replay)
                     return (True, *func(live_chat_continuation))
                 except HTTPError as err:
47 changes: 23 additions & 24 deletions lib/yt_dlp/extractor/bilibili.py
@@ -18,7 +18,6 @@
     InAdvancePagedList,
     OnDemandPagedList,
     bool_or_none,
-    clean_html,
     determine_ext,
     filter_dict,
     float_or_none,
@@ -639,40 +638,36 @@ def _real_extract(self, url):
         headers['Referer'] = url

         initial_state = self._search_json(r'window\.__INITIAL_STATE__\s*=', webpage, 'initial state', video_id)
+
+        if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
+            self.raise_login_required()
+        if traverse_obj(initial_state, ('error', 'trueCode')) == -404:
+            raise ExtractorError(
+                'This video may be deleted or geo-restricted. '
+                'You might want to try a VPN or a proxy server (with --proxy)', expected=True)
+
         is_festival = 'videoData' not in initial_state
         if is_festival:
             video_data = initial_state['videoInfo']
         else:
-            play_info_obj = self._search_json(
-                r'window\.__playinfo__\s*=', webpage, 'play info', video_id, fatal=False)
-            if not play_info_obj:
-                if traverse_obj(initial_state, ('error', 'trueCode')) == -403:
-                    self.raise_login_required()
-                if traverse_obj(initial_state, ('error', 'trueCode')) == -404:
-                    raise ExtractorError(
-                        'This video may be deleted or geo-restricted. '
-                        'You might want to try a VPN or a proxy server (with --proxy)', expected=True)
-            play_info = traverse_obj(play_info_obj, ('data', {dict}))
-            if not play_info:
-                if traverse_obj(play_info_obj, 'code') == 87007:
-                    toast = get_element_by_class('tips-toast', webpage) or ''
-                    msg = clean_html(
-                        f'{get_element_by_class("belongs-to", toast) or ""},'
-                        + (get_element_by_class('level', toast) or ''))
-                    raise ExtractorError(
-                        f'This is a supporter-only video: {msg}. {self._login_hint()}', expected=True)
-                raise ExtractorError('Failed to extract play info')
             video_data = initial_state['videoData']

+        if video_data.get('is_upower_exclusive'):
+            high_level = traverse_obj(initial_state, ('elecFullInfo', 'show_info', 'high_level', {dict})) or {}
+            raise ExtractorError(
+                'This is a supporter-only video: '
+                f'{join_nonempty("title", "sub_title", from_dict=high_level, delim=",")}. '
+                f'{self._login_hint()}', expected=True)
+
         video_id, title = video_data['bvid'], video_data.get('title')

         # Bilibili anthologies are similar to playlists but all videos share the same video ID as the anthology itself.
-        page_list_json = not is_festival and traverse_obj(
+        page_list_json = (not is_festival and traverse_obj(
             self._download_json(
                 'https://api.bilibili.com/x/player/pagelist', video_id,
                 fatal=False, query={'bvid': video_id, 'jsonp': 'jsonp'},
                 note='Extracting videos in anthology', headers=headers),
-            'data', expected_type=list) or []
+            'data', expected_type=list)) or []
         is_anthology = len(page_list_json) > 1

         part_id = int_or_none(parse_qs(url).get('p', [None])[-1])
@@ -689,10 +684,14 @@ def _real_extract(self, url):
         old_video_id = format_field(aid, None, f'%s_part{part_id or 1}')
         cid = traverse_obj(video_data, ('pages', part_id - 1, 'cid')) if part_id else video_data.get('cid')

+        play_info = (
+            traverse_obj(
+                self._search_json(r'window\.__playinfo__\s*=', webpage, 'play info', video_id, default=None),
+                ('data', {dict}))
+            or self._download_playinfo(video_id, cid, headers=headers))
+
         festival_info = {}
         if is_festival:
-            play_info = self._download_playinfo(video_id, cid, headers=headers)
-
             festival_info = traverse_obj(initial_state, {
                 'uploader': ('videoInfo', 'upName'),
                 'uploader_id': ('videoInfo', 'upMid', {str_or_none}),
2 changes: 1 addition & 1 deletion lib/yt_dlp/extractor/common.py
@@ -3803,7 +3803,7 @@ def _cookies_passed(self):
     def mark_watched(self, *args, **kwargs):
         if not self.get_param('mark_watched', False):
             return
-        if self.supports_login() and self._get_login_info()[0] is not None or self._cookies_passed:
+        if (self.supports_login() and self._get_login_info()[0] is not None) or self._cookies_passed:
             self._mark_watched(*args, **kwargs)

     def _mark_watched(self, *args, **kwargs):
60 changes: 47 additions & 13 deletions lib/yt_dlp/extractor/duoplay.py
@@ -5,15 +5,16 @@
     get_element_text_and_html_by_tag,
     int_or_none,
     join_nonempty,
+    parse_qs,
     str_or_none,
     try_call,
     unified_timestamp,
 )
-from ..utils.traversal import traverse_obj
+from ..utils.traversal import traverse_obj, value


 class DuoplayIE(InfoExtractor):
-    _VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)/[\w-]+/?(?:\?(?:[^#]+&)?ep=(?P<ep>\d+))?'
+    _VALID_URL = r'https?://duoplay\.ee/(?P<id>\d+)(?:[/?#]|$)'
     _TESTS = [{
         'note': 'Siberi võmm S02E12',
         'url': 'https://duoplay.ee/4312/siberi-vomm?ep=24',
@@ -34,15 +35,16 @@ class DuoplayIE(InfoExtractor):
             'episode_number': 12,
             'episode_id': '24',
         },
+        'skip': 'No video found',
     }, {
         'note': 'Empty title',
         'url': 'https://duoplay.ee/17/uhikarotid?ep=14',
-        'md5': '6aca68be71112314738dd17cced7f8bf',
+        'md5': 'cba9f5dabf2582b224d80ac44fb80e47',
         'info_dict': {
             'id': '17_14',
             'ext': 'mp4',
-            'title': 'Ühikarotid',
-            'thumbnail': r're:https://.+\.jpg(?:\?c=\d+)?$',
+            'title': 'Episode 14',
+            'thumbnail': r're:https?://.+\.jpg',
             'description': 'md5:4719b418e058c209def41d48b601276e',
             'upload_date': '20100916',
             'timestamp': 1284661800,
@@ -52,6 +54,8 @@ class DuoplayIE(InfoExtractor):
             'season_number': 2,
             'episode_id': '14',
             'release_year': 2010,
+            'episode': 'Episode 14',
+            'episode_number': 14,
         },
     }, {
         'note': 'Movie without expiry',
@@ -68,36 +72,66 @@ class DuoplayIE(InfoExtractor):
             'timestamp': 1671054000,
             'release_year': 2018,
         },
+        'skip': 'No video found',
+    }, {
+        'note': 'Episode url without show name',
+        'url': 'https://duoplay.ee/9644?ep=185',
+        'md5': '63f324b4fe2dbd8194dca16a6d52184a',
+        'info_dict': {
+            'id': '9644_185',
+            'ext': 'mp4',
+            'title': 'Episode 185',
+            'thumbnail': r're:https?://.+\.jpg',
+            'description': 'md5:ed25ba4e9e5d54bc291a4a0cdd241467',
+            'upload_date': '20241120',
+            'timestamp': 1732077000,
+            'episode': 'Episode 63',
+            'episode_id': '185',
+            'episode_number': 63,
+            'season': 'Season 2',
+            'season_number': 2,
+            'series': 'Telehommik',
+            'series_id': '9644',
+        },
     }]

     def _real_extract(self, url):
-        telecast_id, episode = self._match_valid_url(url).group('id', 'ep')
+        telecast_id = self._match_id(url)
+        episode = traverse_obj(parse_qs(url), ('ep', 0, {int_or_none}, {str_or_none}))
         video_id = join_nonempty(telecast_id, episode, delim='_')
         webpage = self._download_webpage(url, video_id)
         video_player = try_call(lambda: extract_attributes(
             get_element_text_and_html_by_tag('video-player', webpage)[1]))
         if not video_player or not video_player.get('manifest-url'):
             raise ExtractorError('No video found', expected=True)

+        manifest_url = video_player['manifest-url']
+        session_token = self._download_json(
+            'https://sts.postimees.ee/session/register', video_id, 'Registering session',
+            'Unable to register session', headers={
+                'Accept': 'application/json',
+                'X-Original-URI': manifest_url,
+            })['session']
+
         episode_attr = self._parse_json(video_player.get(':episode') or '', video_id, fatal=False) or {}

         return {
             'id': video_id,
-            'formats': self._extract_m3u8_formats(video_player['manifest-url'], video_id, 'mp4'),
+            'formats': self._extract_m3u8_formats(manifest_url, video_id, 'mp4', query={'s': session_token}),
             **traverse_obj(episode_attr, {
-                'title': 'title',
-                'description': 'synopsis',
+                'title': ('title', {str}),
+                'description': ('synopsis', {str}),
                 'thumbnail': ('images', 'original'),
                 'timestamp': ('airtime', {lambda x: unified_timestamp(x + ' +0200')}),
-                'cast': ('cast', {lambda x: x.split(', ')}),
+                'cast': ('cast', filter, {lambda x: x.split(', ')}),
                 'release_year': ('year', {int_or_none}),
             }),
             **(traverse_obj(episode_attr, {
-                'title': (None, ('subtitle', ('episode_nr', {lambda x: f'Episode {x}' if x else None}))),
-                'series': 'title',
+                'title': (None, (('subtitle', {str}, filter), {value(f'Episode {episode}' if episode else None)})),
+                'series': ('title', {str}),
                 'series_id': ('telecast_id', {str_or_none}),
                 'season_number': ('season_id', {int_or_none}),
-                'episode': 'subtitle',
+                'episode': ('subtitle', {str}, filter),
                 'episode_number': ('episode_nr', {int_or_none}),
                 'episode_id': ('episode_id', {str_or_none}),
             }, get_all=False) if episode_attr.get('category') != 'movies' else {}),
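
The duoplay change above also introduces a session-registration step: before fetching the HLS manifest, the extractor requests a token from sts.postimees.ee with the manifest URL in the `X-Original-URI` header and appends it as the `s` query parameter. A rough standalone sketch of that exchange (the manifest URL below is a hypothetical placeholder, and direct use of the endpoint outside the extractor is an assumption):

    import json
    import urllib.request

    manifest_url = 'https://example.invalid/master.m3u8'  # hypothetical manifest URL
    req = urllib.request.Request(
        'https://sts.postimees.ee/session/register',
        headers={'Accept': 'application/json', 'X-Original-URI': manifest_url})
    # Per the diff, the registration endpoint returns JSON with a 'session' token
    token = json.loads(urllib.request.urlopen(req).read())['session']
    playable_url = f'{manifest_url}?s={token}'  # token passed as the `s` query parameter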
6 changes: 3 additions & 3 deletions lib/yt_dlp/extractor/funimation.py
@@ -193,9 +193,9 @@ def _real_extract(self, url):

         for lang, version, fmt in self._get_experiences(episode):
             experience_id = str(fmt['experienceId'])
-            if (only_initial_experience and experience_id != initial_experience_id
-                    or requested_languages and lang.lower() not in requested_languages
-                    or requested_versions and version.lower() not in requested_versions):
+            if ((only_initial_experience and experience_id != initial_experience_id)
+                    or (requested_languages and lang.lower() not in requested_languages)
+                    or (requested_versions and version.lower() not in requested_versions)):
                 continue
             thumbnails.append({'url': fmt.get('poster')})
             duration = max(duration, fmt.get('duration', 0))
7 changes: 4 additions & 3 deletions lib/yt_dlp/extractor/microsoftembed.py
@@ -26,6 +26,7 @@ class MicrosoftEmbedIE(InfoExtractor):
             'timestamp': 1631658316,
             'upload_date': '20210914',
         },
+        'expected_warnings': ['Failed to parse XML: syntax error: line 1, column 0'],
     }]
     _API_URL = 'https://prod-video-cms-rt-microsoft-com.akamaized.net/vhs/api/videos/'

@@ -36,11 +37,11 @@ def _real_extract(self, url):
         formats = []
         for source_type, source in metadata['streams'].items():
             if source_type == 'smooth_Streaming':
-                formats.extend(self._extract_ism_formats(source['url'], video_id, 'mss'))
+                formats.extend(self._extract_ism_formats(source['url'], video_id, 'mss', fatal=False))
             elif source_type == 'apple_HTTP_Live_Streaming':
-                formats.extend(self._extract_m3u8_formats(source['url'], video_id, 'mp4'))
+                formats.extend(self._extract_m3u8_formats(source['url'], video_id, 'mp4', fatal=False))
             elif source_type == 'mPEG_DASH':
-                formats.extend(self._extract_mpd_formats(source['url'], video_id))
+                formats.extend(self._extract_mpd_formats(source['url'], video_id, fatal=False))
             else:
                 formats.append({
                     'format_id': source_type,
(6 more changed files not shown)
