author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-05 09:06:10 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-05 09:06:10 +0000
commit     137f07c27a88b07df57df5b2cdebebe3c7722bac (patch)
tree       4ebdcbea6b540f69c40ba0be6ac766008030d149 /yt_dlp/downloader
parent     Adding debian version 2024.05.27-1. (diff)
Merging upstream version 2024.07.01.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'yt_dlp/downloader')
-rw-r--r--  yt_dlp/downloader/common.py             |  2
-rw-r--r--  yt_dlp/downloader/external.py           | 28
-rw-r--r--  yt_dlp/downloader/f4m.py                | 22
-rw-r--r--  yt_dlp/downloader/fragment.py           |  8
-rw-r--r--  yt_dlp/downloader/hls.py                | 33
-rw-r--r--  yt_dlp/downloader/http.py               | 14
-rw-r--r--  yt_dlp/downloader/ism.py                |  2
-rw-r--r--  yt_dlp/downloader/mhtml.py              | 53
-rw-r--r--  yt_dlp/downloader/niconico.py           | 14
-rw-r--r--  yt_dlp/downloader/rtmp.py               |  6
-rw-r--r--  yt_dlp/downloader/youtube_live_chat.py  |  2
11 files changed, 86 insertions, 98 deletions
diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py
index 65a0d6f..2e3ea2f 100644
--- a/yt_dlp/downloader/common.py
+++ b/yt_dlp/downloader/common.py
@@ -404,7 +404,7 @@ class FileDownloader:
 
     def report_resuming_byte(self, resume_len):
         """Report attempt to resume at given byte."""
-        self.to_screen('[download] Resuming download at byte %s' % resume_len)
+        self.to_screen(f'[download] Resuming download at byte {resume_len}')
 
     def report_retry(self, err, count, retries, frag_index=NO_DEFAULT, fatal=True):
         """Report retry"""
diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py
index 8b0b94e..ae23729 100644
--- a/yt_dlp/downloader/external.py
+++ b/yt_dlp/downloader/external.py
@@ -1,4 +1,5 @@
 import enum
+import functools
 import json
 import os
 import re
@@ -9,7 +10,6 @@ import time
 import uuid
 
 from .fragment import FragmentFD
-from ..compat import functools
 from ..networking import Request
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
@@ -55,7 +55,7 @@ class ExternalFD(FragmentFD):
                 # correct and expected termination thus all postprocessing
                 # should take place
                 retval = 0
-                self.to_screen('[%s] Interrupted by user' % self.get_basename())
+                self.to_screen(f'[{self.get_basename()}] Interrupted by user')
             finally:
                 if self._cookies_tempfile:
                     self.try_remove(self._cookies_tempfile)
@@ -108,7 +108,7 @@ class ExternalFD(FragmentFD):
         return all((
             not info_dict.get('to_stdout') or Features.TO_STDOUT in cls.SUPPORTED_FEATURES,
             '+' not in info_dict['protocol'] or Features.MULTIPLE_FORMATS in cls.SUPPORTED_FEATURES,
-            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url'),
+            not traverse_obj(info_dict, ('hls_aes', ...), 'extra_param_to_segment_url', 'extra_param_to_key_url'),
             all(proto in cls.SUPPORTED_PROTOCOLS for proto in info_dict['protocol'].split('+')),
         ))
 
@@ -172,7 +172,7 @@ class ExternalFD(FragmentFD):
         decrypt_fragment = self.decrypter(info_dict)
         dest, _ = self.sanitize_open(tmpfilename, 'wb')
         for frag_index, fragment in enumerate(info_dict['fragments']):
-            fragment_filename = '%s-Frag%d' % (tmpfilename, frag_index)
+            fragment_filename = f'{tmpfilename}-Frag{frag_index}'
             try:
                 src, _ = self.sanitize_open(fragment_filename, 'rb')
             except OSError as err:
@@ -186,7 +186,7 @@ class ExternalFD(FragmentFD):
             if not self.params.get('keep_fragments', False):
                 self.try_remove(encodeFilename(fragment_filename))
         dest.close()
-        self.try_remove(encodeFilename('%s.frag.urls' % tmpfilename))
+        self.try_remove(encodeFilename(f'{tmpfilename}.frag.urls'))
         return 0
 
     def _call_process(self, cmd, info_dict):
@@ -336,11 +336,11 @@ class Aria2cFD(ExternalFD):
 
         if 'fragments' in info_dict:
             cmd += ['--uri-selector=inorder']
-            url_list_file = '%s.frag.urls' % tmpfilename
+            url_list_file = f'{tmpfilename}.frag.urls'
             url_list = []
             for frag_index, fragment in enumerate(info_dict['fragments']):
-                fragment_filename = '%s-Frag%d' % (os.path.basename(tmpfilename), frag_index)
-                url_list.append('%s\n\tout=%s' % (fragment['url'], self._aria2c_filename(fragment_filename)))
+                fragment_filename = f'{os.path.basename(tmpfilename)}-Frag{frag_index}'
+                url_list.append('{}\n\tout={}'.format(fragment['url'], self._aria2c_filename(fragment_filename)))
             stream, _ = self.sanitize_open(url_list_file, 'wb')
             stream.write('\n'.join(url_list).encode())
             stream.close()
@@ -357,7 +357,7 @@ class Aria2cFD(ExternalFD):
             'id': sanitycheck,
             'method': method,
             'params': [f'token:{rpc_secret}', *params],
-        }).encode('utf-8')
+        }).encode()
         request = Request(
             f'http://localhost:{rpc_port}/jsonrpc',
             data=d, headers={
@@ -416,7 +416,7 @@ class Aria2cFD(ExternalFD):
                 'total_bytes_estimate': total,
                 'eta': (total - downloaded) / (speed or 1),
                 'fragment_index': min(frag_count, len(completed) + 1) if fragmented else None,
-                'elapsed': time.time() - started
+                'elapsed': time.time() - started,
             })
             self._hook_progress(status, info_dict)
 
@@ -509,12 +509,12 @@ class FFmpegFD(ExternalFD):
         proxy = self.params.get('proxy')
         if proxy:
             if not re.match(r'^[\da-zA-Z]+://', proxy):
-                proxy = 'http://%s' % proxy
+                proxy = f'http://{proxy}'
 
             if proxy.startswith('socks'):
                 self.report_warning(
-                    '%s does not support SOCKS proxies. Downloading is likely to fail. '
-                    'Consider adding --hls-prefer-native to your command.' % self.get_basename())
+                    f'{self.get_basename()} does not support SOCKS proxies. Downloading is likely to fail. '
+                    'Consider adding --hls-prefer-native to your command.')
 
             # Since December 2015 ffmpeg supports -http_proxy option (see
             # http://git.videolan.org/?p=ffmpeg.git;a=commit;h=b4eb1f29ebddd60c41a2eb39f5af701e38e0d3fd)
@@ -575,7 +575,7 @@ class FFmpegFD(ExternalFD):
             if end_time:
                 args += ['-t', str(end_time - start_time)]
 
-            args += self._configuration_args((f'_i{i + 1}', '_i')) + ['-i', fmt['url']]
+            args += [*self._configuration_args((f'_i{i + 1}', '_i')), '-i', fmt['url']]
 
         if not (start_time or end_time) or not self.params.get('force_keyframes_at_cuts'):
             args += ['-c', 'copy']
diff --git a/yt_dlp/downloader/f4m.py b/yt_dlp/downloader/f4m.py
index 28cbba0..22d0ebd 100644
--- a/yt_dlp/downloader/f4m.py
+++ b/yt_dlp/downloader/f4m.py
@@ -67,12 +67,12 @@ class FlvReader(io.BytesIO):
         self.read_bytes(3)
         quality_entry_count = self.read_unsigned_char()
         # QualityEntryCount
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()
 
         segment_run_count = self.read_unsigned_int()
         segments = []
-        for i in range(segment_run_count):
+        for _ in range(segment_run_count):
             first_segment = self.read_unsigned_int()
             fragments_per_segment = self.read_unsigned_int()
             segments.append((first_segment, fragments_per_segment))
@@ -91,12 +91,12 @@ class FlvReader(io.BytesIO):
 
         quality_entry_count = self.read_unsigned_char()
         # QualitySegmentUrlModifiers
-        for i in range(quality_entry_count):
+        for _ in range(quality_entry_count):
             self.read_string()
 
         fragments_count = self.read_unsigned_int()
         fragments = []
-        for i in range(fragments_count):
+        for _ in range(fragments_count):
             first = self.read_unsigned_int()
             first_ts = self.read_unsigned_long_long()
             duration = self.read_unsigned_int()
@@ -135,11 +135,11 @@ class FlvReader(io.BytesIO):
         self.read_string()  # MovieIdentifier
         server_count = self.read_unsigned_char()
         # ServerEntryTable
-        for i in range(server_count):
+        for _ in range(server_count):
             self.read_string()
         quality_count = self.read_unsigned_char()
         # QualityEntryTable
-        for i in range(quality_count):
+        for _ in range(quality_count):
             self.read_string()
         # DrmData
         self.read_string()
@@ -148,14 +148,14 @@ class FlvReader(io.BytesIO):
 
         segments_count = self.read_unsigned_char()
         segments = []
-        for i in range(segments_count):
+        for _ in range(segments_count):
             box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'asrt'
             segment = FlvReader(box_data).read_asrt()
             segments.append(segment)
         fragments_run_count = self.read_unsigned_char()
         fragments = []
-        for i in range(fragments_run_count):
+        for _ in range(fragments_run_count):
             box_size, box_type, box_data = self.read_box_info()
             assert box_type == b'afrt'
             fragments.append(FlvReader(box_data).read_afrt())
@@ -309,7 +309,7 @@ class F4mFD(FragmentFD):
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
         requested_bitrate = info_dict.get('tbr')
-        self.to_screen('[%s] Downloading f4m manifest' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading f4m manifest')
 
         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.url
@@ -326,8 +326,8 @@ class F4mFD(FragmentFD):
             formats = sorted(formats, key=lambda f: f[0])
             rate, media = formats[-1]
         else:
-            rate, media = list(filter(
-                lambda f: int(f[0]) == requested_bitrate, formats))[0]
+            rate, media = next(filter(
+                lambda f: int(f[0]) == requested_bitrate, formats))
 
         # Prefer baseURL for relative URLs as per 11.2 of F4M 3.0 spec.
         man_base_url = get_base_url(doc) or man_url
diff --git a/yt_dlp/downloader/fragment.py b/yt_dlp/downloader/fragment.py
index b4f003d..0d00196 100644
--- a/yt_dlp/downloader/fragment.py
+++ b/yt_dlp/downloader/fragment.py
@@ -199,7 +199,7 @@ class FragmentFD(FileDownloader):
                         '.ytdl file is corrupt' if is_corrupt else
                         'Inconsistent state of incomplete fragment download')
                     self.report_warning(
-                        '%s. Restarting from the beginning ...' % message)
+                        f'{message}. Restarting from the beginning ...')
                     ctx['fragment_index'] = resume_len = 0
                     if 'ytdl_corrupt' in ctx:
                         del ctx['ytdl_corrupt']
@@ -366,10 +366,10 @@ class FragmentFD(FileDownloader):
         return decrypt_fragment
 
     def download_and_append_fragments_multiple(self, *args, **kwargs):
-        '''
+        """
        @params (ctx1, fragments1, info_dict1), (ctx2, fragments2, info_dict2), ...
        all args must be either tuple or list
-        '''
+        """
         interrupt_trigger = [True]
         max_progress = len(args)
         if max_progress == 1:
@@ -424,7 +424,7 @@ class FragmentFD(FileDownloader):
             finally:
                 tpe.shutdown(wait=True)
         if not interrupt_trigger[0] and not is_live:
-            raise KeyboardInterrupt()
+            raise KeyboardInterrupt
         # we expect the user wants to stop and DO WANT the preceding postprocessors to run;
         # so returning a intermediate result here instead of KeyboardInterrupt on live
         return result
diff --git a/yt_dlp/downloader/hls.py b/yt_dlp/downloader/hls.py
index 4ac5d99..0a00d5d 100644
--- a/yt_dlp/downloader/hls.py
+++ b/yt_dlp/downloader/hls.py
@@ -72,7 +72,7 @@ class HlsFD(FragmentFD):
 
     def real_download(self, filename, info_dict):
         man_url = info_dict['url']
-        self.to_screen('[%s] Downloading m3u8 manifest' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading m3u8 manifest')
 
         urlh = self.ydl.urlopen(self._prepare_url(info_dict, man_url))
         man_url = urlh.url
@@ -160,10 +160,12 @@ class HlsFD(FragmentFD):
         extra_state = ctx.setdefault('extra_state', {})
 
         format_index = info_dict.get('format_index')
-        extra_query = None
-        extra_param_to_segment_url = info_dict.get('extra_param_to_segment_url')
-        if extra_param_to_segment_url:
-            extra_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+        extra_segment_query = None
+        if extra_param_to_segment_url := info_dict.get('extra_param_to_segment_url'):
+            extra_segment_query = urllib.parse.parse_qs(extra_param_to_segment_url)
+        extra_key_query = None
+        if extra_param_to_key_url := info_dict.get('extra_param_to_key_url'):
+            extra_key_query = urllib.parse.parse_qs(extra_param_to_key_url)
         i = 0
         media_sequence = 0
         decrypt_info = {'METHOD': 'NONE'}
@@ -190,8 +192,8 @@ class HlsFD(FragmentFD):
                     if frag_index <= ctx['fragment_index']:
                         continue
                     frag_url = urljoin(man_url, line)
-                    if extra_query:
-                        frag_url = update_url_query(frag_url, extra_query)
+                    if extra_segment_query:
+                        frag_url = update_url_query(frag_url, extra_segment_query)
 
                     fragments.append({
                         'frag_index': frag_index,
@@ -212,8 +214,8 @@ class HlsFD(FragmentFD):
                     frag_index += 1
                     map_info = parse_m3u8_attributes(line[11:])
                     frag_url = urljoin(man_url, map_info.get('URI'))
-                    if extra_query:
-                        frag_url = update_url_query(frag_url, extra_query)
+                    if extra_segment_query:
+                        frag_url = update_url_query(frag_url, extra_segment_query)
 
                     if map_info.get('BYTERANGE'):
                         splitted_byte_range = map_info.get('BYTERANGE').split('@')
@@ -228,7 +230,7 @@ class HlsFD(FragmentFD):
                         'url': frag_url,
                         'decrypt_info': decrypt_info,
                         'byte_range': byte_range,
-                        'media_sequence': media_sequence
+                        'media_sequence': media_sequence,
                     })
                     media_sequence += 1
 
@@ -244,8 +246,10 @@ class HlsFD(FragmentFD):
                             decrypt_info['KEY'] = external_aes_key
                         else:
                             decrypt_info['URI'] = urljoin(man_url, decrypt_info['URI'])
-                            if extra_query:
-                                decrypt_info['URI'] = update_url_query(decrypt_info['URI'], extra_query)
+                            if extra_key_query or extra_segment_query:
+                                # Fall back to extra_segment_query to key for backwards compat
+                                decrypt_info['URI'] = update_url_query(
+                                    decrypt_info['URI'], extra_key_query or extra_segment_query)
                             if decrypt_url != decrypt_info['URI']:
                                 decrypt_info['KEY'] = None
 
@@ -350,9 +354,8 @@ class HlsFD(FragmentFD):
                     # XXX: this should probably be silent as well
                     # or verify that all segments contain the same data
                     self.report_warning(bug_reports_message(
-                        'Discarding a %s block found in the middle of the stream; '
-                        'if the subtitles display incorrectly,'
-                        % (type(block).__name__)))
+                        f'Discarding a {type(block).__name__} block found in the middle of the stream; '
+                        'if the subtitles display incorrectly,'))
                     continue
                 block.write_into(output)
 
diff --git a/yt_dlp/downloader/http.py b/yt_dlp/downloader/http.py
index 693828b..c016579 100644
--- a/yt_dlp/downloader/http.py
+++ b/yt_dlp/downloader/http.py
@@ -176,7 +176,7 @@ class HttpFD(FileDownloader):
                             'downloaded_bytes': ctx.resume_len,
                             'total_bytes': ctx.resume_len,
                         }, info_dict)
-                        raise SucceedDownload()
+                        raise SucceedDownload
                     else:
                         # The length does not match, we start the download over
                         self.report_unable_to_resume()
@@ -194,7 +194,7 @@ class HttpFD(FileDownloader):
 
         def close_stream():
             if ctx.stream is not None:
-                if not ctx.tmpfilename == '-':
+                if ctx.tmpfilename != '-':
                     ctx.stream.close()
                 ctx.stream = None
 
@@ -268,20 +268,20 @@ class HttpFD(FileDownloader):
                     ctx.filename = self.undo_temp_name(ctx.tmpfilename)
                     self.report_destination(ctx.filename)
                 except OSError as err:
-                    self.report_error('unable to open for writing: %s' % str(err))
+                    self.report_error(f'unable to open for writing: {err}')
                     return False
 
                 if self.params.get('xattr_set_filesize', False) and data_len is not None:
                     try:
                         write_xattr(ctx.tmpfilename, 'user.ytdl.filesize', str(data_len).encode())
                     except (XAttrUnavailableError, XAttrMetadataError) as err:
-                        self.report_error('unable to set filesize xattr: %s' % str(err))
+                        self.report_error(f'unable to set filesize xattr: {err}')
 
             try:
                 ctx.stream.write(data_block)
             except OSError as err:
                 self.to_stderr('\n')
-                self.report_error('unable to write data: %s' % str(err))
+                self.report_error(f'unable to write data: {err}')
                 return False
 
             # Apply rate limit
@@ -327,7 +327,7 @@ class HttpFD(FileDownloader):
                 elif now - ctx.throttle_start > 3:
                     if ctx.stream is not None and ctx.tmpfilename != '-':
                         ctx.stream.close()
-                    raise ThrottledDownload()
+                    raise ThrottledDownload
             elif speed:
                 ctx.throttle_start = None
 
@@ -338,7 +338,7 @@ class HttpFD(FileDownloader):
 
         if not is_test and ctx.chunk_size and ctx.content_len is not None and byte_counter < ctx.content_len:
             ctx.resume_len = byte_counter
-            raise NextFragment()
+            raise NextFragment
 
         if ctx.tmpfilename != '-':
             ctx.stream.close()
diff --git a/yt_dlp/downloader/ism.py b/yt_dlp/downloader/ism.py
index dd688f5..62c3a3b 100644
--- a/yt_dlp/downloader/ism.py
+++ b/yt_dlp/downloader/ism.py
@@ -251,7 +251,7 @@ class IsmFD(FragmentFD):
         skip_unavailable_fragments = self.params.get('skip_unavailable_fragments', True)
 
         frag_index = 0
-        for i, segment in enumerate(segments):
+        for segment in segments:
             frag_index += 1
             if frag_index <= ctx['fragment_index']:
                 continue
diff --git a/yt_dlp/downloader/mhtml.py b/yt_dlp/downloader/mhtml.py
index d977dce..3d4f2d7 100644
--- a/yt_dlp/downloader/mhtml.py
+++ b/yt_dlp/downloader/mhtml.py
@@ -10,7 +10,7 @@ from ..version import __version__ as YT_DLP_VERSION
 
 
 class MhtmlFD(FragmentFD):
-    _STYLESHEET = """\
+    _STYLESHEET = '''\
 html, body {
     margin: 0;
     padding: 0;
@@ -45,7 +45,7 @@ body > figure > img {
     max-width: 100%;
     max-height: calc(100vh - 5em);
 }
-"""
+'''
     _STYLESHEET = re.sub(r'\s+', ' ', _STYLESHEET)
     _STYLESHEET = re.sub(r'\B \B|(?<=[\w\-]) (?=[^\w\-])|(?<=[^\w\-]) (?=[\w\-])', '', _STYLESHEET)
 
@@ -57,24 +57,19 @@ body > figure > img {
         )).decode('us-ascii') + '?='
 
     def _gen_cid(self, i, fragment, frag_boundary):
-        return '%u.%s@yt-dlp.github.io.invalid' % (i, frag_boundary)
+        return f'{i}.{frag_boundary}@yt-dlp.github.io.invalid'
 
     def _gen_stub(self, *, fragments, frag_boundary, title):
         output = io.StringIO()
 
-        output.write((
+        output.write(
             '<!DOCTYPE html>'
             '<html>'
             '<head>'
-            ''  '<meta name="generator" content="yt-dlp {version}">'
-            ''  '<title>{title}</title>'
-            ''  '<style>{styles}</style>'
-            '<body>'
-        ).format(
-            version=escapeHTML(YT_DLP_VERSION),
-            styles=self._STYLESHEET,
-            title=escapeHTML(title)
-        ))
+            f'<meta name="generator" content="yt-dlp {escapeHTML(YT_DLP_VERSION)}">'
+            f'<title>{escapeHTML(title)}</title>'
+            f'<style>{self._STYLESHEET}</style>'
+            '<body>')
 
         t0 = 0
         for i, frag in enumerate(fragments):
@@ -87,15 +82,12 @@ body > figure > img {
                     num=i + 1,
                     t0=srt_subtitles_timecode(t0),
                     t1=srt_subtitles_timecode(t1),
-                    duration=formatSeconds(frag['duration'], msec=True)
+                    duration=formatSeconds(frag['duration'], msec=True),
                 ))
             except (KeyError, ValueError, TypeError):
                 t1 = None
-                output.write((
-                    '<figcaption>Slide #{num}</figcaption>'
-                ).format(num=i + 1))
-            output.write('<img src="cid:{cid}">'.format(
-                cid=self._gen_cid(i, frag, frag_boundary)))
+                output.write(f'<figcaption>Slide #{i + 1}</figcaption>')
+            output.write(f'<img src="cid:{self._gen_cid(i, frag, frag_boundary)}">')
             output.write('</figure>')
             t0 = t1
 
@@ -126,31 +118,24 @@ body > figure > img {
         stub = self._gen_stub(
             fragments=fragments,
             frag_boundary=frag_boundary,
-            title=title
+            title=title,
         )
 
         ctx['dest_stream'].write((
             'MIME-Version: 1.0\r\n'
             'From: <nowhere@yt-dlp.github.io.invalid>\r\n'
             'To: <nowhere@yt-dlp.github.io.invalid>\r\n'
-            'Subject: {title}\r\n'
+            f'Subject: {self._escape_mime(title)}\r\n'
             'Content-type: multipart/related; '
-            ''  'boundary="{boundary}"; '
-            ''  'type="text/html"\r\n'
-            'X.yt-dlp.Origin: {origin}\r\n'
+            f'boundary="{frag_boundary}"; '
+            'type="text/html"\r\n'
+            f'X.yt-dlp.Origin: {origin}\r\n'
             '\r\n'
-            '--{boundary}\r\n'
+            f'--{frag_boundary}\r\n'
             'Content-Type: text/html; charset=utf-8\r\n'
-            'Content-Length: {length}\r\n'
+            f'Content-Length: {len(stub)}\r\n'
             '\r\n'
-            '{stub}\r\n'
-        ).format(
-            origin=origin,
-            boundary=frag_boundary,
-            length=len(stub),
-            title=self._escape_mime(title),
-            stub=stub
-        ).encode())
+            f'{stub}\r\n').encode())
         extra_state['header_written'] = True
 
         for i, fragment in enumerate(fragments):
diff --git a/yt_dlp/downloader/niconico.py b/yt_dlp/downloader/niconico.py
index fef8bff..462c6e2 100644
--- a/yt_dlp/downloader/niconico.py
+++ b/yt_dlp/downloader/niconico.py
@@ -15,7 +15,7 @@ class NiconicoDmcFD(FileDownloader):
     def real_download(self, filename, info_dict):
         from ..extractor.niconico import NiconicoIE
 
-        self.to_screen('[%s] Downloading from DMC' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading from DMC')
         ie = NiconicoIE(self.ydl)
         info_dict, heartbeat_info_dict = ie._get_heartbeat_info(info_dict)
 
@@ -34,7 +34,7 @@ class NiconicoDmcFD(FileDownloader):
             try:
                 self.ydl.urlopen(request).read()
             except Exception:
-                self.to_screen('[%s] Heartbeat failed' % self.FD_NAME)
+                self.to_screen(f'[{self.FD_NAME}] Heartbeat failed')
 
             with heartbeat_lock:
                 if not download_complete:
@@ -85,14 +85,14 @@ class NiconicoLiveFD(FileDownloader):
                             'quality': live_quality,
                             'protocol': 'hls+fmp4',
                             'latency': live_latency,
-                            'chasePlay': False
+                            'chasePlay': False,
                         },
                         'room': {
                             'protocol': 'webSocket',
-                            'commentable': True
+                            'commentable': True,
                         },
                         'reconnect': True,
-                    }
+                    },
                 }))
             else:
                 ws = ws_extractor
@@ -118,7 +118,7 @@ class NiconicoLiveFD(FileDownloader):
                 elif self.ydl.params.get('verbose', False):
                     if len(recv) > 100:
                         recv = recv[:100] + '...'
-                    self.to_screen('[debug] Server said: %s' % recv)
+                    self.to_screen(f'[debug] Server said: {recv}')
 
         def ws_main():
             reconnect = False
@@ -128,7 +128,7 @@ class NiconicoLiveFD(FileDownloader):
                     if ret is True:
                         return
                 except BaseException as e:
-                    self.to_screen('[%s] %s: Connection error occured, reconnecting after 10 seconds: %s' % ('niconico:live', video_id, str_or_none(e)))
+                    self.to_screen('[{}] {}: Connection error occured, reconnecting after 10 seconds: {}'.format('niconico:live', video_id, str_or_none(e)))
                     time.sleep(10)
                     continue
                 finally:
diff --git a/yt_dlp/downloader/rtmp.py b/yt_dlp/downloader/rtmp.py
index 0e09525..d7ffb3b 100644
--- a/yt_dlp/downloader/rtmp.py
+++ b/yt_dlp/downloader/rtmp.py
@@ -180,9 +180,9 @@ class RtmpFD(FileDownloader):
 
         while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live:
             prevsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % prevsize)
+            self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes')
             time.sleep(5.0)  # This seems to be needed
-            args = basic_args + ['--resume']
+            args = [*basic_args, '--resume']
             if retval == RD_FAILED:
                 args += ['--skip', '1']
             args = [encodeArgument(a) for a in args]
@@ -197,7 +197,7 @@ class RtmpFD(FileDownloader):
                 break
         if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE):
             fsize = os.path.getsize(encodeFilename(tmpfilename))
-            self.to_screen('[rtmpdump] Downloaded %s bytes' % fsize)
+            self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes')
             self.try_rename(tmpfilename, filename)
             self._hook_progress({
                 'downloaded_bytes': fsize,
diff --git a/yt_dlp/downloader/youtube_live_chat.py b/yt_dlp/downloader/youtube_live_chat.py
index c7a8637..961938d 100644
--- a/yt_dlp/downloader/youtube_live_chat.py
+++ b/yt_dlp/downloader/youtube_live_chat.py
@@ -18,7 +18,7 @@ class YoutubeLiveChatFD(FragmentFD):
 
     def real_download(self, filename, info_dict):
         video_id = info_dict['video_id']
-        self.to_screen('[%s] Downloading live chat' % self.FD_NAME)
+        self.to_screen(f'[{self.FD_NAME}] Downloading live chat')
         if not self.params.get('skip_download') and info_dict['protocol'] == 'youtube_live_chat':
             self.report_warning('Live chat download runs until the livestream ends. '
                                 'If you wish to download the video simultaneously, run a separate yt-dlp instance')
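
Context for the hls.py hunk above: keys now prefer the query string from extra_param_to_key_url and only fall back to the segment query for backwards compatibility. The following minimal sketch is not part of the patch; the URLs and query strings are hypothetical, and update_url_query here is a simplified stand-in for yt_dlp.utils.update_url_query used only to make the snippet self-contained.

import urllib.parse


def update_url_query(url, query):
    # Simplified stand-in: merge the parsed `query` dict into the URL's existing query string
    parsed = urllib.parse.urlparse(url)
    merged = urllib.parse.parse_qs(parsed.query)
    merged.update(query)
    return parsed._replace(query=urllib.parse.urlencode(merged, doseq=True)).geturl()


# Hypothetical values standing in for the extractor-provided parameters
extra_segment_query = urllib.parse.parse_qs('sig=xyz')  # from extra_param_to_segment_url
extra_key_query = urllib.parse.parse_qs('token=abc')    # from extra_param_to_key_url

# Media segments keep using the segment query
segment_url = update_url_query('https://example.com/seg1.ts', extra_segment_query)

# Key URIs prefer the key-specific query, falling back to the segment query for backwards compat
key_uri = update_url_query('https://example.com/key?kid=1', extra_key_query or extra_segment_query)

print(segment_url)  # https://example.com/seg1.ts?sig=xyz
print(key_uri)      # https://example.com/key?kid=1&token=abc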