author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-15 16:49:24 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-15 16:49:24 +0000
commit     2415e66f889f38503b73e8ebc5f43ca342390e5c (patch)
tree       ac48ab69d1d96bae3d83756134921e0d90593aa5  /yt_dlp/postprocessor
parent     Initial commit. (diff)
Adding upstream version 2024.03.10. (upstream/2024.03.10)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>

Diffstat (limited to 'yt_dlp/postprocessor')
-rw-r--r--  yt_dlp/postprocessor/__init__.py               |   47
-rw-r--r--  yt_dlp/postprocessor/common.py                 |  215
-rw-r--r--  yt_dlp/postprocessor/embedthumbnail.py         |  227
-rw-r--r--  yt_dlp/postprocessor/exec.py                   |   41
-rw-r--r--  yt_dlp/postprocessor/ffmpeg.py                 | 1192
-rw-r--r--  yt_dlp/postprocessor/metadataparser.py         |  125
-rw-r--r--  yt_dlp/postprocessor/modify_chapters.py        |  336
-rw-r--r--  yt_dlp/postprocessor/movefilesafterdownload.py |   53
-rw-r--r--  yt_dlp/postprocessor/sponskrub.py              |   98
-rw-r--r--  yt_dlp/postprocessor/sponsorblock.py           |  104
-rw-r--r--  yt_dlp/postprocessor/xattrpp.py                |   63
11 files changed, 2501 insertions, 0 deletions
diff --git a/yt_dlp/postprocessor/__init__.py b/yt_dlp/postprocessor/__init__.py
new file mode 100644
index 0000000..bfe9df7
--- /dev/null
+++ b/yt_dlp/postprocessor/__init__.py
@@ -0,0 +1,47 @@
+# flake8: noqa: F401
+
+from .common import PostProcessor
+from .embedthumbnail import EmbedThumbnailPP
+from .exec import ExecAfterDownloadPP, ExecPP
+from .ffmpeg import (
+ FFmpegConcatPP,
+ FFmpegCopyStreamPP,
+ FFmpegEmbedSubtitlePP,
+ FFmpegExtractAudioPP,
+ FFmpegFixupDuplicateMoovPP,
+ FFmpegFixupDurationPP,
+ FFmpegFixupM3u8PP,
+ FFmpegFixupM4aPP,
+ FFmpegFixupStretchedPP,
+ FFmpegFixupTimestampPP,
+ FFmpegMergerPP,
+ FFmpegMetadataPP,
+ FFmpegPostProcessor,
+ FFmpegSplitChaptersPP,
+ FFmpegSubtitlesConvertorPP,
+ FFmpegThumbnailsConvertorPP,
+ FFmpegVideoConvertorPP,
+ FFmpegVideoRemuxerPP,
+)
+from .metadataparser import (
+ MetadataFromFieldPP,
+ MetadataFromTitlePP,
+ MetadataParserPP,
+)
+from .modify_chapters import ModifyChaptersPP
+from .movefilesafterdownload import MoveFilesAfterDownloadPP
+from .sponskrub import SponSkrubPP
+from .sponsorblock import SponsorBlockPP
+from .xattrpp import XAttrMetadataPP
+from ..plugins import load_plugins
+
+_PLUGIN_CLASSES = load_plugins('postprocessor', 'PP')
+
+
+def get_postprocessor(key):
+ return globals()[key + 'PP']
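+# Illustrative usage (not part of the patch): the key is the class name
+# without the 'PP' suffix, e.g.
+#   get_postprocessor('FFmpegExtractAudio') is FFmpegExtractAudioPP
+#   get_postprocessor('SponsorBlock') is SponsorBlockPP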
+
+
+globals().update(_PLUGIN_CLASSES)
+__all__ = [name for name in globals().keys() if name.endswith('PP')]
+__all__.extend(('PostProcessor', 'FFmpegPostProcessor'))
diff --git a/yt_dlp/postprocessor/common.py b/yt_dlp/postprocessor/common.py
new file mode 100644
index 0000000..8cef86c
--- /dev/null
+++ b/yt_dlp/postprocessor/common.py
@@ -0,0 +1,215 @@
+import functools
+import json
+import os
+
+from ..networking import Request
+from ..networking.exceptions import HTTPError, network_exceptions
+from ..utils import (
+ PostProcessingError,
+ RetryManager,
+ _configuration_args,
+ deprecation_warning,
+ encodeFilename,
+)
+
+
+class PostProcessorMetaClass(type):
+ @staticmethod
+ def run_wrapper(func):
+ @functools.wraps(func)
+ def run(self, info, *args, **kwargs):
+ info_copy = self._copy_infodict(info)
+ self._hook_progress({'status': 'started'}, info_copy)
+ ret = func(self, info, *args, **kwargs)
+ if ret is not None:
+ _, info = ret
+ self._hook_progress({'status': 'finished'}, info_copy)
+ return ret
+ return run
+
+ def __new__(cls, name, bases, attrs):
+ if 'run' in attrs:
+ attrs['run'] = cls.run_wrapper(attrs['run'])
+ return type.__new__(cls, name, bases, attrs)
+
+
+class PostProcessor(metaclass=PostProcessorMetaClass):
+ """Post Processor class.
+
+ PostProcessor objects can be added to downloaders with their
+ add_post_processor() method. When the downloader has finished a
+ successful download, it will take its internal chain of PostProcessors
+ and start calling the run() method on each one of them, first with
+ an initial argument and then with the returned value of the previous
+ PostProcessor.
+
+ PostProcessor objects follow a "mutual registration" process similar
+ to InfoExtractor objects.
+
+    Optionally, a PostProcessor can use a list of additional command-line
+    arguments with self._configuration_args.
+ """
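+    # Illustrative sketch (not part of the patch): a minimal PostProcessor
+    # honouring the run() contract above, attached to a hypothetical
+    # YoutubeDL instance `ydl`:
+    #
+    #     class NotifyPP(PostProcessor):
+    #         def run(self, info):
+    #             self.to_screen('Finished %s' % info.get('filepath'))
+    #             return [], info  # delete no files, keep info unchanged
+    #
+    #     ydl.add_post_processor(NotifyPP(ydl))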
+
+ _downloader = None
+
+ def __init__(self, downloader=None):
+ self._progress_hooks = []
+ self.add_progress_hook(self.report_progress)
+ self.set_downloader(downloader)
+ self.PP_NAME = self.pp_key()
+
+ @classmethod
+ def pp_key(cls):
+ name = cls.__name__[:-2]
+ return name[6:] if name[:6].lower() == 'ffmpeg' else name
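+        # e.g. (illustrative) FFmpegExtractAudioPP.pp_key() == 'ExtractAudio',
+        # SponsorBlockPP.pp_key() == 'SponsorBlock'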
+
+ def to_screen(self, text, prefix=True, *args, **kwargs):
+ if self._downloader:
+ tag = '[%s] ' % self.PP_NAME if prefix else ''
+ return self._downloader.to_screen(f'{tag}{text}', *args, **kwargs)
+
+ def report_warning(self, text, *args, **kwargs):
+ if self._downloader:
+ return self._downloader.report_warning(text, *args, **kwargs)
+
+ def deprecation_warning(self, msg):
+ warn = getattr(self._downloader, 'deprecation_warning', deprecation_warning)
+ return warn(msg, stacklevel=1)
+
+ def deprecated_feature(self, msg):
+ if self._downloader:
+ return self._downloader.deprecated_feature(msg)
+ return deprecation_warning(msg, stacklevel=1)
+
+ def report_error(self, text, *args, **kwargs):
+ self.deprecation_warning('"yt_dlp.postprocessor.PostProcessor.report_error" is deprecated. '
+ 'raise "yt_dlp.utils.PostProcessingError" instead')
+ if self._downloader:
+ return self._downloader.report_error(text, *args, **kwargs)
+
+ def write_debug(self, text, *args, **kwargs):
+ if self._downloader:
+ return self._downloader.write_debug(text, *args, **kwargs)
+
+ def _delete_downloaded_files(self, *files_to_delete, **kwargs):
+ if self._downloader:
+ return self._downloader._delete_downloaded_files(*files_to_delete, **kwargs)
+ for filename in set(filter(None, files_to_delete)):
+ os.remove(filename)
+
+ def get_param(self, name, default=None, *args, **kwargs):
+ if self._downloader:
+ return self._downloader.params.get(name, default, *args, **kwargs)
+ return default
+
+ def set_downloader(self, downloader):
+ """Sets the downloader for this PP."""
+ self._downloader = downloader
+ for ph in getattr(downloader, '_postprocessor_hooks', []):
+ self.add_progress_hook(ph)
+
+ def _copy_infodict(self, info_dict):
+ return getattr(self._downloader, '_copy_infodict', dict)(info_dict)
+
+ @staticmethod
+ def _restrict_to(*, video=True, audio=True, images=True, simulated=True):
+ allowed = {'video': video, 'audio': audio, 'images': images}
+
+ def decorator(func):
+ @functools.wraps(func)
+ def wrapper(self, info):
+ if not simulated and (self.get_param('simulate') or self.get_param('skip_download')):
+ return [], info
+ format_type = (
+ 'video' if info.get('vcodec') != 'none'
+ else 'audio' if info.get('acodec') != 'none'
+ else 'images')
+ if allowed[format_type]:
+ return func(self, info)
+ else:
+ self.to_screen('Skipping %s' % format_type)
+ return [], info
+ return wrapper
+ return decorator
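+        # e.g. (illustrative) decorating run() with
+        # @PostProcessor._restrict_to(images=False) turns it into a no-op
+        # returning ([], info) when the downloaded file is image-only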
+
+ def run(self, information):
+ """Run the PostProcessor.
+
+ The "information" argument is a dictionary like the ones
+ composed by InfoExtractors. The only difference is that this
+ one has an extra field called "filepath" that points to the
+ downloaded file.
+
+        This method returns a tuple: the first element is a list of the
+        files that can be deleted, and the second is the updated
+        information.
+
+ In addition, this method may raise a PostProcessingError
+ exception if post processing fails.
+ """
+ return [], information # by default, keep file and do nothing
+
+ def try_utime(self, path, atime, mtime, errnote='Cannot update utime of file'):
+ try:
+ os.utime(encodeFilename(path), (atime, mtime))
+ except Exception:
+ self.report_warning(errnote)
+
+ def _configuration_args(self, exe, *args, **kwargs):
+ return _configuration_args(
+ self.pp_key(), self.get_param('postprocessor_args'), exe, *args, **kwargs)
+
+ def _hook_progress(self, status, info_dict):
+ if not self._progress_hooks:
+ return
+ status.update({
+ 'info_dict': info_dict,
+ 'postprocessor': self.pp_key(),
+ })
+ for ph in self._progress_hooks:
+ ph(status)
+
+ def add_progress_hook(self, ph):
+        # See YoutubeDL.py (search for postprocessor_hooks) for a description of this interface
+ self._progress_hooks.append(ph)
+
+ def report_progress(self, s):
+ s['_default_template'] = '%(postprocessor)s %(status)s' % s
+ if not self._downloader:
+ return
+
+ progress_dict = s.copy()
+ progress_dict.pop('info_dict')
+ progress_dict = {'info': s['info_dict'], 'progress': progress_dict}
+
+ progress_template = self.get_param('progress_template', {})
+ tmpl = progress_template.get('postprocess')
+ if tmpl:
+ self._downloader.to_screen(
+ self._downloader.evaluate_outtmpl(tmpl, progress_dict), quiet=False)
+
+ self._downloader.to_console_title(self._downloader.evaluate_outtmpl(
+ progress_template.get('postprocess-title') or 'yt-dlp %(progress._default_template)s',
+ progress_dict))
+
+ def _retry_download(self, err, count, retries):
+        # While this is not an extractor, it behaves similarly to one and
+        # so obeys extractor_retries and "--retry-sleep extractor"
+ RetryManager.report_retry(err, count, retries, info=self.to_screen, warn=self.report_warning,
+ sleep_func=self.get_param('retry_sleep_functions', {}).get('extractor'))
+
+ def _download_json(self, url, *, expected_http_errors=(404,)):
+ self.write_debug(f'{self.PP_NAME} query: {url}')
+ for retry in RetryManager(self.get_param('extractor_retries', 3), self._retry_download):
+ try:
+ rsp = self._downloader.urlopen(Request(url))
+ except network_exceptions as e:
+ if isinstance(e, HTTPError) and e.status in expected_http_errors:
+ return None
+ retry.error = PostProcessingError(f'Unable to communicate with {self.PP_NAME} API: {e}')
+ continue
+ return json.loads(rsp.read().decode(rsp.headers.get_param('charset') or 'utf-8'))
+
+
+class AudioConversionError(PostProcessingError): # Deprecated
+ pass
diff --git a/yt_dlp/postprocessor/embedthumbnail.py b/yt_dlp/postprocessor/embedthumbnail.py
new file mode 100644
index 0000000..9c53729
--- /dev/null
+++ b/yt_dlp/postprocessor/embedthumbnail.py
@@ -0,0 +1,227 @@
+import base64
+import os
+import re
+import subprocess
+
+from .common import PostProcessor
+from .ffmpeg import FFmpegPostProcessor, FFmpegThumbnailsConvertorPP
+from ..compat import imghdr
+from ..dependencies import mutagen
+from ..utils import (
+ Popen,
+ PostProcessingError,
+ check_executable,
+ encodeArgument,
+ encodeFilename,
+ error_to_compat_str,
+ prepend_extension,
+ shell_quote,
+)
+
+if mutagen:
+ from mutagen.flac import FLAC, Picture
+ from mutagen.mp4 import MP4, MP4Cover
+ from mutagen.oggopus import OggOpus
+ from mutagen.oggvorbis import OggVorbis
+
+
+class EmbedThumbnailPPError(PostProcessingError):
+ pass
+
+
+class EmbedThumbnailPP(FFmpegPostProcessor):
+
+ def __init__(self, downloader=None, already_have_thumbnail=False):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self._already_have_thumbnail = already_have_thumbnail
+
+ def _get_thumbnail_resolution(self, filename, thumbnail_dict):
+ def guess():
+ width, height = thumbnail_dict.get('width'), thumbnail_dict.get('height')
+ if width and height:
+ return width, height
+
+ try:
+ size_regex = r',\s*(?P<w>\d+)x(?P<h>\d+)\s*[,\[]'
+ size_result = self.run_ffmpeg(filename, None, ['-hide_banner'], expected_retcodes=(1,))
+ mobj = re.search(size_regex, size_result)
+ if mobj is None:
+ return guess()
+ except PostProcessingError as err:
+ self.report_warning('unable to find the thumbnail resolution; %s' % error_to_compat_str(err))
+ return guess()
+ return int(mobj.group('w')), int(mobj.group('h'))
+
+ def _report_run(self, exe, filename):
+ self.to_screen(f'{exe}: Adding thumbnail to "{filename}"')
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ filename = info['filepath']
+ temp_filename = prepend_extension(filename, 'temp')
+
+ if not info.get('thumbnails'):
+ self.to_screen('There aren\'t any thumbnails to embed')
+ return [], info
+
+ idx = next((-i for i, t in enumerate(info['thumbnails'][::-1], 1) if t.get('filepath')), None)
+ if idx is None:
+ self.to_screen('There are no thumbnails on disk')
+ return [], info
+ thumbnail_filename = info['thumbnails'][idx]['filepath']
+ if not os.path.exists(encodeFilename(thumbnail_filename)):
+ self.report_warning('Skipping embedding the thumbnail because the file is missing.')
+ return [], info
+
+ # Correct extension for WebP file with wrong extension (see #25687, #25717)
+ convertor = FFmpegThumbnailsConvertorPP(self._downloader)
+ convertor.fixup_webp(info, idx)
+
+ original_thumbnail = thumbnail_filename = info['thumbnails'][idx]['filepath']
+
+ # Convert unsupported thumbnail formats (see #25687, #25717)
+ # PNG is preferred since JPEG is lossy
+ thumbnail_ext = os.path.splitext(thumbnail_filename)[1][1:]
+ if info['ext'] not in ('mkv', 'mka') and thumbnail_ext not in ('jpg', 'jpeg', 'png'):
+ thumbnail_filename = convertor.convert_thumbnail(thumbnail_filename, 'png')
+ thumbnail_ext = 'png'
+
+ mtime = os.stat(encodeFilename(filename)).st_mtime
+
+ success = True
+ if info['ext'] == 'mp3':
+ options = [
+ '-c', 'copy', '-map', '0:0', '-map', '1:0', '-write_id3v1', '1', '-id3v2_version', '3',
+ '-metadata:s:v', 'title="Album cover"', '-metadata:s:v', 'comment=Cover (front)']
+
+ self._report_run('ffmpeg', filename)
+ self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
+
+ elif info['ext'] in ['mkv', 'mka']:
+ options = list(self.stream_copy_opts())
+
+ mimetype = f'image/{thumbnail_ext.replace("jpg", "jpeg")}'
+ old_stream, new_stream = self.get_stream_number(
+ filename, ('tags', 'mimetype'), mimetype)
+ if old_stream is not None:
+ options.extend(['-map', '-0:%d' % old_stream])
+ new_stream -= 1
+ options.extend([
+ '-attach', self._ffmpeg_filename_argument(thumbnail_filename),
+ '-metadata:s:%d' % new_stream, 'mimetype=%s' % mimetype,
+ '-metadata:s:%d' % new_stream, 'filename=cover.%s' % thumbnail_ext])
+
+ self._report_run('ffmpeg', filename)
+ self.run_ffmpeg(filename, temp_filename, options)
+
+ elif info['ext'] in ['m4a', 'mp4', 'm4v', 'mov']:
+ prefer_atomicparsley = 'embed-thumbnail-atomicparsley' in self.get_param('compat_opts', [])
+ # Method 1: Use mutagen
+ if not mutagen or prefer_atomicparsley:
+ success = False
+ else:
+ try:
+ self._report_run('mutagen', filename)
+ meta = MP4(filename)
+                    # NOTE: the 'covr' atom is a non-standard MPEG-4 atom;
+                    # Apple iTunes 'M4A' files include the 'moov.udta.meta.ilst' atom.
+ f = {'jpeg': MP4Cover.FORMAT_JPEG, 'png': MP4Cover.FORMAT_PNG}[imghdr.what(thumbnail_filename)]
+ with open(thumbnail_filename, 'rb') as thumbfile:
+ thumb_data = thumbfile.read()
+ meta.tags['covr'] = [MP4Cover(data=thumb_data, imageformat=f)]
+ meta.save()
+ temp_filename = filename
+ except Exception as err:
+ self.report_warning('unable to embed using mutagen; %s' % error_to_compat_str(err))
+ success = False
+
+ # Method 2: Use AtomicParsley
+ if not success:
+ success = True
+ atomicparsley = next((
+ # libatomicparsley.so : See https://github.com/xibr/ytdlp-lazy/issues/1
+ x for x in ['AtomicParsley', 'atomicparsley', 'libatomicparsley.so']
+ if check_executable(x, ['-v'])), None)
+ if atomicparsley is None:
+ self.to_screen('Neither mutagen nor AtomicParsley was found. Falling back to ffmpeg')
+ success = False
+ else:
+ if not prefer_atomicparsley:
+ self.to_screen('mutagen was not found. Falling back to AtomicParsley')
+ cmd = [encodeFilename(atomicparsley, True),
+ encodeFilename(filename, True),
+ encodeArgument('--artwork'),
+ encodeFilename(thumbnail_filename, True),
+ encodeArgument('-o'),
+ encodeFilename(temp_filename, True)]
+ cmd += [encodeArgument(o) for o in self._configuration_args('AtomicParsley')]
+
+ self._report_run('atomicparsley', filename)
+ self.write_debug('AtomicParsley command line: %s' % shell_quote(cmd))
+ stdout, stderr, returncode = Popen.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if returncode:
+ self.report_warning(f'Unable to embed thumbnails using AtomicParsley; {stderr.strip()}')
+                    # for formats that don't support thumbnails (like 3gp), AtomicParsley
+                    # won't write the temporary file
+ if 'No changes' in stdout:
+ self.report_warning('The file format doesn\'t support embedding a thumbnail')
+ success = False
+
+ # Method 3: Use ffmpeg+ffprobe
+            # Thumbnails attached using this method don't show up as cover in some cases
+ # See https://github.com/yt-dlp/yt-dlp/issues/2125, https://github.com/yt-dlp/yt-dlp/issues/411
+ if not success:
+ success = True
+ try:
+ options = [*self.stream_copy_opts(), '-map', '1']
+
+ old_stream, new_stream = self.get_stream_number(
+ filename, ('disposition', 'attached_pic'), 1)
+ if old_stream is not None:
+ options.extend(['-map', '-0:%d' % old_stream])
+ new_stream -= 1
+ options.extend(['-disposition:%s' % new_stream, 'attached_pic'])
+
+ self._report_run('ffmpeg', filename)
+ self.run_ffmpeg_multiple_files([filename, thumbnail_filename], temp_filename, options)
+ except PostProcessingError as err:
+ success = False
+ raise EmbedThumbnailPPError(f'Unable to embed using ffprobe & ffmpeg; {err}')
+
+ elif info['ext'] in ['ogg', 'opus', 'flac']:
+ if not mutagen:
+ raise EmbedThumbnailPPError('module mutagen was not found. Please install using `python3 -m pip install mutagen`')
+
+ self._report_run('mutagen', filename)
+ f = {'opus': OggOpus, 'flac': FLAC, 'ogg': OggVorbis}[info['ext']](filename)
+
+ pic = Picture()
+ pic.mime = 'image/%s' % imghdr.what(thumbnail_filename)
+ with open(thumbnail_filename, 'rb') as thumbfile:
+ pic.data = thumbfile.read()
+ pic.type = 3 # front cover
+ res = self._get_thumbnail_resolution(thumbnail_filename, info['thumbnails'][idx])
+ if res is not None:
+ pic.width, pic.height = res
+
+ if info['ext'] == 'flac':
+ f.add_picture(pic)
+ else:
+ # https://wiki.xiph.org/VorbisComment#METADATA_BLOCK_PICTURE
+ f['METADATA_BLOCK_PICTURE'] = base64.b64encode(pic.write()).decode('ascii')
+ f.save()
+ temp_filename = filename
+
+ else:
+ raise EmbedThumbnailPPError('Supported filetypes for thumbnail embedding are: mp3, mkv/mka, ogg/opus/flac, m4a/mp4/m4v/mov')
+
+ if success and temp_filename != filename:
+ os.replace(temp_filename, filename)
+
+ self.try_utime(filename, mtime, mtime)
+ converted = original_thumbnail != thumbnail_filename
+ self._delete_downloaded_files(
+ thumbnail_filename if converted or not self._already_have_thumbnail else None,
+ original_thumbnail if converted and not self._already_have_thumbnail else None,
+ info=info)
+ return [], info
diff --git a/yt_dlp/postprocessor/exec.py b/yt_dlp/postprocessor/exec.py
new file mode 100644
index 0000000..c2e73fb
--- /dev/null
+++ b/yt_dlp/postprocessor/exec.py
@@ -0,0 +1,41 @@
+from .common import PostProcessor
+from ..compat import compat_shlex_quote
+from ..utils import Popen, PostProcessingError, variadic
+
+
+class ExecPP(PostProcessor):
+
+ def __init__(self, downloader, exec_cmd):
+ PostProcessor.__init__(self, downloader)
+ self.exec_cmd = variadic(exec_cmd)
+
+ def parse_cmd(self, cmd, info):
+ tmpl, tmpl_dict = self._downloader.prepare_outtmpl(cmd, info)
+ if tmpl_dict: # if there are no replacements, tmpl_dict = {}
+ return self._downloader.escape_outtmpl(tmpl) % tmpl_dict
+
+ filepath = info.get('filepath', info.get('_filename'))
+        # If video, and no replacements are found, replace {} for backward compatibility
+ if filepath:
+ if '{}' not in cmd:
+ cmd += ' {}'
+ cmd = cmd.replace('{}', compat_shlex_quote(filepath))
+ return cmd
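+        # Illustrative (assuming a downloader is attached and `cmd` contains
+        # no output-template fields):
+        #   self.parse_cmd('echo done', {'filepath': 'a b.mp4'})
+        #   -> "echo done 'a b.mp4'"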
+
+ def run(self, info):
+ for tmpl in self.exec_cmd:
+ cmd = self.parse_cmd(tmpl, info)
+ self.to_screen(f'Executing command: {cmd}')
+ _, _, return_code = Popen.run(cmd, shell=True)
+ if return_code != 0:
+ raise PostProcessingError(f'Command returned error code {return_code}')
+ return [], info
+
+
+# Deprecated
+class ExecAfterDownloadPP(ExecPP):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.deprecation_warning(
+ 'yt_dlp.postprocessor.ExecAfterDownloadPP is deprecated '
+ 'and may be removed in a future version. Use yt_dlp.postprocessor.ExecPP instead')
diff --git a/yt_dlp/postprocessor/ffmpeg.py b/yt_dlp/postprocessor/ffmpeg.py
new file mode 100644
index 0000000..7d7f3f0
--- /dev/null
+++ b/yt_dlp/postprocessor/ffmpeg.py
@@ -0,0 +1,1192 @@
+import collections
+import contextvars
+import itertools
+import json
+import os
+import re
+import subprocess
+import time
+
+from .common import PostProcessor
+from ..compat import functools, imghdr
+from ..utils import (
+ MEDIA_EXTENSIONS,
+ ISO639Utils,
+ Popen,
+ PostProcessingError,
+ _get_exe_version_output,
+ deprecation_warning,
+ detect_exe_version,
+ determine_ext,
+ dfxp2srt,
+ encodeArgument,
+ encodeFilename,
+ filter_dict,
+ float_or_none,
+ is_outdated_version,
+ orderedSet,
+ prepend_extension,
+ replace_extension,
+ shell_quote,
+ traverse_obj,
+ variadic,
+ write_json_file,
+)
+
+EXT_TO_OUT_FORMATS = {
+ 'aac': 'adts',
+ 'flac': 'flac',
+ 'm4a': 'ipod',
+ 'mka': 'matroska',
+ 'mkv': 'matroska',
+ 'mpg': 'mpeg',
+ 'ogv': 'ogg',
+ 'ts': 'mpegts',
+ 'wma': 'asf',
+ 'wmv': 'asf',
+ 'weba': 'webm',
+ 'vtt': 'webvtt',
+}
+ACODECS = {
+ # name: (ext, encoder, opts)
+ 'mp3': ('mp3', 'libmp3lame', ()),
+ 'aac': ('m4a', 'aac', ('-f', 'adts')),
+ 'm4a': ('m4a', 'aac', ('-bsf:a', 'aac_adtstoasc')),
+ 'opus': ('opus', 'libopus', ()),
+ 'vorbis': ('ogg', 'libvorbis', ()),
+ 'flac': ('flac', 'flac', ()),
+ 'alac': ('m4a', None, ('-acodec', 'alac')),
+ 'wav': ('wav', None, ('-f', 'wav')),
+}
+
+
+def create_mapping_re(supported):
+ return re.compile(r'{0}(?:/{0})*$'.format(r'(?:\s*\w+\s*>)?\s*(?:%s)\s*' % '|'.join(supported)))
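+# e.g. (illustrative) create_mapping_re(('mp4', 'mkv')) matches the mapping
+# strings 'mp4', 'webm>mkv' and 'webm>mkv/mp4' consumed by resolve_mapping()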
+
+
+def resolve_mapping(source, mapping):
+ """
+ Get corresponding item from a mapping string like 'A>B/C>D/E'
+ @returns (target, error_message)
+ """
+ for pair in mapping.lower().split('/'):
+ kv = pair.split('>', 1)
+ if len(kv) == 1 or kv[0].strip() == source:
+ target = kv[-1].strip()
+ if target == source:
+ return target, f'already is in target format {source}'
+ return target, None
+ return None, f'could not find a mapping for {source}'
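+# Illustrative examples (not part of the patch):
+#   resolve_mapping('webm', 'webm>mkv/mp4') == ('mkv', None)
+#   resolve_mapping('mp4', 'webm>mkv/mp4') == ('mp4', 'already is in target format mp4')
+#   resolve_mapping('avi', 'webm>mkv') == (None, 'could not find a mapping for avi')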
+
+
+class FFmpegPostProcessorError(PostProcessingError):
+ pass
+
+
+class FFmpegPostProcessor(PostProcessor):
+ _ffmpeg_location = contextvars.ContextVar('ffmpeg_location', default=None)
+
+ def __init__(self, downloader=None):
+ PostProcessor.__init__(self, downloader)
+ self._prefer_ffmpeg = self.get_param('prefer_ffmpeg', True)
+ self._paths = self._determine_executables()
+
+ @staticmethod
+ def get_versions_and_features(downloader=None):
+ pp = FFmpegPostProcessor(downloader)
+ return pp._versions, pp._features
+
+ @staticmethod
+ def get_versions(downloader=None):
+ return FFmpegPostProcessor.get_versions_and_features(downloader)[0]
+
+ _ffmpeg_to_avconv = {'ffmpeg': 'avconv', 'ffprobe': 'avprobe'}
+
+ def _determine_executables(self):
+ programs = [*self._ffmpeg_to_avconv.keys(), *self._ffmpeg_to_avconv.values()]
+
+ location = self.get_param('ffmpeg_location', self._ffmpeg_location.get())
+ if location is None:
+ return {p: p for p in programs}
+
+ if not os.path.exists(location):
+ self.report_warning(
+ f'ffmpeg-location {location} does not exist! Continuing without ffmpeg', only_once=True)
+ return {}
+ elif os.path.isdir(location):
+ dirname, basename, filename = location, None, None
+ else:
+ filename = os.path.basename(location)
+ basename = next((p for p in programs if p in filename), 'ffmpeg')
+ dirname = os.path.dirname(os.path.abspath(location))
+ if basename in self._ffmpeg_to_avconv.keys():
+ self._prefer_ffmpeg = True
+
+ paths = {p: os.path.join(dirname, p) for p in programs}
+ if basename and basename in filename:
+ for p in programs:
+ path = os.path.join(dirname, filename.replace(basename, p))
+ if os.path.exists(path):
+ paths[p] = path
+ if basename:
+ paths[basename] = location
+ return paths
+
+ _version_cache, _features_cache = {None: None}, {}
+
+ def _get_ffmpeg_version(self, prog):
+ path = self._paths.get(prog)
+ if path in self._version_cache:
+ return self._version_cache[path], self._features_cache.get(path, {})
+ out = _get_exe_version_output(path, ['-bsfs'])
+ ver = detect_exe_version(out) if out else False
+ if ver:
+ regexs = [
+ r'(?:\d+:)?([0-9.]+)-[0-9]+ubuntu[0-9.]+$', # Ubuntu, see [1]
+ r'n([0-9.]+)$', # Arch Linux
+ # 1. http://www.ducea.com/2006/06/17/ubuntu-package-version-naming-explanation/
+ ]
+ for regex in regexs:
+ mobj = re.match(regex, ver)
+ if mobj:
+ ver = mobj.group(1)
+ self._version_cache[path] = ver
+ if prog != 'ffmpeg' or not out:
+ return ver, {}
+
+ mobj = re.search(r'(?m)^\s+libavformat\s+(?:[0-9. ]+)\s+/\s+(?P<runtime>[0-9. ]+)', out)
+ lavf_runtime_version = mobj.group('runtime').replace(' ', '') if mobj else None
+ self._features_cache[path] = features = {
+ 'fdk': '--enable-libfdk-aac' in out,
+ 'setts': 'setts' in out.splitlines(),
+ 'needs_adtstoasc': is_outdated_version(lavf_runtime_version, '57.56.100', False),
+ }
+ return ver, features
+
+ @property
+ def _versions(self):
+ return filter_dict({self.basename: self._version, self.probe_basename: self._probe_version})
+
+ @functools.cached_property
+ def basename(self):
+ self._version # run property
+ return self.basename
+
+ @functools.cached_property
+ def probe_basename(self):
+ self._probe_version # run property
+ return self.probe_basename
+
+ def _get_version(self, kind):
+ executables = (kind, )
+ if not self._prefer_ffmpeg:
+ executables = (kind, self._ffmpeg_to_avconv[kind])
+ basename, version, features = next(filter(
+ lambda x: x[1], ((p, *self._get_ffmpeg_version(p)) for p in executables)), (None, None, {}))
+ if kind == 'ffmpeg':
+ self.basename, self._features = basename, features
+ else:
+ self.probe_basename = basename
+ if basename == self._ffmpeg_to_avconv[kind]:
+ self.deprecated_feature(f'Support for {self._ffmpeg_to_avconv[kind]} is deprecated and '
+ f'may be removed in a future version. Use {kind} instead')
+ return version
+
+ @functools.cached_property
+ def _version(self):
+ return self._get_version('ffmpeg')
+
+ @functools.cached_property
+ def _probe_version(self):
+ return self._get_version('ffprobe')
+
+ @property
+ def available(self):
+ return self.basename is not None
+
+ @property
+ def executable(self):
+ return self._paths.get(self.basename)
+
+ @property
+ def probe_available(self):
+ return self.probe_basename is not None
+
+ @property
+ def probe_executable(self):
+ return self._paths.get(self.probe_basename)
+
+ @staticmethod
+ def stream_copy_opts(copy=True, *, ext=None):
+ yield from ('-map', '0')
+ # Don't copy Apple TV chapters track, bin_data
+ # See https://github.com/yt-dlp/yt-dlp/issues/2, #19042, #19024, https://trac.ffmpeg.org/ticket/6016
+ yield from ('-dn', '-ignore_unknown')
+ if copy:
+ yield from ('-c', 'copy')
+ if ext in ('mp4', 'mov', 'm4a'):
+ yield from ('-c:s', 'mov_text')
+
+ def check_version(self):
+ if not self.available:
+ raise FFmpegPostProcessorError('ffmpeg not found. Please install or provide the path using --ffmpeg-location')
+
+ required_version = '10-0' if self.basename == 'avconv' else '1.0'
+ if is_outdated_version(self._version, required_version):
+ self.report_warning(f'Your copy of {self.basename} is outdated, update {self.basename} '
+ f'to version {required_version} or newer if you encounter any errors')
+
+ def get_audio_codec(self, path):
+ if not self.probe_available and not self.available:
+ raise PostProcessingError('ffprobe and ffmpeg not found. Please install or provide the path using --ffmpeg-location')
+ try:
+ if self.probe_available:
+ cmd = [
+ encodeFilename(self.probe_executable, True),
+ encodeArgument('-show_streams')]
+ else:
+ cmd = [
+ encodeFilename(self.executable, True),
+ encodeArgument('-i')]
+ cmd.append(encodeFilename(self._ffmpeg_filename_argument(path), True))
+ self.write_debug(f'{self.basename} command line: {shell_quote(cmd)}')
+ stdout, stderr, returncode = Popen.run(
+ cmd, text=True, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
+ if returncode != (0 if self.probe_available else 1):
+ return None
+ except OSError:
+ return None
+ output = stdout if self.probe_available else stderr
+ if self.probe_available:
+ audio_codec = None
+ for line in output.split('\n'):
+ if line.startswith('codec_name='):
+ audio_codec = line.split('=')[1].strip()
+ elif line.strip() == 'codec_type=audio' and audio_codec is not None:
+ return audio_codec
+ else:
+ # Stream #FILE_INDEX:STREAM_INDEX[STREAM_ID](LANGUAGE): CODEC_TYPE: CODEC_NAME
+ mobj = re.search(
+ r'Stream\s*#\d+:\d+(?:\[0x[0-9a-f]+\])?(?:\([a-z]{3}\))?:\s*Audio:\s*([0-9a-z]+)',
+ output)
+ if mobj:
+ return mobj.group(1)
+ return None
+
+ def get_metadata_object(self, path, opts=[]):
+ if self.probe_basename != 'ffprobe':
+ if self.probe_available:
+ self.report_warning('Only ffprobe is supported for metadata extraction')
+ raise PostProcessingError('ffprobe not found. Please install or provide the path using --ffmpeg-location')
+ self.check_version()
+
+ cmd = [
+ encodeFilename(self.probe_executable, True),
+ encodeArgument('-hide_banner'),
+ encodeArgument('-show_format'),
+ encodeArgument('-show_streams'),
+ encodeArgument('-print_format'),
+ encodeArgument('json'),
+ ]
+
+ cmd += opts
+ cmd.append(self._ffmpeg_filename_argument(path))
+ self.write_debug(f'ffprobe command line: {shell_quote(cmd)}')
+ stdout, _, _ = Popen.run(cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
+ return json.loads(stdout)
+
+ def get_stream_number(self, path, keys, value):
+ streams = self.get_metadata_object(path)['streams']
+ num = next(
+ (i for i, stream in enumerate(streams) if traverse_obj(stream, keys, casesense=False) == value),
+ None)
+ return num, len(streams)
+
+ def _fixup_chapters(self, info):
+ last_chapter = traverse_obj(info, ('chapters', -1))
+ if last_chapter and not last_chapter.get('end_time'):
+ last_chapter['end_time'] = self._get_real_video_duration(info['filepath'])
+
+ def _get_real_video_duration(self, filepath, fatal=True):
+ try:
+ duration = float_or_none(
+ traverse_obj(self.get_metadata_object(filepath), ('format', 'duration')))
+ if not duration:
+ raise PostProcessingError('ffprobe returned empty duration')
+ return duration
+ except PostProcessingError as e:
+ if fatal:
+ raise PostProcessingError(f'Unable to determine video duration: {e.msg}')
+
+ def _duration_mismatch(self, d1, d2, tolerance=2):
+ if not d1 or not d2:
+ return None
+        # The duration is often only known to the nearest second, so there can
+        # be <1sec disparity naturally. Further excuse an additional <1sec difference.
+ return abs(d1 - d2) > tolerance
+
+ def run_ffmpeg_multiple_files(self, input_paths, out_path, opts, **kwargs):
+ return self.real_run_ffmpeg(
+ [(path, []) for path in input_paths],
+ [(out_path, opts)], **kwargs)
+
+ def real_run_ffmpeg(self, input_path_opts, output_path_opts, *, expected_retcodes=(0,)):
+ self.check_version()
+
+ oldest_mtime = min(
+ os.stat(encodeFilename(path)).st_mtime for path, _ in input_path_opts if path)
+
+ cmd = [encodeFilename(self.executable, True), encodeArgument('-y')]
+ # avconv does not have repeat option
+ if self.basename == 'ffmpeg':
+ cmd += [encodeArgument('-loglevel'), encodeArgument('repeat+info')]
+
+ def make_args(file, args, name, number):
+ keys = ['_%s%d' % (name, number), '_%s' % name]
+ if name == 'o':
+ args += ['-movflags', '+faststart']
+ if number == 1:
+ keys.append('')
+ args += self._configuration_args(self.basename, keys)
+ if name == 'i':
+ args.append('-i')
+ return (
+ [encodeArgument(arg) for arg in args]
+ + [encodeFilename(self._ffmpeg_filename_argument(file), True)])
+
+ for arg_type, path_opts in (('i', input_path_opts), ('o', output_path_opts)):
+ cmd += itertools.chain.from_iterable(
+ make_args(path, list(opts), arg_type, i + 1)
+ for i, (path, opts) in enumerate(path_opts) if path)
+
+ self.write_debug('ffmpeg command line: %s' % shell_quote(cmd))
+ _, stderr, returncode = Popen.run(
+ cmd, text=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, stdin=subprocess.PIPE)
+ if returncode not in variadic(expected_retcodes):
+ self.write_debug(stderr)
+ raise FFmpegPostProcessorError(stderr.strip().splitlines()[-1])
+ for out_path, _ in output_path_opts:
+ if out_path:
+ self.try_utime(out_path, oldest_mtime, oldest_mtime)
+ return stderr
+
+ def run_ffmpeg(self, path, out_path, opts, **kwargs):
+ return self.run_ffmpeg_multiple_files([path], out_path, opts, **kwargs)
+
+ @staticmethod
+ def _ffmpeg_filename_argument(fn):
+ # Always use 'file:' because the filename may contain ':' (ffmpeg
+ # interprets that as a protocol) or can start with '-' (-- is broken in
+ # ffmpeg, see https://ffmpeg.org/trac/ffmpeg/ticket/2127 for details)
+ # Also leave '-' intact in order not to break streaming to stdout.
+ if fn.startswith(('http://', 'https://')):
+ return fn
+ return 'file:' + fn if fn != '-' else fn
+
+ @staticmethod
+ def _quote_for_ffmpeg(string):
+ # See https://ffmpeg.org/ffmpeg-utils.html#toc-Quoting-and-escaping
+ # A sequence of '' produces '\'''\'';
+ # final replace removes the empty '' between \' \'.
+ string = string.replace("'", r"'\''").replace("'''", "'")
+ # Handle potential ' at string boundaries.
+ string = string[1:] if string[0] == "'" else "'" + string
+ return string[:-1] if string[-1] == "'" else string + "'"
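+        # e.g. (illustrative) "it's" -> 'it'\''s' ; the embedded quote is
+        # closed, backslash-escaped, then reopened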
+
+ def force_keyframes(self, filename, timestamps):
+ timestamps = orderedSet(timestamps)
+ if timestamps[0] == 0:
+ timestamps = timestamps[1:]
+ keyframe_file = prepend_extension(filename, 'keyframes.temp')
+ self.to_screen(f'Re-encoding "{filename}" with appropriate keyframes')
+ self.run_ffmpeg(filename, keyframe_file, [
+ *self.stream_copy_opts(False, ext=determine_ext(filename)),
+ '-force_key_frames', ','.join(f'{t:.6f}' for t in timestamps)])
+ return keyframe_file
+
+ def concat_files(self, in_files, out_file, concat_opts=None):
+ """
+ Use concat demuxer to concatenate multiple files having identical streams.
+
+ Only inpoint, outpoint, and duration concat options are supported.
+ See https://ffmpeg.org/ffmpeg-formats.html#concat-1 for details
+ """
+ concat_file = f'{out_file}.concat'
+ self.write_debug(f'Writing concat spec to {concat_file}')
+ with open(concat_file, 'w', encoding='utf-8') as f:
+ f.writelines(self._concat_spec(in_files, concat_opts))
+
+ out_flags = list(self.stream_copy_opts(ext=determine_ext(out_file)))
+
+ self.real_run_ffmpeg(
+ [(concat_file, ['-hide_banner', '-nostdin', '-f', 'concat', '-safe', '0'])],
+ [(out_file, out_flags)])
+ self._delete_downloaded_files(concat_file)
+
+ @classmethod
+ def _concat_spec(cls, in_files, concat_opts=None):
+ if concat_opts is None:
+ concat_opts = [{}] * len(in_files)
+ yield 'ffconcat version 1.0\n'
+ for file, opts in zip(in_files, concat_opts):
+ yield f'file {cls._quote_for_ffmpeg(cls._ffmpeg_filename_argument(file))}\n'
+ # Iterate explicitly to yield the following directives in order, ignoring the rest.
+ for directive in 'inpoint', 'outpoint', 'duration':
+ if directive in opts:
+ yield f'{directive} {opts[directive]}\n'
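+        # Illustrative output (not part of the patch):
+        #   list(FFmpegPostProcessor._concat_spec(['a.mp4', 'b.mp4'], [{}, {'inpoint': 3}]))
+        #   == ['ffconcat version 1.0\n', "file 'file:a.mp4'\n",
+        #       "file 'file:b.mp4'\n", 'inpoint 3\n']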
+
+
+class FFmpegExtractAudioPP(FFmpegPostProcessor):
+ COMMON_AUDIO_EXTS = MEDIA_EXTENSIONS.common_audio + ('wma', )
+ SUPPORTED_EXTS = tuple(ACODECS.keys())
+ FORMAT_RE = create_mapping_re(('best', *SUPPORTED_EXTS))
+
+ def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, nopostoverwrites=False):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self.mapping = preferredcodec or 'best'
+ self._preferredquality = float_or_none(preferredquality)
+ self._nopostoverwrites = nopostoverwrites
+
+ def _quality_args(self, codec):
+ if self._preferredquality is None:
+ return []
+ elif self._preferredquality > 10:
+ return ['-b:a', f'{self._preferredquality}k']
+
+ limits = {
+ 'libmp3lame': (10, 0),
+ 'libvorbis': (0, 10),
+ # FFmpeg's AAC encoder does not have an upper limit for the value of -q:a.
+ # Experimentally, with values over 4, bitrate changes were minimal or non-existent
+ 'aac': (0.1, 4),
+ 'libfdk_aac': (1, 5),
+ }.get(codec)
+ if not limits:
+ return []
+
+ q = limits[1] + (limits[0] - limits[1]) * (self._preferredquality / 10)
+ if codec == 'libfdk_aac':
+ return ['-vbr', f'{int(q)}']
+ return ['-q:a', f'{q}']
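+        # e.g. (illustrative) a preferred quality of 5 with libmp3lame maps to
+        # ['-q:a', '5.0'], while a bitrate-style value of 192 maps to ['-b:a', '192.0k']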
+
+ def run_ffmpeg(self, path, out_path, codec, more_opts):
+ if codec is None:
+ acodec_opts = []
+ else:
+ acodec_opts = ['-acodec', codec]
+ opts = ['-vn'] + acodec_opts + more_opts
+ try:
+ FFmpegPostProcessor.run_ffmpeg(self, path, out_path, opts)
+ except FFmpegPostProcessorError as err:
+ raise PostProcessingError(f'audio conversion failed: {err.msg}')
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, information):
+ orig_path = path = information['filepath']
+ target_format, _skip_msg = resolve_mapping(information['ext'], self.mapping)
+ if target_format == 'best' and information['ext'] in self.COMMON_AUDIO_EXTS:
+ target_format, _skip_msg = None, 'the file is already in a common audio format'
+ if not target_format:
+ self.to_screen(f'Not converting audio {orig_path}; {_skip_msg}')
+ return [], information
+
+ filecodec = self.get_audio_codec(path)
+ if filecodec is None:
+ raise PostProcessingError('WARNING: unable to obtain file audio codec with ffprobe')
+
+ if filecodec == 'aac' and target_format in ('m4a', 'best'):
+ # Lossless, but in another container
+ extension, _, more_opts, acodec = *ACODECS['m4a'], 'copy'
+ elif target_format == 'best' or target_format == filecodec:
+ # Lossless if possible
+ try:
+ extension, _, more_opts, acodec = *ACODECS[filecodec], 'copy'
+ except KeyError:
+ extension, acodec, more_opts = ACODECS['mp3']
+ else:
+ # We convert the audio (lossy if codec is lossy)
+ extension, acodec, more_opts = ACODECS[target_format]
+ if acodec == 'aac' and self._features.get('fdk'):
+ acodec, more_opts = 'libfdk_aac', []
+
+ more_opts = list(more_opts)
+ if acodec != 'copy':
+ more_opts = self._quality_args(acodec)
+
+ temp_path = new_path = replace_extension(path, extension, information['ext'])
+
+ if new_path == path:
+ if acodec == 'copy':
+ self.to_screen(f'Not converting audio {orig_path}; file is already in target format {target_format}')
+ return [], information
+ orig_path = prepend_extension(path, 'orig')
+ temp_path = prepend_extension(path, 'temp')
+ if (self._nopostoverwrites and os.path.exists(encodeFilename(new_path))
+ and os.path.exists(encodeFilename(orig_path))):
+ self.to_screen('Post-process file %s exists, skipping' % new_path)
+ return [], information
+
+ self.to_screen(f'Destination: {new_path}')
+ self.run_ffmpeg(path, temp_path, acodec, more_opts)
+
+ os.replace(path, orig_path)
+ os.replace(temp_path, new_path)
+ information['filepath'] = new_path
+ information['ext'] = extension
+
+ # Try to update the date time for extracted audio file.
+ if information.get('filetime') is not None:
+ self.try_utime(
+ new_path, time.time(), information['filetime'], errnote='Cannot update utime of audio file')
+
+ return [orig_path], information
+
+
+class FFmpegVideoConvertorPP(FFmpegPostProcessor):
+ SUPPORTED_EXTS = (
+ *sorted((*MEDIA_EXTENSIONS.common_video, 'gif')),
+ *sorted((*MEDIA_EXTENSIONS.common_audio, 'aac', 'vorbis')),
+ )
+ FORMAT_RE = create_mapping_re(SUPPORTED_EXTS)
+ _ACTION = 'converting'
+
+ def __init__(self, downloader=None, preferedformat=None):
+ super().__init__(downloader)
+ self.mapping = preferedformat
+
+ @staticmethod
+ def _options(target_ext):
+ yield from FFmpegPostProcessor.stream_copy_opts(False)
+ if target_ext == 'avi':
+ yield from ('-c:v', 'libxvid', '-vtag', 'XVID')
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ filename, source_ext = info['filepath'], info['ext'].lower()
+ target_ext, _skip_msg = resolve_mapping(source_ext, self.mapping)
+ if _skip_msg:
+ self.to_screen(f'Not {self._ACTION} media file "{filename}"; {_skip_msg}')
+ return [], info
+
+ outpath = replace_extension(filename, target_ext, source_ext)
+ self.to_screen(f'{self._ACTION.title()} video from {source_ext} to {target_ext}; Destination: {outpath}')
+ self.run_ffmpeg(filename, outpath, self._options(target_ext))
+
+ info['filepath'] = outpath
+ info['format'] = info['ext'] = target_ext
+ return [filename], info
+
+
+class FFmpegVideoRemuxerPP(FFmpegVideoConvertorPP):
+ _ACTION = 'remuxing'
+
+ @staticmethod
+ def _options(target_ext):
+ return FFmpegPostProcessor.stream_copy_opts()
+
+
+class FFmpegEmbedSubtitlePP(FFmpegPostProcessor):
+ SUPPORTED_EXTS = ('mp4', 'mov', 'm4a', 'webm', 'mkv', 'mka')
+
+ def __init__(self, downloader=None, already_have_subtitle=False):
+ super().__init__(downloader)
+ self._already_have_subtitle = already_have_subtitle
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ if info['ext'] not in self.SUPPORTED_EXTS:
+ self.to_screen(f'Subtitles can only be embedded in {", ".join(self.SUPPORTED_EXTS)} files')
+ return [], info
+ subtitles = info.get('requested_subtitles')
+ if not subtitles:
+ self.to_screen('There aren\'t any subtitles to embed')
+ return [], info
+
+ filename = info['filepath']
+
+ # Disabled temporarily. There needs to be a way to override this
+ # in case of duration actually mismatching in extractor
+ # See: https://github.com/yt-dlp/yt-dlp/issues/1870, https://github.com/yt-dlp/yt-dlp/issues/1385
+ '''
+ if info.get('duration') and not info.get('__real_download') and self._duration_mismatch(
+ self._get_real_video_duration(filename, False), info['duration']):
+ self.to_screen(f'Skipping {self.pp_key()} since the real and expected durations mismatch')
+ return [], info
+ '''
+
+ ext = info['ext']
+ sub_langs, sub_names, sub_filenames = [], [], []
+ webm_vtt_warn = False
+ mp4_ass_warn = False
+
+ for lang, sub_info in subtitles.items():
+ if not os.path.exists(sub_info.get('filepath', '')):
+ self.report_warning(f'Skipping embedding {lang} subtitle because the file is missing')
+ continue
+ sub_ext = sub_info['ext']
+ if sub_ext == 'json':
+ self.report_warning('JSON subtitles cannot be embedded')
+ elif ext != 'webm' or ext == 'webm' and sub_ext == 'vtt':
+ sub_langs.append(lang)
+ sub_names.append(sub_info.get('name'))
+ sub_filenames.append(sub_info['filepath'])
+ else:
+ if not webm_vtt_warn and ext == 'webm' and sub_ext != 'vtt':
+ webm_vtt_warn = True
+ self.report_warning('Only WebVTT subtitles can be embedded in webm files')
+ if not mp4_ass_warn and ext == 'mp4' and sub_ext == 'ass':
+ mp4_ass_warn = True
+ self.report_warning('ASS subtitles cannot be properly embedded in mp4 files; expect issues')
+
+ if not sub_langs:
+ return [], info
+
+ input_files = [filename] + sub_filenames
+
+ opts = [
+ *self.stream_copy_opts(ext=info['ext']),
+ # Don't copy the existing subtitles, we may be running the
+ # postprocessor a second time
+ '-map', '-0:s',
+ ]
+ for i, (lang, name) in enumerate(zip(sub_langs, sub_names)):
+ opts.extend(['-map', '%d:0' % (i + 1)])
+ lang_code = ISO639Utils.short2long(lang) or lang
+ opts.extend(['-metadata:s:s:%d' % i, 'language=%s' % lang_code])
+ if name:
+ opts.extend(['-metadata:s:s:%d' % i, 'handler_name=%s' % name,
+ '-metadata:s:s:%d' % i, 'title=%s' % name])
+
+ temp_filename = prepend_extension(filename, 'temp')
+ self.to_screen('Embedding subtitles in "%s"' % filename)
+ self.run_ffmpeg_multiple_files(input_files, temp_filename, opts)
+ os.replace(temp_filename, filename)
+
+ files_to_delete = [] if self._already_have_subtitle else sub_filenames
+ return files_to_delete, info
+
+
+class FFmpegMetadataPP(FFmpegPostProcessor):
+
+ def __init__(self, downloader, add_metadata=True, add_chapters=True, add_infojson='if_exists'):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self._add_metadata = add_metadata
+ self._add_chapters = add_chapters
+ self._add_infojson = add_infojson
+
+ @staticmethod
+ def _options(target_ext):
+ audio_only = target_ext == 'm4a'
+ yield from FFmpegPostProcessor.stream_copy_opts(not audio_only)
+ if audio_only:
+ yield from ('-vn', '-acodec', 'copy')
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ self._fixup_chapters(info)
+ filename, metadata_filename = info['filepath'], None
+ files_to_delete, options = [], []
+ if self._add_chapters and info.get('chapters'):
+ metadata_filename = replace_extension(filename, 'meta')
+ options.extend(self._get_chapter_opts(info['chapters'], metadata_filename))
+ files_to_delete.append(metadata_filename)
+ if self._add_metadata:
+ options.extend(self._get_metadata_opts(info))
+
+ if self._add_infojson:
+ if info['ext'] in ('mkv', 'mka'):
+ infojson_filename = info.get('infojson_filename')
+ options.extend(self._get_infojson_opts(info, infojson_filename))
+ if not infojson_filename:
+ files_to_delete.append(info.get('infojson_filename'))
+ elif self._add_infojson is True:
+ self.to_screen('The info-json can only be attached to mkv/mka files')
+
+ if not options:
+ self.to_screen('There isn\'t any metadata to add')
+ return [], info
+
+ temp_filename = prepend_extension(filename, 'temp')
+ self.to_screen('Adding metadata to "%s"' % filename)
+ self.run_ffmpeg_multiple_files(
+ (filename, metadata_filename), temp_filename,
+ itertools.chain(self._options(info['ext']), *options))
+ self._delete_downloaded_files(*files_to_delete)
+ os.replace(temp_filename, filename)
+ return [], info
+
+ @staticmethod
+ def _get_chapter_opts(chapters, metadata_filename):
+ with open(metadata_filename, 'w', encoding='utf-8') as f:
+ def ffmpeg_escape(text):
+ return re.sub(r'([\\=;#\n])', r'\\\1', text)
+
+ metadata_file_content = ';FFMETADATA1\n'
+ for chapter in chapters:
+ metadata_file_content += '[CHAPTER]\nTIMEBASE=1/1000\n'
+ metadata_file_content += 'START=%d\n' % (chapter['start_time'] * 1000)
+ metadata_file_content += 'END=%d\n' % (chapter['end_time'] * 1000)
+ chapter_title = chapter.get('title')
+ if chapter_title:
+ metadata_file_content += 'title=%s\n' % ffmpeg_escape(chapter_title)
+ f.write(metadata_file_content)
+ yield ('-map_metadata', '1')
+
+ def _get_metadata_opts(self, info):
+ meta_prefix = 'meta'
+ metadata = collections.defaultdict(dict)
+
+ def add(meta_list, info_list=None):
+ value = next((
+ info[key] for key in [f'{meta_prefix}_'] + list(variadic(info_list or meta_list))
+ if info.get(key) is not None), None)
+ if value not in ('', None):
+ value = ', '.join(map(str, variadic(value)))
+ value = value.replace('\0', '') # nul character cannot be passed in command line
+ metadata['common'].update({meta_f: value for meta_f in variadic(meta_list)})
+
+ # Info on media metadata/metadata supported by ffmpeg:
+ # https://wiki.multimedia.cx/index.php/FFmpeg_Metadata
+ # https://kdenlive.org/en/project/adding-meta-data-to-mp4-video/
+ # https://kodi.wiki/view/Video_file_tagging
+
+ add('title', ('track', 'title'))
+ add('date', 'upload_date')
+ add(('description', 'synopsis'), 'description')
+ add(('purl', 'comment'), 'webpage_url')
+ add('track', 'track_number')
+ add('artist', ('artist', 'artists', 'creator', 'creators', 'uploader', 'uploader_id'))
+ add('composer', ('composer', 'composers'))
+ add('genre', ('genre', 'genres'))
+ add('album')
+ add('album_artist', ('album_artist', 'album_artists'))
+ add('disc', 'disc_number')
+ add('show', 'series')
+ add('season_number')
+ add('episode_id', ('episode', 'episode_id'))
+ add('episode_sort', 'episode_number')
+ if 'embed-metadata' in self.get_param('compat_opts', []):
+ add('comment', 'description')
+ metadata['common'].pop('synopsis', None)
+
+ meta_regex = rf'{re.escape(meta_prefix)}(?P<i>\d+)?_(?P<key>.+)'
+ for key, value in info.items():
+ mobj = re.fullmatch(meta_regex, key)
+ if value is not None and mobj:
+ metadata[mobj.group('i') or 'common'][mobj.group('key')] = value.replace('\0', '')
+
+ # Write id3v1 metadata also since Windows Explorer can't handle id3v2 tags
+ yield ('-write_id3v1', '1')
+
+ for name, value in metadata['common'].items():
+ yield ('-metadata', f'{name}={value}')
+
+ stream_idx = 0
+ for fmt in info.get('requested_formats') or [info]:
+ stream_count = 2 if 'none' not in (fmt.get('vcodec'), fmt.get('acodec')) else 1
+ lang = ISO639Utils.short2long(fmt.get('language') or '') or fmt.get('language')
+ for i in range(stream_idx, stream_idx + stream_count):
+ if lang:
+ metadata[str(i)].setdefault('language', lang)
+ for name, value in metadata[str(i)].items():
+ yield (f'-metadata:s:{i}', f'{name}={value}')
+ stream_idx += stream_count
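+        # Illustrative (assuming a file with two streams): {'meta_album': 'X'}
+        # in the info dict yields ('-metadata', 'album=X'), while
+        # {'meta1_language': 'eng'} targets only stream 1 via
+        # ('-metadata:s:1', 'language=eng')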
+
+ def _get_infojson_opts(self, info, infofn):
+ if not infofn or not os.path.exists(infofn):
+ if self._add_infojson is not True:
+ return
+ infofn = infofn or '%s.temp' % (
+ self._downloader.prepare_filename(info, 'infojson')
+ or replace_extension(self._downloader.prepare_filename(info), 'info.json', info['ext']))
+ if not self._downloader._ensure_dir_exists(infofn):
+ return
+ self.write_debug(f'Writing info-json to: {infofn}')
+ write_json_file(self._downloader.sanitize_info(info, self.get_param('clean_infojson', True)), infofn)
+ info['infojson_filename'] = infofn
+
+ old_stream, new_stream = self.get_stream_number(info['filepath'], ('tags', 'mimetype'), 'application/json')
+ if old_stream is not None:
+ yield ('-map', '-0:%d' % old_stream)
+ new_stream -= 1
+
+ yield (
+ '-attach', self._ffmpeg_filename_argument(infofn),
+ f'-metadata:s:{new_stream}', 'mimetype=application/json',
+ f'-metadata:s:{new_stream}', 'filename=info.json',
+ )
+
+
+class FFmpegMergerPP(FFmpegPostProcessor):
+ SUPPORTED_EXTS = MEDIA_EXTENSIONS.common_video
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ filename = info['filepath']
+ temp_filename = prepend_extension(filename, 'temp')
+ args = ['-c', 'copy']
+ audio_streams = 0
+ for (i, fmt) in enumerate(info['requested_formats']):
+ if fmt.get('acodec') != 'none':
+ args.extend(['-map', f'{i}:a:0'])
+ aac_fixup = fmt['protocol'].startswith('m3u8') and self.get_audio_codec(fmt['filepath']) == 'aac'
+ if aac_fixup:
+ args.extend([f'-bsf:a:{audio_streams}', 'aac_adtstoasc'])
+ audio_streams += 1
+ if fmt.get('vcodec') != 'none':
+ args.extend(['-map', '%u:v:0' % (i)])
+ self.to_screen('Merging formats into "%s"' % filename)
+ self.run_ffmpeg_multiple_files(info['__files_to_merge'], temp_filename, args)
+ os.rename(encodeFilename(temp_filename), encodeFilename(filename))
+ return info['__files_to_merge'], info
+
+ def can_merge(self):
+ # TODO: figure out merge-capable ffmpeg version
+ if self.basename != 'avconv':
+ return True
+
+ required_version = '10-0'
+ if is_outdated_version(
+ self._versions[self.basename], required_version):
+ warning = ('Your copy of %s is outdated and unable to properly mux separate video and audio files, '
+ 'yt-dlp will download single file media. '
+ 'Update %s to version %s or newer to fix this.') % (
+ self.basename, self.basename, required_version)
+ self.report_warning(warning)
+ return False
+ return True
+
+
+class FFmpegFixupPostProcessor(FFmpegPostProcessor):
+ def _fixup(self, msg, filename, options):
+ temp_filename = prepend_extension(filename, 'temp')
+
+ self.to_screen(f'{msg} of "{filename}"')
+ self.run_ffmpeg(filename, temp_filename, options)
+
+ os.replace(temp_filename, filename)
+
+
+class FFmpegFixupStretchedPP(FFmpegFixupPostProcessor):
+ @PostProcessor._restrict_to(images=False, audio=False)
+ def run(self, info):
+ stretched_ratio = info.get('stretched_ratio')
+ if stretched_ratio not in (None, 1):
+ self._fixup('Fixing aspect ratio', info['filepath'], [
+ *self.stream_copy_opts(), '-aspect', '%f' % stretched_ratio])
+ return [], info
+
+
+class FFmpegFixupM4aPP(FFmpegFixupPostProcessor):
+ @PostProcessor._restrict_to(images=False, video=False)
+ def run(self, info):
+ if info.get('container') == 'm4a_dash':
+ self._fixup('Correcting container', info['filepath'], [*self.stream_copy_opts(), '-f', 'mp4'])
+ return [], info
+
+
+class FFmpegFixupM3u8PP(FFmpegFixupPostProcessor):
+ def _needs_fixup(self, info):
+ yield info['ext'] in ('mp4', 'm4a')
+ yield info['protocol'].startswith('m3u8')
+ try:
+ metadata = self.get_metadata_object(info['filepath'])
+ except PostProcessingError as e:
+ self.report_warning(f'Unable to extract metadata: {e.msg}')
+ yield True
+ else:
+ yield traverse_obj(metadata, ('format', 'format_name'), casesense=False) == 'mpegts'
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ if all(self._needs_fixup(info)):
+ args = ['-f', 'mp4']
+ if self.get_audio_codec(info['filepath']) == 'aac':
+ args.extend(['-bsf:a', 'aac_adtstoasc'])
+ self._fixup('Fixing MPEG-TS in MP4 container', info['filepath'], [
+ *self.stream_copy_opts(), *args])
+ return [], info
+
+
+class FFmpegFixupTimestampPP(FFmpegFixupPostProcessor):
+
+ def __init__(self, downloader=None, trim=0.001):
+ # "trim" should be used when the video contains unintended packets
+ super().__init__(downloader)
+ assert isinstance(trim, (int, float))
+ self.trim = str(trim)
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ if not self._features.get('setts'):
+ self.report_warning(
+ 'A re-encode is needed to fix timestamps in older versions of ffmpeg. '
+ 'Please install ffmpeg 4.4 or later to fixup without re-encoding')
+ opts = ['-vf', 'setpts=PTS-STARTPTS']
+ else:
+ opts = ['-c', 'copy', '-bsf', 'setts=ts=TS-STARTPTS']
+ self._fixup('Fixing frame timestamp', info['filepath'], opts + [*self.stream_copy_opts(False), '-ss', self.trim])
+ return [], info
+
+
+class FFmpegCopyStreamPP(FFmpegFixupPostProcessor):
+ MESSAGE = 'Copying stream'
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ self._fixup(self.MESSAGE, info['filepath'], self.stream_copy_opts())
+ return [], info
+
+
+class FFmpegFixupDurationPP(FFmpegCopyStreamPP):
+ MESSAGE = 'Fixing video duration'
+
+
+class FFmpegFixupDuplicateMoovPP(FFmpegCopyStreamPP):
+ MESSAGE = 'Fixing duplicate MOOV atoms'
+
+
+class FFmpegSubtitlesConvertorPP(FFmpegPostProcessor):
+ SUPPORTED_EXTS = MEDIA_EXTENSIONS.subtitles
+
+ def __init__(self, downloader=None, format=None):
+ super().__init__(downloader)
+ self.format = format
+
+ def run(self, info):
+ subs = info.get('requested_subtitles')
+ new_ext = self.format
+ new_format = new_ext
+ if new_format == 'vtt':
+ new_format = 'webvtt'
+ if subs is None:
+ self.to_screen('There aren\'t any subtitles to convert')
+ return [], info
+ self.to_screen('Converting subtitles')
+ sub_filenames = []
+ for lang, sub in subs.items():
+ if not os.path.exists(sub.get('filepath', '')):
+                self.report_warning(f'Skipping converting {lang} subtitle because the file is missing')
+ continue
+ ext = sub['ext']
+ if ext == new_ext:
+ self.to_screen('Subtitle file for %s is already in the requested format' % new_ext)
+ continue
+ elif ext == 'json':
+ self.to_screen(
+ 'You have requested to convert json subtitles into another format, '
+ 'which is currently not possible')
+ continue
+ old_file = sub['filepath']
+ sub_filenames.append(old_file)
+ new_file = replace_extension(old_file, new_ext)
+
+ if ext in ('dfxp', 'ttml', 'tt'):
+ self.report_warning(
+ 'You have requested to convert dfxp (TTML) subtitles into another format, '
+ 'which results in style information loss')
+
+ dfxp_file = old_file
+ srt_file = replace_extension(old_file, 'srt')
+
+ with open(dfxp_file, 'rb') as f:
+ srt_data = dfxp2srt(f.read())
+
+ with open(srt_file, 'w', encoding='utf-8') as f:
+ f.write(srt_data)
+ old_file = srt_file
+
+ subs[lang] = {
+ 'ext': 'srt',
+ 'data': srt_data,
+ 'filepath': srt_file,
+ }
+
+ if new_ext == 'srt':
+ continue
+ else:
+ sub_filenames.append(srt_file)
+
+ self.run_ffmpeg(old_file, new_file, ['-f', new_format])
+
+ with open(new_file, encoding='utf-8') as f:
+ subs[lang] = {
+ 'ext': new_ext,
+ 'data': f.read(),
+ 'filepath': new_file,
+ }
+
+ info['__files_to_move'][new_file] = replace_extension(
+ info['__files_to_move'][sub['filepath']], new_ext)
+
+ return sub_filenames, info
+
+
+class FFmpegSplitChaptersPP(FFmpegPostProcessor):
+ def __init__(self, downloader, force_keyframes=False):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self._force_keyframes = force_keyframes
+
+ def _prepare_filename(self, number, chapter, info):
+ info = info.copy()
+ info.update({
+ 'section_number': number,
+ 'section_title': chapter.get('title'),
+ 'section_start': chapter.get('start_time'),
+ 'section_end': chapter.get('end_time'),
+ })
+ return self._downloader.prepare_filename(info, 'chapter')
+
+ def _ffmpeg_args_for_chapter(self, number, chapter, info):
+ destination = self._prepare_filename(number, chapter, info)
+ if not self._downloader._ensure_dir_exists(encodeFilename(destination)):
+ return
+
+ chapter['filepath'] = destination
+ self.to_screen('Chapter %03d; Destination: %s' % (number, destination))
+ return (
+ destination,
+ ['-ss', str(chapter['start_time']),
+ '-t', str(chapter['end_time'] - chapter['start_time'])])
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ self._fixup_chapters(info)
+ chapters = info.get('chapters') or []
+ if not chapters:
+ self.to_screen('Chapter information is unavailable')
+ return [], info
+
+ in_file = info['filepath']
+ if self._force_keyframes and len(chapters) > 1:
+ in_file = self.force_keyframes(in_file, (c['start_time'] for c in chapters))
+ self.to_screen('Splitting video by chapters; %d chapters found' % len(chapters))
+ for idx, chapter in enumerate(chapters):
+ destination, opts = self._ffmpeg_args_for_chapter(idx + 1, chapter, info)
+ self.real_run_ffmpeg([(in_file, opts)], [(destination, self.stream_copy_opts())])
+ if in_file != info['filepath']:
+ self._delete_downloaded_files(in_file, msg=None)
+ return [], info
+
+
+class FFmpegThumbnailsConvertorPP(FFmpegPostProcessor):
+ SUPPORTED_EXTS = MEDIA_EXTENSIONS.thumbnails
+ FORMAT_RE = create_mapping_re(SUPPORTED_EXTS)
+
+ def __init__(self, downloader=None, format=None):
+ super().__init__(downloader)
+ self.mapping = format
+
+ @classmethod
+ def is_webp(cls, path):
+ deprecation_warning(f'{cls.__module__}.{cls.__name__}.is_webp is deprecated')
+ return imghdr.what(path) == 'webp'
+
+ def fixup_webp(self, info, idx=-1):
+ thumbnail_filename = info['thumbnails'][idx]['filepath']
+ _, thumbnail_ext = os.path.splitext(thumbnail_filename)
+ if thumbnail_ext:
+ if thumbnail_ext.lower() != '.webp' and imghdr.what(thumbnail_filename) == 'webp':
+ self.to_screen('Correcting thumbnail "%s" extension to webp' % thumbnail_filename)
+ webp_filename = replace_extension(thumbnail_filename, 'webp')
+ os.replace(thumbnail_filename, webp_filename)
+ info['thumbnails'][idx]['filepath'] = webp_filename
+ info['__files_to_move'][webp_filename] = replace_extension(
+ info['__files_to_move'].pop(thumbnail_filename), 'webp')
+
+ @staticmethod
+ def _options(target_ext):
+ yield from ('-update', '1')
+ if target_ext == 'jpg':
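+ # The mjpeg2jpeg bitstream filter turns raw MJPEG frames into
+ # standalone JFIF/JPEG files (it re-inserts the omitted Huffman tables)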
+ yield from ('-bsf:v', 'mjpeg2jpeg')
+
+ def convert_thumbnail(self, thumbnail_filename, target_ext):
+ thumbnail_conv_filename = replace_extension(thumbnail_filename, target_ext)
+
+ self.to_screen(f'Converting thumbnail "{thumbnail_filename}" to {target_ext}')
+ _, source_ext = os.path.splitext(thumbnail_filename)
+ self.real_run_ffmpeg(
+ [(thumbnail_filename, [] if source_ext == '.gif' else ['-f', 'image2', '-pattern_type', 'none'])],
+ [(thumbnail_conv_filename, self._options(target_ext))])
+ return thumbnail_conv_filename
+
+ def run(self, info):
+ files_to_delete = []
+ has_thumbnail = False
+
+ for idx, thumbnail_dict in enumerate(info.get('thumbnails') or []):
+ original_thumbnail = thumbnail_dict.get('filepath')
+ if not original_thumbnail:
+ continue
+ has_thumbnail = True
+ self.fixup_webp(info, idx)
+ original_thumbnail = thumbnail_dict['filepath'] # Path can change during fixup
+ thumbnail_ext = os.path.splitext(original_thumbnail)[1][1:].lower()
+ if thumbnail_ext == 'jpeg':
+ thumbnail_ext = 'jpg'
+ target_ext, _skip_msg = resolve_mapping(thumbnail_ext, self.mapping)
+ if _skip_msg:
+ self.to_screen(f'Not converting thumbnail "{original_thumbnail}"; {_skip_msg}')
+ continue
+ thumbnail_dict['filepath'] = self.convert_thumbnail(original_thumbnail, target_ext)
+ files_to_delete.append(original_thumbnail)
+ info['__files_to_move'][thumbnail_dict['filepath']] = replace_extension(
+ info['__files_to_move'][original_thumbnail], target_ext)
+
+ if not has_thumbnail:
+ self.to_screen('There aren\'t any thumbnails to convert')
+ return files_to_delete, info
+
+
+class FFmpegConcatPP(FFmpegPostProcessor):
+ def __init__(self, downloader, only_multi_video=False):
+ self._only_multi_video = only_multi_video
+ super().__init__(downloader)
+
+ def _get_codecs(self, file):
+ codecs = traverse_obj(self.get_metadata_object(file), ('streams', ..., 'codec_name'))
+ self.write_debug(f'Codecs = {", ".join(codecs)}')
+ return tuple(codecs)
+
+ def concat_files(self, in_files, out_file):
+ if not self._downloader._ensure_dir_exists(out_file):
+ return
+ if len(in_files) == 1:
+ if os.path.realpath(in_files[0]) != os.path.realpath(out_file):
+ self.to_screen(f'Moving "{in_files[0]}" to "{out_file}"')
+ os.replace(in_files[0], out_file)
+ return []
+
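+ # Concatenation with stream copy requires every input to have the same
+ # set of codecs; the concat demuxer cannot join differing streams
+ # without re-encoding.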
+ if len(set(map(self._get_codecs, in_files))) > 1:
+ raise PostProcessingError(
+ 'The files have different streams/codecs and cannot be concatenated. '
+ 'Either select different formats or --recode-video them to a common format')
+
+ self.to_screen(f'Concatenating {len(in_files)} files; Destination: {out_file}')
+ super().concat_files(in_files, out_file)
+ return in_files
+
+ @PostProcessor._restrict_to(images=False, simulated=False)
+ def run(self, info):
+ entries = info.get('entries') or []
+ if not any(entries) or (self._only_multi_video and info['_type'] != 'multi_video'):
+ return [], info
+ elif traverse_obj(entries, (..., lambda k, v: k == 'requested_downloads' and len(v) > 1)):
+ raise PostProcessingError('Concatenation is not supported when downloading multiple separate formats')
+
+ in_files = traverse_obj(entries, (..., 'requested_downloads', 0, 'filepath')) or []
+ if len(in_files) < len(entries):
+ raise PostProcessingError('Aborting concatenation because some downloads failed')
+
+ exts = traverse_obj(entries, (..., 'requested_downloads', 0, 'ext'), (..., 'ext'))
+ ie_copy = collections.ChainMap({'ext': exts[0] if len(set(exts)) == 1 else 'mkv'},
+ info, self._downloader._playlist_infodict(info))
+ out_file = self._downloader.prepare_filename(ie_copy, 'pl_video')
+
+ files_to_delete = self.concat_files(in_files, out_file)
+
+ info['requested_downloads'] = [{
+ 'filepath': out_file,
+ 'ext': ie_copy['ext'],
+ }]
+ return files_to_delete, info
diff --git a/yt_dlp/postprocessor/metadataparser.py b/yt_dlp/postprocessor/metadataparser.py
new file mode 100644
index 0000000..1d60542
--- /dev/null
+++ b/yt_dlp/postprocessor/metadataparser.py
@@ -0,0 +1,125 @@
+import re
+
+from .common import PostProcessor
+from ..utils import Namespace, filter_dict, function_with_repr
+
+
+class MetadataParserPP(PostProcessor):
+ def __init__(self, downloader, actions):
+ super().__init__(downloader)
+ self._actions = []
+ for f in actions:
+ action, *args = f
+ assert action in self.Actions
+ self._actions.append(action(self, *args))
+
+ @classmethod
+ def validate_action(cls, action, *data):
+ """Each action can be:
+ (Actions.INTERPRET, from, to) OR
+ (Actions.REPLACE, field, search, replace)
+ """
+ if action not in cls.Actions:
+ raise ValueError(f'{action!r} is not a valid action')
+ action(cls, *data) # So this can raise error to validate
+
+ @staticmethod
+ def field_to_template(tmpl):
+ if re.match(r'[a-zA-Z_]+$', tmpl):
+ return f'%({tmpl})s'
+
+ from ..YoutubeDL import YoutubeDL
+ err = YoutubeDL.validate_outtmpl(tmpl)
+ if err:
+ raise err
+ return tmpl
+
+ @staticmethod
+ def format_to_regex(fmt):
+ r"""
+ Converts a string like
+ '%(title)s - %(artist)s'
+ to a regex like
+ '(?P<title>.+)\ \-\ (?P<artist>.+)'
+ """
+ if not re.search(r'%\(\w+\)s', fmt):
+ return fmt
+ lastpos = 0
+ regex = ''
+ # replace %(..)s with regex group and escape other string parts
+ for match in re.finditer(r'%\((\w+)\)s', fmt):
+ regex += re.escape(fmt[lastpos:match.start()])
+ regex += rf'(?P<{match.group(1)}>.+)'
+ lastpos = match.end()
+ if lastpos < len(fmt):
+ regex += re.escape(fmt[lastpos:])
+ return regex
+
+ def run(self, info):
+ for f in self._actions:
+ f(info)
+ return [], info
+
+ @function_with_repr
+ def interpretter(self, inp, out):
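+ # e.g. inp='title', out='%(artist)s - %(title)s' evaluates the current
+ # title, matches it against the regex built by format_to_regex, and on
+ # success writes the named groups back into info (here: artist, title).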
+ def f(info):
+ data_to_parse = self._downloader.evaluate_outtmpl(template, info)
+ self.write_debug(f'Searching for {out_re.pattern!r} in {template!r}')
+ match = out_re.search(data_to_parse)
+ if match is None:
+ self.to_screen(f'Could not interpret {inp!r} as {out!r}')
+ return
+ for attribute, value in filter_dict(match.groupdict()).items():
+ info[attribute] = value
+ self.to_screen(f'Parsed {attribute} from {template!r}: {value!r}')
+
+ template = self.field_to_template(inp)
+ out_re = re.compile(self.format_to_regex(out))
+ return f
+
+ @function_with_repr
+ def replacer(self, field, search, replace):
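+ # e.g. field='title', search=r'\s*\[[^\]]+\]$', replace='' strips a
+ # trailing bracketed tag from the title (an illustrative pattern).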
+ def f(info):
+ val = info.get(field)
+ if val is None:
+ self.to_screen(f'Video does not have a {field}')
+ return
+ elif not isinstance(val, str):
+ self.report_warning(f'Cannot replace in field {field} since it is a {type(val).__name__}')
+ return
+ self.write_debug(f'Replacing all {search!r} in {field} with {replace!r}')
+ info[field], n = search_re.subn(replace, val)
+ if n:
+ self.to_screen(f'Changed {field} to: {info[field]}')
+ else:
+ self.to_screen(f'Did not find {search!r} in {field}')
+
+ search_re = re.compile(search)
+ return f
+
+ Actions = Namespace(INTERPRET=interpretter, REPLACE=replacer)
+
+
+class MetadataFromFieldPP(MetadataParserPP):
+ @classmethod
+ def to_action(cls, f):
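+ # Splits 'FROM:TO' on the first unescaped ':'; a literal colon in FROM
+ # can be written as '\:'. e.g. 'title:%(artist)s - %(title)s'.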
+ match = re.match(r'(?s)(?P<in>.*?)(?<!\\):(?P<out>.+)$', f)
+ if match is None:
+ raise ValueError(f'it should be FROM:TO, not {f!r}')
+ return (
+ cls.Actions.INTERPRET,
+ match.group('in').replace('\\:', ':'),
+ match.group('out'),
+ )
+
+ def __init__(self, downloader, formats):
+ super().__init__(downloader, [self.to_action(f) for f in formats])
+
+
+# Deprecated
+class MetadataFromTitlePP(MetadataParserPP):
+ def __init__(self, downloader, titleformat):
+ super().__init__(downloader, [(self.Actions.INTERPRET, 'title', titleformat)])
+ self.deprecation_warning(
+ 'yt_dlp.postprocessor.MetadataFromTitlePP is deprecated '
+ 'and may be removed in a future version. Use yt_dlp.postprocessor.MetadataFromFieldPP instead')
diff --git a/yt_dlp/postprocessor/modify_chapters.py b/yt_dlp/postprocessor/modify_chapters.py
new file mode 100644
index 0000000..f521986
--- /dev/null
+++ b/yt_dlp/postprocessor/modify_chapters.py
@@ -0,0 +1,336 @@
+import copy
+import heapq
+import os
+
+from .common import PostProcessor
+from .ffmpeg import FFmpegPostProcessor, FFmpegSubtitlesConvertorPP
+from .sponsorblock import SponsorBlockPP
+from ..utils import PostProcessingError, orderedSet, prepend_extension
+
+_TINY_CHAPTER_DURATION = 1
+DEFAULT_SPONSORBLOCK_CHAPTER_TITLE = '[SponsorBlock]: %(category_names)l'
+
+
+class ModifyChaptersPP(FFmpegPostProcessor):
+ def __init__(self, downloader, remove_chapters_patterns=None, remove_sponsor_segments=None, remove_ranges=None,
+ *, sponsorblock_chapter_title=DEFAULT_SPONSORBLOCK_CHAPTER_TITLE, force_keyframes=False):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self._remove_chapters_patterns = set(remove_chapters_patterns or [])
+ self._remove_sponsor_segments = set(remove_sponsor_segments or []) - set(SponsorBlockPP.NON_SKIPPABLE_CATEGORIES.keys())
+ self._ranges_to_remove = set(remove_ranges or [])
+ self._sponsorblock_chapter_title = sponsorblock_chapter_title
+ self._force_keyframes = force_keyframes
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, info):
+ self._fixup_chapters(info)
+ # Chapters must be preserved intact when downloading multiple formats of the same video.
+ chapters, sponsor_chapters = self._mark_chapters_to_remove(
+ copy.deepcopy(info.get('chapters')) or [],
+ copy.deepcopy(info.get('sponsorblock_chapters')) or [])
+ if not chapters and not sponsor_chapters:
+ return [], info
+
+ real_duration = self._get_real_video_duration(info['filepath'])
+ if not chapters:
+ chapters = [{'start_time': 0, 'end_time': info.get('duration') or real_duration, 'title': info['title']}]
+
+ info['chapters'], cuts = self._remove_marked_arrange_sponsors(chapters + sponsor_chapters)
+ if not cuts:
+ return [], info
+ elif not info['chapters']:
+ self.report_warning('You have requested to remove the entire video, which is not possible')
+ return [], info
+
+ original_duration, info['duration'] = info.get('duration'), info['chapters'][-1]['end_time']
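+ # If the file's real duration already deviates from the expected one,
+ # the requested cuts may have been applied by a previous run.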
+ if self._duration_mismatch(real_duration, original_duration, 1):
+ if not self._duration_mismatch(real_duration, info['duration']):
+ self.to_screen(f'Skipping {self.pp_key()} since the video appears to be already cut')
+ return [], info
+ if not info.get('__real_download'):
+ raise PostProcessingError('Cannot cut video since the real and expected durations do not match. '
+ 'Different chapters may have already been removed')
+ else:
+ self.write_debug('Expected and actual durations do not match')
+
+ concat_opts = self._make_concat_opts(cuts, real_duration)
+ self.write_debug('Concat spec = %s' % ', '.join(f'{c.get("inpoint", 0.0)}-{c.get("outpoint", "inf")}' for c in concat_opts))
+
+ def remove_chapters(file, is_sub):
+ return file, self.remove_chapters(file, cuts, concat_opts, self._force_keyframes and not is_sub)
+
+ in_out_files = [remove_chapters(info['filepath'], False)]
+ in_out_files.extend(remove_chapters(in_file, True) for in_file in self._get_supported_subs(info))
+
+ # Renaming should only happen after all files are processed
+ files_to_remove = []
+ for in_file, out_file in in_out_files:
+ mtime = os.stat(in_file).st_mtime
+ uncut_file = prepend_extension(in_file, 'uncut')
+ os.replace(in_file, uncut_file)
+ os.replace(out_file, in_file)
+ self.try_utime(in_file, mtime, mtime)
+ files_to_remove.append(uncut_file)
+
+ return files_to_remove, info
+
+ def _mark_chapters_to_remove(self, chapters, sponsor_chapters):
+ if self._remove_chapters_patterns:
+ warn_no_chapter_to_remove = True
+ if not chapters:
+ self.to_screen('Chapter information is unavailable')
+ warn_no_chapter_to_remove = False
+ for c in chapters:
+ if any(regex.search(c['title']) for regex in self._remove_chapters_patterns):
+ c['remove'] = True
+ warn_no_chapter_to_remove = False
+ if warn_no_chapter_to_remove:
+ self.to_screen('There are no chapters matching the regex')
+
+ if self._remove_sponsor_segments:
+ warn_no_chapter_to_remove = True
+ if not sponsor_chapters:
+ self.to_screen('SponsorBlock information is unavailable')
+ warn_no_chapter_to_remove = False
+ for c in sponsor_chapters:
+ if c['category'] in self._remove_sponsor_segments:
+ c['remove'] = True
+ warn_no_chapter_to_remove = False
+ if warn_no_chapter_to_remove:
+ self.to_screen('There are no matching SponsorBlock chapters')
+
+ sponsor_chapters.extend({
+ 'start_time': start,
+ 'end_time': end,
+ 'category': 'manually_removed',
+ '_categories': [('manually_removed', start, end, 'Manually removed')],
+ 'remove': True,
+ } for start, end in self._ranges_to_remove)
+
+ return chapters, sponsor_chapters
+
+ def _get_supported_subs(self, info):
+ for sub in (info.get('requested_subtitles') or {}).values():
+ sub_file = sub.get('filepath')
+ # The file might have been removed by --embed-subs
+ if not sub_file or not os.path.exists(sub_file):
+ continue
+ ext = sub['ext']
+ if ext not in FFmpegSubtitlesConvertorPP.SUPPORTED_EXTS:
+ self.report_warning(f'Cannot remove chapters from external {ext} subtitles; "{sub_file}" is now out of sync')
+ continue
+ # TODO: create __real_download for subs?
+ yield sub_file
+
+ def _remove_marked_arrange_sponsors(self, chapters):
+ # Store cuts separately, since adjacent and overlapping cuts must be merged.
+ cuts = []
+
+ def append_cut(c):
+ assert 'remove' in c, 'Only cuts may be appended to cuts'
+ last_to_cut = cuts[-1] if cuts else None
+ if last_to_cut and last_to_cut['end_time'] >= c['start_time']:
+ last_to_cut['end_time'] = max(last_to_cut['end_time'], c['end_time'])
+ else:
+ cuts.append(c)
+ return len(cuts) - 1
+
+ def excess_duration(c):
+ # Cuts that are completely within the chapter reduce the chapter's duration.
+ # Since cuts can overlap, the excess duration may be less than the sum of the cuts' durations.
+ # To avoid miscounting, the chapter stores the index of the first cut within it
+ # instead of the excess duration. append_cut ensures that subsequent cuts (if any)
+ # will be merged with previous ones (if necessary).
+ cut_idx, excess = c.pop('cut_idx', len(cuts)), 0
+ while cut_idx < len(cuts):
+ cut = cuts[cut_idx]
+ if cut['start_time'] >= c['end_time']:
+ break
+ if cut['end_time'] > c['start_time']:
+ excess += min(cut['end_time'], c['end_time'])
+ excess -= max(cut['start_time'], c['start_time'])
+ cut_idx += 1
+ return excess
+
+ new_chapters = []
+
+ def append_chapter(c):
+ assert 'remove' not in c, 'A cut was appended to chapters'
+ length = c['end_time'] - c['start_time'] - excess_duration(c)
+ # Chapter is completely covered by cuts or sponsors.
+ if length <= 0:
+ return
+ start = new_chapters[-1]['end_time'] if new_chapters else 0
+ c.update(start_time=start, end_time=start + length)
+ new_chapters.append(c)
+
+ # Turn into a priority queue, index is a tie breaker.
+ # Plain stack sorted by start_time is not enough: after splitting the chapter,
+ # the part returned to the stack is not guaranteed to have start_time
+ # less than or equal to that of the stack's head.
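+ # e.g. when a cut [10, 20] splits chapter [0, 100], the remainder
+ # [20, 100] must be re-ordered against a sponsor starting at 15 that is
+ # still in the queue.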
+ chapters = [(c['start_time'], i, c) for i, c in enumerate(chapters)]
+ heapq.heapify(chapters)
+
+ _, cur_i, cur_chapter = heapq.heappop(chapters)
+ while chapters:
+ _, i, c = heapq.heappop(chapters)
+ # Non-overlapping chapters or cuts can be appended directly. However,
+ # adjacent non-overlapping cuts must be merged, which is handled by append_cut.
+ if cur_chapter['end_time'] <= c['start_time']:
+ (append_chapter if 'remove' not in cur_chapter else append_cut)(cur_chapter)
+ cur_i, cur_chapter = i, c
+ continue
+
+ # Eight possibilities for overlapping chapters: (cut, cut), (cut, sponsor),
+ # (cut, normal), (sponsor, cut), (normal, cut), (sponsor, sponsor),
+ # (sponsor, normal), and (normal, sponsor). There is no (normal, normal):
+ # normal chapters are assumed not to overlap.
+ if 'remove' in cur_chapter:
+ # (cut, cut): adjust end_time.
+ if 'remove' in c:
+ cur_chapter['end_time'] = max(cur_chapter['end_time'], c['end_time'])
+ # (cut, sponsor/normal): chop the beginning of the later chapter
+ # (if it's not completely hidden by the cut). Push to the priority queue
+ # to restore sorting by start_time: with beginning chopped, c may actually
+ # start later than the remaining chapters from the queue.
+ elif cur_chapter['end_time'] < c['end_time']:
+ c['start_time'] = cur_chapter['end_time']
+ c['_was_cut'] = True
+ heapq.heappush(chapters, (c['start_time'], i, c))
+ # (sponsor/normal, cut).
+ elif 'remove' in c:
+ cur_chapter['_was_cut'] = True
+ # Chop the end of the current chapter if the cut is not contained within it.
+ # Chopping the end doesn't break start_time sorting, no PQ push is necessary.
+ if cur_chapter['end_time'] <= c['end_time']:
+ cur_chapter['end_time'] = c['start_time']
+ append_chapter(cur_chapter)
+ cur_i, cur_chapter = i, c
+ continue
+ # Current chapter contains the cut within it. If the current chapter is
+ # a sponsor chapter, check whether the categories before and after the cut differ.
+ if '_categories' in cur_chapter:
+ after_c = dict(cur_chapter, start_time=c['end_time'], _categories=[])
+ cur_cats = []
+ for cat_start_end in cur_chapter['_categories']:
+ if cat_start_end[1] < c['start_time']:
+ cur_cats.append(cat_start_end)
+ if cat_start_end[2] > c['end_time']:
+ after_c['_categories'].append(cat_start_end)
+ cur_chapter['_categories'] = cur_cats
+ if cur_chapter['_categories'] != after_c['_categories']:
+ # Categories before and after the cut differ: push the after part to PQ.
+ heapq.heappush(chapters, (after_c['start_time'], cur_i, after_c))
+ cur_chapter['end_time'] = c['start_time']
+ append_chapter(cur_chapter)
+ cur_i, cur_chapter = i, c
+ continue
+ # Either sponsor categories before and after the cut are the same or
+ # we're dealing with a normal chapter. Just register an outstanding cut:
+ # subsequent append_chapter will reduce the duration.
+ cur_chapter.setdefault('cut_idx', append_cut(c))
+ # (sponsor, normal): if a normal chapter is not completely overlapped,
+ # chop the beginning of it and push it to PQ.
+ elif '_categories' in cur_chapter and '_categories' not in c:
+ if cur_chapter['end_time'] < c['end_time']:
+ c['start_time'] = cur_chapter['end_time']
+ c['_was_cut'] = True
+ heapq.heappush(chapters, (c['start_time'], i, c))
+ # (normal, sponsor) and (sponsor, sponsor)
+ else:
+ assert '_categories' in c, 'Normal chapters overlap'
+ cur_chapter['_was_cut'] = True
+ c['_was_cut'] = True
+ # Push the part after the sponsor to PQ.
+ if cur_chapter['end_time'] > c['end_time']:
+ # deepcopy to make categories in after_c and cur_chapter/c refer to different lists.
+ after_c = dict(copy.deepcopy(cur_chapter), start_time=c['end_time'])
+ heapq.heappush(chapters, (after_c['start_time'], cur_i, after_c))
+ # Push the part after the overlap to PQ.
+ elif c['end_time'] > cur_chapter['end_time']:
+ after_cur = dict(copy.deepcopy(c), start_time=cur_chapter['end_time'])
+ heapq.heappush(chapters, (after_cur['start_time'], cur_i, after_cur))
+ c['end_time'] = cur_chapter['end_time']
+ # (sponsor, sponsor): merge categories in the overlap.
+ if '_categories' in cur_chapter:
+ c['_categories'] = cur_chapter['_categories'] + c['_categories']
+ # Inherit the cuts that the current chapter has accumulated within it.
+ if 'cut_idx' in cur_chapter:
+ c['cut_idx'] = cur_chapter['cut_idx']
+ cur_chapter['end_time'] = c['start_time']
+ append_chapter(cur_chapter)
+ cur_i, cur_chapter = i, c
+ (append_chapter if 'remove' not in cur_chapter else append_cut)(cur_chapter)
+ return self._remove_tiny_rename_sponsors(new_chapters), cuts
+
+ def _remove_tiny_rename_sponsors(self, chapters):
+ new_chapters = []
+ for i, c in enumerate(chapters):
+ # Merge with the previous/next if the chapter is tiny.
+ # Only tiny chapters resulting from a cut can be skipped.
+ # Chapters that were already tiny in the original list will be preserved.
+ if (('_was_cut' in c or '_categories' in c)
+ and c['end_time'] - c['start_time'] < _TINY_CHAPTER_DURATION):
+ if not new_chapters:
+ # Prepend tiny chapter to the next one if possible.
+ if i < len(chapters) - 1:
+ chapters[i + 1]['start_time'] = c['start_time']
+ continue
+ else:
+ old_c = new_chapters[-1]
+ if i < len(chapters) - 1:
+ next_c = chapters[i + 1]
+ # Not a typo: key names in old_c and next_c are really different.
+ prev_is_sponsor = 'categories' in old_c
+ next_is_sponsor = '_categories' in next_c
+ # Preferentially prepend tiny normals to normals and sponsors to sponsors.
+ if (('_categories' not in c and prev_is_sponsor and not next_is_sponsor)
+ or ('_categories' in c and not prev_is_sponsor and next_is_sponsor)):
+ next_c['start_time'] = c['start_time']
+ continue
+ old_c['end_time'] = c['end_time']
+ continue
+
+ c.pop('_was_cut', None)
+ cats = c.pop('_categories', None)
+ if cats:
+ category, _, _, category_name = min(cats, key=lambda c: c[2] - c[1])
+ c.update({
+ 'category': category,
+ 'categories': orderedSet(x[0] for x in cats),
+ 'name': category_name,
+ 'category_names': orderedSet(x[3] for x in cats),
+ })
+ c['title'] = self._downloader.evaluate_outtmpl(self._sponsorblock_chapter_title, c.copy())
+ # Merge identically named sponsors.
+ if (new_chapters and 'categories' in new_chapters[-1]
+ and new_chapters[-1]['title'] == c['title']):
+ new_chapters[-1]['end_time'] = c['end_time']
+ continue
+ new_chapters.append(c)
+ return new_chapters
+
+ def remove_chapters(self, filename, ranges_to_cut, concat_opts, force_keyframes=False):
+ in_file = filename
+ out_file = prepend_extension(in_file, 'temp')
+ if force_keyframes:
+ in_file = self.force_keyframes(in_file, (t for c in ranges_to_cut for t in (c['start_time'], c['end_time'])))
+ self.to_screen(f'Removing chapters from {filename}')
+ self.concat_files([in_file] * len(concat_opts), out_file, concat_opts)
+ if in_file != filename:
+ self._delete_downloaded_files(in_file, msg=None)
+ return out_file
+
+ @staticmethod
+ def _make_concat_opts(chapters_to_remove, duration):
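+ # e.g. for a 60 s video with one cut [10, 20], this yields
+ # [{'outpoint': '10.000000'}, {'inpoint': '20.000000'}],
+ # i.e. keep [0, 10) and [20, 60].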
+ opts = [{}]
+ for s in chapters_to_remove:
+ # Do not create 0 duration chunk at the beginning.
+ if s['start_time'] == 0:
+ opts[-1]['inpoint'] = f'{s["end_time"]:.6f}'
+ continue
+ opts[-1]['outpoint'] = f'{s["start_time"]:.6f}'
+ # Do not create 0 duration chunk at the end.
+ if s['end_time'] < duration:
+ opts.append({'inpoint': f'{s["end_time"]:.6f}'})
+ return opts
diff --git a/yt_dlp/postprocessor/movefilesafterdownload.py b/yt_dlp/postprocessor/movefilesafterdownload.py
new file mode 100644
index 0000000..23b0924
--- /dev/null
+++ b/yt_dlp/postprocessor/movefilesafterdownload.py
@@ -0,0 +1,53 @@
+import os
+
+from .common import PostProcessor
+from ..compat import shutil
+from ..utils import (
+ PostProcessingError,
+ decodeFilename,
+ encodeFilename,
+ make_dir,
+)
+
+
+class MoveFilesAfterDownloadPP(PostProcessor):
+
+ def __init__(self, downloader=None, downloaded=True):
+ PostProcessor.__init__(self, downloader)
+ self._downloaded = downloaded
+
+ @classmethod
+ def pp_key(cls):
+ return 'MoveFiles'
+
+ def run(self, info):
+ dl_path, dl_name = os.path.split(encodeFilename(info['filepath']))
+ finaldir = info.get('__finaldir', dl_path)
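+ # '__finaldir' is set when downloading happens in a temporary
+ # directory; it points at the final destination directory.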
+ finalpath = os.path.join(finaldir, dl_name)
+ if self._downloaded:
+ info['__files_to_move'][info['filepath']] = decodeFilename(finalpath)
+
+ make_newfilename = lambda old: decodeFilename(os.path.join(finaldir, os.path.basename(encodeFilename(old))))
+ for oldfile, newfile in info['__files_to_move'].items():
+ if not newfile:
+ newfile = make_newfilename(oldfile)
+ if os.path.abspath(encodeFilename(oldfile)) == os.path.abspath(encodeFilename(newfile)):
+ continue
+ if not os.path.exists(encodeFilename(oldfile)):
+ self.report_warning('File "%s" cannot be found' % oldfile)
+ continue
+ if os.path.exists(encodeFilename(newfile)):
+ if self.get_param('overwrites', True):
+ self.report_warning('Replacing existing file "%s"' % newfile)
+ os.remove(encodeFilename(newfile))
+ else:
+ self.report_warning(
+ 'Cannot move file "%s" out of temporary directory since "%s" already exists. '
+ % (oldfile, newfile))
+ continue
+ make_dir(newfile, PostProcessingError)
+ self.to_screen(f'Moving file "{oldfile}" to "{newfile}"')
+ shutil.move(oldfile, newfile) # os.rename cannot move between volumes
+
+ info['filepath'] = finalpath
+ return [], info
diff --git a/yt_dlp/postprocessor/sponskrub.py b/yt_dlp/postprocessor/sponskrub.py
new file mode 100644
index 0000000..ff50d5b
--- /dev/null
+++ b/yt_dlp/postprocessor/sponskrub.py
@@ -0,0 +1,98 @@
+import os
+import shlex
+import subprocess
+
+from .common import PostProcessor
+from ..utils import (
+ Popen,
+ PostProcessingError,
+ check_executable,
+ cli_option,
+ encodeArgument,
+ encodeFilename,
+ prepend_extension,
+ shell_quote,
+ str_or_none,
+)
+
+
+# Deprecated in favor of the native implementation
+class SponSkrubPP(PostProcessor):
+ _temp_ext = 'spons'
+ _exe_name = 'sponskrub'
+
+ def __init__(self, downloader, path='', args=None, ignoreerror=False, cut=False, force=False, _from_cli=False):
+ PostProcessor.__init__(self, downloader)
+ self.force = force
+ self.cutout = cut
+ self.args = str_or_none(args) or '' # For backward compatibility
+ self.path = self.get_exe(path)
+
+ if not _from_cli:
+ self.deprecation_warning(
+ 'yt_dlp.postprocessor.SponSkrubPP support is deprecated and may be removed in a future version. '
+ 'Use yt_dlp.postprocessor.SponsorBlock and yt_dlp.postprocessor.ModifyChaptersPP instead')
+
+ if not ignoreerror and self.path is None:
+ if path:
+ raise PostProcessingError('sponskrub not found in "%s"' % path)
+ else:
+ raise PostProcessingError('sponskrub not found. Please install or provide the path using --sponskrub-path')
+
+ def get_exe(self, path=''):
+ if not path or not check_executable(path, ['-h']):
+ path = os.path.join(path, self._exe_name)
+ if not check_executable(path, ['-h']):
+ return None
+ return path
+
+ @PostProcessor._restrict_to(images=False)
+ def run(self, information):
+ if self.path is None:
+ return [], information
+
+ filename = information['filepath']
+ if not os.path.exists(encodeFilename(filename)): # no download
+ return [], information
+
+ if information['extractor_key'].lower() != 'youtube':
+ self.to_screen('Skipping sponskrub since it is not a YouTube video')
+ return [], information
+ if self.cutout and not self.force and not information.get('__real_download', False):
+ self.report_warning(
+ 'Skipping sponskrub since the video was already downloaded. '
+ 'Use --sponskrub-force to run sponskrub anyway')
+ return [], information
+
+ self.to_screen('Trying to %s sponsor sections' % ('remove' if self.cutout else 'mark'))
+ if self.cutout:
+ self.report_warning('Cutting out sponsor segments will cause the subtitles to go out of sync.')
+ if not information.get('__real_download', False):
+ self.report_warning('If sponskrub is run multiple times, unintended parts of the video could be cut out.')
+
+ temp_filename = prepend_extension(filename, self._temp_ext)
+ if os.path.exists(encodeFilename(temp_filename)):
+ os.remove(encodeFilename(temp_filename))
+
+ cmd = [self.path]
+ if not self.cutout:
+ cmd += ['-chapter']
+ cmd += cli_option(self._downloader.params, '-proxy', 'proxy')
+ cmd += shlex.split(self.args) # For backward compatibility
+ cmd += self._configuration_args(self._exe_name, use_compat=False)
+ cmd += ['--', information['id'], filename, temp_filename]
+ cmd = [encodeArgument(i) for i in cmd]
+
+ self.write_debug('sponskrub command line: %s' % shell_quote(cmd))
+ stdout, _, returncode = Popen.run(cmd, text=True, stdout=None if self.get_param('verbose') else subprocess.PIPE)
+
+ if not returncode:
+ os.replace(temp_filename, filename)
+ self.to_screen('Sponsor sections have been %s' % ('removed' if self.cutout else 'marked'))
+ elif returncode == 3:
+ self.to_screen('No segments in the SponsorBlock database')
+ else:
+ raise PostProcessingError(
+ stdout.strip().splitlines()[0 if stdout.strip().lower().startswith('unrecognised') else -1]
+ or f'sponskrub failed with error code {returncode}')
+ return [], information
diff --git a/yt_dlp/postprocessor/sponsorblock.py b/yt_dlp/postprocessor/sponsorblock.py
new file mode 100644
index 0000000..6ba87cd
--- /dev/null
+++ b/yt_dlp/postprocessor/sponsorblock.py
@@ -0,0 +1,104 @@
+import hashlib
+import json
+import re
+import urllib.parse
+
+from .ffmpeg import FFmpegPostProcessor
+
+
+class SponsorBlockPP(FFmpegPostProcessor):
+ # https://wiki.sponsor.ajay.app/w/Types
+ EXTRACTORS = {
+ 'Youtube': 'YouTube',
+ }
+ POI_CATEGORIES = {
+ 'poi_highlight': 'Highlight',
+ }
+ NON_SKIPPABLE_CATEGORIES = {
+ **POI_CATEGORIES,
+ 'chapter': 'Chapter',
+ }
+ CATEGORIES = {
+ 'sponsor': 'Sponsor',
+ 'intro': 'Intermission/Intro Animation',
+ 'outro': 'Endcards/Credits',
+ 'selfpromo': 'Unpaid/Self Promotion',
+ 'preview': 'Preview/Recap',
+ 'filler': 'Filler Tangent',
+ 'interaction': 'Interaction Reminder',
+ 'music_offtopic': 'Non-Music Section',
+ **NON_SKIPPABLE_CATEGORIES
+ }
+
+ def __init__(self, downloader, categories=None, api='https://sponsor.ajay.app'):
+ FFmpegPostProcessor.__init__(self, downloader)
+ self._categories = tuple(categories or self.CATEGORIES.keys())
+ self._API_URL = api if re.match('^https?://', api) else 'https://' + api
+
+ def run(self, info):
+ extractor = info['extractor_key']
+ if extractor not in self.EXTRACTORS:
+ self.to_screen(f'SponsorBlock is not supported for {extractor}')
+ return [], info
+
+ self.to_screen('Fetching SponsorBlock segments')
+ info['sponsorblock_chapters'] = self._get_sponsor_chapters(info, info.get('duration'))
+ return [], info
+
+ def _get_sponsor_chapters(self, info, duration):
+ segments = self._get_sponsor_segments(info['id'], self.EXTRACTORS[info['extractor_key']])
+
+ def duration_filter(s):
+ start_end = s['segment']
+ # Ignore entire video segments (https://wiki.sponsor.ajay.app/w/Types).
+ if tuple(start_end) == (0, 0): # s['segment'] is a JSON list, not a tuple
+ return False
+ # Ignore milliseconds difference at the start.
+ if start_end[0] <= 1:
+ start_end[0] = 0
+ # Make POI chapters 1 sec so that we can properly mark them
+ if s['category'] in self.POI_CATEGORIES.keys():
+ start_end[1] += 1
+ # Ignore milliseconds difference at the end.
+ # Never allow the segment to exceed the video.
+ if duration and duration - start_end[1] <= 1:
+ start_end[1] = duration
+ # SponsorBlock duration may be absent or it may deviate from the real one.
+ diff = abs(duration - s['videoDuration']) if s['videoDuration'] else 0
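+ # Accept the segment if the reported duration is within 1 second of
+ # ours, or within 5 seconds when the difference is under 5% of the
+ # segment's length.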
+ return diff < 1 or (diff < 5 and diff / (start_end[1] - start_end[0]) < 0.05)
+
+ duration_match = [s for s in segments if duration_filter(s)]
+ if len(duration_match) != len(segments):
+ self.report_warning('Some SponsorBlock segments are from a video of different duration, maybe from an old version of this video')
+
+ def to_chapter(s):
+ (start, end), cat = s['segment'], s['category']
+ title = s['description'] if cat == 'chapter' else self.CATEGORIES[cat]
+ return {
+ 'start_time': start,
+ 'end_time': end,
+ 'category': cat,
+ 'title': title,
+ 'type': s['actionType'],
+ '_categories': [(cat, start, end, title)],
+ }
+
+ sponsor_chapters = [to_chapter(s) for s in duration_match]
+ if not sponsor_chapters:
+ self.to_screen('No matching segments were found in the SponsorBlock database')
+ else:
+ self.to_screen(f'Found {len(sponsor_chapters)} segments in the SponsorBlock database')
+ return sponsor_chapters
+
+ def _get_sponsor_segments(self, video_id, service):
+ hash = hashlib.sha256(video_id.encode('ascii')).hexdigest()
+ # SponsorBlock API recommends using first 4 hash characters.
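+ # Sending only a hash prefix keeps the queried video ID private
+ # (k-anonymity): the response covers every video matching the prefix,
+ # hence the videoID check below.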
+ url = f'{self._API_URL}/api/skipSegments/{hash[:4]}?' + urllib.parse.urlencode({
+ 'service': service,
+ 'categories': json.dumps(self._categories),
+ 'actionTypes': json.dumps(['skip', 'poi', 'chapter'])
+ })
+ for d in self._download_json(url) or []:
+ if d['videoID'] == video_id:
+ return d['segments']
+ return []
diff --git a/yt_dlp/postprocessor/xattrpp.py b/yt_dlp/postprocessor/xattrpp.py
new file mode 100644
index 0000000..f822eff
--- /dev/null
+++ b/yt_dlp/postprocessor/xattrpp.py
@@ -0,0 +1,63 @@
+import os
+
+from .common import PostProcessor
+from ..compat import compat_os_name
+from ..utils import (
+ PostProcessingError,
+ XAttrMetadataError,
+ XAttrUnavailableError,
+ hyphenate_date,
+ write_xattr,
+)
+
+
+class XAttrMetadataPP(PostProcessor):
+ """Set extended attributes on downloaded file (if xattr support is found)
+
+ More info about extended attributes for media:
+ http://freedesktop.org/wiki/CommonExtendedAttributes/
+ http://www.freedesktop.org/wiki/PhreedomDraft/
+ http://dublincore.org/documents/usageguide/elements.shtml
+
+ TODO:
+ * capture youtube keywords and put them in 'user.dublincore.subject' (comma-separated)
+ * figure out which xattrs can be used for 'duration', 'thumbnail', 'resolution'
+ """
+
+ XATTR_MAPPING = {
+ 'user.xdg.referrer.url': 'webpage_url',
+ # 'user.xdg.comment': 'description',
+ 'user.dublincore.title': 'title',
+ 'user.dublincore.date': 'upload_date',
+ 'user.dublincore.description': 'description',
+ 'user.dublincore.contributor': 'uploader',
+ 'user.dublincore.format': 'format',
+ }
+
+ def run(self, info):
+ mtime = os.stat(info['filepath']).st_mtime
+ self.to_screen('Writing metadata to file\'s xattrs')
+ try:
+ for xattrname, infoname in self.XATTR_MAPPING.items():
+ value = info.get(infoname)
+ if value:
+ if infoname == 'upload_date':
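+ # Convert YYYYMMDD to the Dublin Core YYYY-MM-DD form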
+ value = hyphenate_date(value)
+ write_xattr(info['filepath'], xattrname, value.encode())
+
+ except XAttrUnavailableError as e:
+ raise PostProcessingError(str(e))
+ except XAttrMetadataError as e:
+ if e.reason == 'NO_SPACE':
+ self.report_warning(
+ 'There is no disk space left, the disk quota was exceeded, or the filesystem xattr limit was exceeded. '
+ 'Some extended attributes were not written')
+ elif e.reason == 'VALUE_TOO_LONG':
+ self.report_warning('Unable to write extended attributes because the values are too long.')
+ else:
+ tip = ('You need to use NTFS' if compat_os_name == 'nt'
+ else 'You may have to enable them in your "/etc/fstab"')
+ raise PostProcessingError(f'This filesystem doesn\'t support extended attributes. {tip}')
+
+ self.try_utime(info['filepath'], mtime, mtime)
+ return [], info