2021-06-03 11:43:42 +02:00
|
|
|
#!/usr/bin/env python3
|
2016-10-02 13:39:18 +02:00
|
|
|
# coding: utf-8
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2014-01-05 01:52:03 +01:00
|
|
|
from __future__ import absolute_import, unicode_literals
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2013-12-09 22:00:42 +01:00
|
|
|
import collections
|
2015-03-01 11:46:57 +01:00
|
|
|
import contextlib
|
2014-03-13 15:30:25 +01:00
|
|
|
import datetime
|
2013-10-06 04:27:09 +02:00
|
|
|
import errno
|
2015-03-01 11:46:57 +01:00
|
|
|
import fileinput
|
2021-10-09 02:23:15 +02:00
|
|
|
import functools
|
2013-06-18 22:14:21 +02:00
|
|
|
import io
|
2014-12-06 14:02:19 +01:00
|
|
|
import itertools
|
2013-11-20 06:18:24 +01:00
|
|
|
import json
|
2014-03-30 06:02:41 +02:00
|
|
|
import locale
|
2015-01-23 00:04:05 +01:00
|
|
|
import operator
|
2013-06-18 22:14:21 +02:00
|
|
|
import os
|
2013-11-22 19:57:52 +01:00
|
|
|
import platform
|
2013-06-18 22:14:21 +02:00
|
|
|
import re
|
|
|
|
import shutil
|
2013-11-22 19:57:52 +01:00
|
|
|
import subprocess
|
2013-06-18 22:14:21 +02:00
|
|
|
import sys
|
2021-06-12 17:18:06 +02:00
|
|
|
import tempfile
|
2013-06-18 22:14:21 +02:00
|
|
|
import time
|
2015-06-28 22:08:29 +02:00
|
|
|
import tokenize
|
2013-06-18 22:14:21 +02:00
|
|
|
import traceback
|
2017-01-31 10:03:31 +01:00
|
|
|
import random
|
2021-09-25 22:09:44 +02:00
|
|
|
import unicodedata
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-10-20 18:37:32 +02:00
|
|
|
from enum import Enum
|
2017-07-15 02:02:14 +02:00
|
|
|
from string import ascii_letters
|
|
|
|
|
2014-11-02 11:23:40 +01:00
|
|
|
from .compat import (
|
2015-11-19 22:08:34 +01:00
|
|
|
compat_basestring,
|
2015-02-28 21:42:16 +01:00
|
|
|
compat_get_terminal_size,
|
2014-12-15 01:06:25 +01:00
|
|
|
compat_kwargs,
|
2016-03-05 22:52:42 +01:00
|
|
|
compat_numeric_types,
|
2016-03-03 12:24:24 +01:00
|
|
|
compat_os_name,
|
2021-09-17 21:21:27 +02:00
|
|
|
compat_pycrypto_AES,
|
2021-07-29 04:56:17 +02:00
|
|
|
compat_shlex_quote,
|
2013-11-17 16:47:52 +01:00
|
|
|
compat_str,
|
2015-06-28 22:08:29 +02:00
|
|
|
compat_tokenize_tokenize,
|
2013-11-17 16:47:52 +01:00
|
|
|
compat_urllib_error,
|
|
|
|
compat_urllib_request,
|
2015-10-17 17:16:40 +02:00
|
|
|
compat_urllib_request_DataHandler,
|
2021-10-08 21:11:59 +02:00
|
|
|
windows_enable_vt_mode,
|
2014-11-02 11:23:40 +01:00
|
|
|
)
|
2021-07-21 22:32:49 +02:00
|
|
|
from .cookies import load_cookies
|
2014-11-02 11:23:40 +01:00
|
|
|
from .utils import (
|
2016-03-26 14:40:33 +01:00
|
|
|
age_restricted,
|
|
|
|
args_to_str,
|
2013-11-17 16:47:52 +01:00
|
|
|
ContentTooShortError,
|
|
|
|
date_from_str,
|
|
|
|
DateRange,
|
2014-04-30 10:02:03 +02:00
|
|
|
DEFAULT_OUTTMPL,
|
2013-11-17 16:47:52 +01:00
|
|
|
determine_ext,
|
2016-01-16 05:10:28 +01:00
|
|
|
determine_protocol,
|
2021-10-26 16:45:12 +02:00
|
|
|
DownloadCancelled,
|
2013-11-17 16:47:52 +01:00
|
|
|
DownloadError,
|
2015-12-20 01:29:36 +01:00
|
|
|
encode_compat_str,
|
2013-11-17 16:47:52 +01:00
|
|
|
encodeFilename,
|
2021-03-23 20:45:53 +01:00
|
|
|
EntryNotInPlaylist,
|
2021-05-17 14:23:08 +02:00
|
|
|
error_to_compat_str,
|
2021-01-13 02:01:01 +01:00
|
|
|
ExistingVideoReached,
|
2017-03-25 20:31:16 +01:00
|
|
|
expand_path,
|
2013-11-17 16:47:52 +01:00
|
|
|
ExtractorError,
|
2021-02-02 22:15:00 +01:00
|
|
|
float_or_none,
|
2013-11-25 03:12:26 +01:00
|
|
|
format_bytes,
|
2020-12-13 15:29:09 +01:00
|
|
|
format_field,
|
2021-12-23 02:14:42 +01:00
|
|
|
format_decimal_suffix,
|
2013-12-16 04:15:10 +01:00
|
|
|
formatSeconds,
|
2017-02-04 12:49:58 +01:00
|
|
|
GeoRestrictedError,
|
2021-10-18 04:13:21 +02:00
|
|
|
get_domain,
|
2021-06-12 17:21:00 +02:00
|
|
|
HEADRequest,
|
2022-01-23 18:55:17 +01:00
|
|
|
InAdvancePagedList,
|
2017-06-08 17:53:14 +02:00
|
|
|
int_or_none,
|
2020-10-27 11:37:21 +01:00
|
|
|
iri_to_uri,
|
2017-02-04 12:49:58 +01:00
|
|
|
ISO3166Utils,
|
2021-11-06 02:05:24 +01:00
|
|
|
join_nonempty,
|
2021-05-28 18:37:11 +02:00
|
|
|
LazyList,
|
2021-10-26 16:41:59 +02:00
|
|
|
LINK_TEMPLATES,
|
2013-11-17 16:47:52 +01:00
|
|
|
locked_file,
|
2021-01-23 13:18:12 +01:00
|
|
|
make_dir,
|
2013-11-22 19:57:52 +01:00
|
|
|
make_HTTPS_handler,
|
2013-11-17 16:47:52 +01:00
|
|
|
MaxDownloadsReached,
|
2021-05-04 19:06:18 +02:00
|
|
|
network_exceptions,
|
2021-10-20 18:37:32 +02:00
|
|
|
number_of_digits,
|
[YoutubeDL] Ignore duplicates in --playlist-items
E.g. '--playlist-items 2-4,3-4,3' should result in '[2,3,4]', not '[2,3,4,3,4,3]'
2017-10-06 18:46:57 +02:00
|
|
|
orderedSet,
|
2021-05-17 14:23:08 +02:00
|
|
|
OUTTMPL_TYPES,
|
2014-01-20 11:36:47 +01:00
|
|
|
PagedList,
|
2015-01-23 00:04:05 +01:00
|
|
|
parse_filesize,
|
2015-03-03 00:03:06 +01:00
|
|
|
PerRequestProxyHandler,
|
2013-11-22 19:57:52 +01:00
|
|
|
platform_name,
|
2021-10-20 18:19:40 +02:00
|
|
|
Popen,
|
2022-01-03 12:13:54 +01:00
|
|
|
POSTPROCESS_WHEN,
|
2016-03-26 14:40:33 +01:00
|
|
|
PostProcessingError,
|
2013-11-17 16:47:52 +01:00
|
|
|
preferredencoding,
|
2016-03-26 14:40:33 +01:00
|
|
|
prepend_extension,
|
2021-11-28 19:57:44 +01:00
|
|
|
ReExtractInfo,
|
2016-05-03 09:15:32 +02:00
|
|
|
register_socks_protocols,
|
2021-05-17 14:23:08 +02:00
|
|
|
RejectedVideoReached,
|
2021-12-01 18:09:57 +01:00
|
|
|
remove_terminal_sequences,
|
2015-01-25 02:38:47 +01:00
|
|
|
render_table,
|
2016-03-26 14:40:33 +01:00
|
|
|
replace_extension,
|
2013-11-17 16:47:52 +01:00
|
|
|
SameFileError,
|
|
|
|
sanitize_filename,
|
2015-03-08 15:57:30 +01:00
|
|
|
sanitize_path,
|
2016-03-26 14:37:41 +01:00
|
|
|
sanitize_url,
|
2015-11-20 15:33:49 +01:00
|
|
|
sanitized_Request,
|
2015-01-24 18:52:26 +01:00
|
|
|
std_headers,
|
2021-10-08 21:11:59 +02:00
|
|
|
STR_FORMAT_RE_TMPL,
|
|
|
|
STR_FORMAT_TYPES,
|
2019-02-07 19:08:48 +01:00
|
|
|
str_or_none,
|
2021-02-02 22:15:00 +01:00
|
|
|
strftime_or_none,
|
2013-11-17 16:47:52 +01:00
|
|
|
subtitles_filename,
|
2021-10-08 21:11:59 +02:00
|
|
|
supports_terminal_sequences,
|
2021-11-28 19:57:44 +01:00
|
|
|
timetuple_from_msec,
|
2020-10-27 11:37:21 +01:00
|
|
|
to_high_limit_path,
|
2021-06-08 10:53:56 +02:00
|
|
|
traverse_obj,
|
2021-06-24 16:38:43 +02:00
|
|
|
try_get,
|
2013-11-17 16:47:52 +01:00
|
|
|
UnavailableVideoError,
|
2013-12-17 04:13:36 +01:00
|
|
|
url_basename,
|
2021-07-29 04:56:17 +02:00
|
|
|
variadic,
|
2015-01-10 21:02:27 +01:00
|
|
|
version_tuple,
|
2013-11-17 16:47:52 +01:00
|
|
|
write_json_file,
|
|
|
|
write_string,
|
2015-09-06 02:21:33 +02:00
|
|
|
YoutubeDLCookieProcessor,
|
2013-11-22 19:57:52 +01:00
|
|
|
YoutubeDLHandler,
|
2020-02-29 13:08:44 +01:00
|
|
|
YoutubeDLRedirectHandler,
|
2013-11-17 16:47:52 +01:00
|
|
|
)
|
2014-09-03 12:41:05 +02:00
|
|
|
from .cache import Cache
|
2021-10-20 18:37:32 +02:00
|
|
|
from .minicurses import format_text
|
2021-04-10 17:08:33 +02:00
|
|
|
from .extractor import (
|
|
|
|
gen_extractor_classes,
|
|
|
|
get_info_extractor,
|
|
|
|
_LAZY_LOADER,
|
2021-09-29 22:53:33 +02:00
|
|
|
_PLUGIN_CLASSES as plugin_extractors
|
2021-04-10 17:08:33 +02:00
|
|
|
)
|
2017-09-23 19:08:27 +02:00
|
|
|
from .extractor.openload import PhantomJSwrapper
|
2021-04-10 17:08:33 +02:00
|
|
|
from .downloader import (
|
2021-07-31 12:51:01 +02:00
|
|
|
FFmpegFD,
|
2021-04-10 17:08:33 +02:00
|
|
|
get_suitable_downloader,
|
|
|
|
shorten_protocol_name
|
|
|
|
)
|
2014-11-02 10:55:36 +01:00
|
|
|
from .downloader.rtmp import rtmpdump_version
|
2014-12-15 01:06:25 +01:00
|
|
|
from .postprocessor import (
|
2021-06-21 19:23:17 +02:00
|
|
|
get_postprocessor,
|
2021-10-09 18:18:46 +02:00
|
|
|
EmbedThumbnailPP,
|
2021-12-20 07:06:46 +01:00
|
|
|
FFmpegFixupDuplicateMoovPP,
|
2021-06-21 19:23:17 +02:00
|
|
|
FFmpegFixupDurationPP,
|
2016-03-01 21:08:50 +01:00
|
|
|
FFmpegFixupM3u8PP,
|
2015-01-23 18:39:12 +01:00
|
|
|
FFmpegFixupM4aPP,
|
2015-01-10 05:45:51 +01:00
|
|
|
FFmpegFixupStretchedPP,
|
2021-06-21 19:23:17 +02:00
|
|
|
FFmpegFixupTimestampPP,
|
2014-12-15 01:06:25 +01:00
|
|
|
FFmpegMergerPP,
|
|
|
|
FFmpegPostProcessor,
|
2021-01-23 13:18:12 +01:00
|
|
|
MoveFilesAfterDownloadPP,
|
2021-09-29 22:53:33 +02:00
|
|
|
_PLUGIN_CLASSES as plugin_postprocessors
|
2014-12-15 01:06:25 +01:00
|
|
|
)
|
2021-09-24 03:01:43 +02:00
|
|
|
from .update import detect_variant
|
2021-11-29 18:00:02 +01:00
|
|
|
from .version import __version__, RELEASE_GIT_HEAD
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2016-03-03 12:24:24 +01:00
|
|
|
# ctypes is imported only on Windows (compat_os_name == 'nt');
# NOTE(review): presumably used for Win32 console/API calls further down
# in this module — confirm against the actual call sites.
if compat_os_name == 'nt':
    import ctypes
|
|
|
|
|
2020-09-18 15:35:21 +02:00
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
class YoutubeDL(object):
|
|
|
|
"""YoutubeDL class.
|
|
|
|
|
|
|
|
YoutubeDL objects are the ones responsible of downloading the
|
|
|
|
actual video file and writing it to disk if the user has requested
|
|
|
|
it, among some other tasks. In most cases there should be one per
|
|
|
|
program. As, given a video URL, the downloader doesn't know how to
|
|
|
|
extract all the needed information, task that InfoExtractors do, it
|
|
|
|
has to pass the URL to one of them.
|
|
|
|
|
|
|
|
For this, YoutubeDL objects have a method that allows
|
|
|
|
InfoExtractors to be registered in a given order. When it is passed
|
|
|
|
a URL, the YoutubeDL object handles it to the first InfoExtractor it
|
|
|
|
finds that reports being able to handle it. The InfoExtractor extracts
|
|
|
|
all the information about the video or videos the URL refers to, and
|
|
|
|
YoutubeDL process the extracted information, possibly using a File
|
|
|
|
Downloader to download the video.
|
|
|
|
|
|
|
|
YoutubeDL objects accept a lot of parameters. In order not to saturate
|
|
|
|
the object constructor with arguments, it receives a dictionary of
|
|
|
|
options instead. These options are available through the params
|
|
|
|
attribute for the InfoExtractors to use. The YoutubeDL also
|
|
|
|
registers itself as the downloader in charge for the InfoExtractors
|
|
|
|
that are added to it, so this is a "mutual registration".
|
|
|
|
|
|
|
|
Available options:
|
|
|
|
|
|
|
|
username: Username for authentication purposes.
|
|
|
|
password: Password for authentication purposes.
|
2015-06-14 07:49:42 +02:00
|
|
|
videopassword: Password for accessing a video.
|
2016-09-15 17:24:55 +02:00
|
|
|
ap_mso: Adobe Pass multiple-system operator identifier.
|
|
|
|
ap_username: Multiple-system operator account username.
|
|
|
|
ap_password: Multiple-system operator account password.
|
2013-06-18 22:14:21 +02:00
|
|
|
usenetrc: Use netrc for authentication instead.
|
|
|
|
verbose: Print additional info to stdout.
|
|
|
|
quiet: Do not print messages to stdout.
|
2014-03-26 00:43:46 +01:00
|
|
|
no_warnings: Do not print out anything for warnings.
|
2022-01-23 22:34:19 +01:00
|
|
|
forceprint: A dict with keys WHEN mapped to a list of templates to
|
|
|
|
print to stdout. The allowed keys are video or any of the
|
|
|
|
items in utils.POSTPROCESS_WHEN.
|
2022-01-02 11:52:00 +01:00
|
|
|
For compatibility, a single list is also accepted
|
2022-01-23 22:34:19 +01:00
|
|
|
print_to_file: A dict with keys WHEN (same as forceprint) mapped to
|
|
|
|
a list of tuples with (template, filename)
|
2021-05-14 09:44:38 +02:00
|
|
|
forceurl: Force printing final URL. (Deprecated)
|
|
|
|
forcetitle: Force printing title. (Deprecated)
|
|
|
|
forceid: Force printing ID. (Deprecated)
|
|
|
|
forcethumbnail: Force printing thumbnail URL. (Deprecated)
|
|
|
|
forcedescription: Force printing description. (Deprecated)
|
|
|
|
forcefilename: Force printing final filename. (Deprecated)
|
|
|
|
forceduration: Force printing duration. (Deprecated)
|
2013-11-20 06:18:24 +01:00
|
|
|
forcejson: Force printing info_dict as JSON.
|
2014-10-25 00:30:57 +02:00
|
|
|
dump_single_json: Force printing the info_dict of the whole playlist
|
|
|
|
(or video) as a single JSON line.
|
2021-02-04 23:53:04 +01:00
|
|
|
force_write_download_archive: Force writing download archive regardless
|
|
|
|
of 'skip_download' or 'simulate'.
|
2021-08-07 02:01:51 +02:00
|
|
|
simulate: Do not download the video files. If unset (or None),
|
|
|
|
simulate only if listsubtitles, listformats or list_thumbnails is used
|
Better Format Sorting (Squashed)
* Added --format-sort (-S height,filesize)
* Made fields reversible (-S +height)
* Added --format-sort-force, --no-format-sort-force
* Added limit (-S height:720)
* Added codec preference (-S vcodec,acodec)
* Correct handling of preference<-1000
* Rebased to yt-dlc
* Automatically determine missing bitrates
* aext, vext, protocol, acodec, vcodec can now take priority as a string, not a number (-S vext:webm)
* Correct handling of None in codec, audio_codec (None means the codec is unknown while 'none' means it doesn't exist)
* Correctly parse filesize (-S filesize:200M)
* Generalized preference calculation
* Rewrote entire code into the class FormatSort
* Correctly handle user input errors
* Combined fields (-S +ext:webm:webm)
* Closest mode (-S filesize~50M)
* Aliases (framerate=fps, br=bitrate etc)
* Documentation
2020-10-26 16:50:09 +01:00
|
|
|
format: Video format code. see "FORMAT SELECTION" for more details.
|
2021-11-10 17:11:41 +01:00
|
|
|
You can also pass a function. The function takes 'ctx' as
|
|
|
|
argument and returns the formats to download.
|
|
|
|
See "build_format_selector" for an implementation
|
2021-02-12 04:51:59 +01:00
|
|
|
allow_unplayable_formats: Allow unplayable formats to be extracted and downloaded.
|
2021-04-17 02:09:58 +02:00
|
|
|
ignore_no_formats_error: Ignore "No video formats" error. Usefull for
|
|
|
|
extracting metadata even if the video is not actually
|
|
|
|
available for download (experimental)
|
2021-10-31 10:15:59 +01:00
|
|
|
format_sort: A list of fields by which to sort the video formats.
|
|
|
|
See "Sorting Formats" for more details.
|
2021-02-04 23:53:04 +01:00
|
|
|
format_sort_force: Force the given format_sort. see "Sorting Formats"
|
|
|
|
for more details.
|
|
|
|
allow_multiple_video_streams: Allow multiple video streams to be merged
|
|
|
|
into a single file
|
|
|
|
allow_multiple_audio_streams: Allow multiple audio streams to be merged
|
|
|
|
into a single file
|
2021-07-15 19:19:59 +02:00
|
|
|
check_formats Whether to test if the formats are downloadable.
|
2021-10-24 11:16:07 +02:00
|
|
|
Can be True (check all), False (check none),
|
|
|
|
'selected' (check selected formats),
|
2021-07-15 19:19:59 +02:00
|
|
|
or None (check only if requested by extractor)
|
2021-02-19 22:33:17 +01:00
|
|
|
paths: Dictionary of output paths. The allowed keys are 'home'
|
|
|
|
'temp' and the keys of OUTTMPL_TYPES (in utils.py)
|
2021-02-03 14:36:09 +01:00
|
|
|
outtmpl: Dictionary of templates for output names. Allowed keys
|
2021-02-19 22:33:17 +01:00
|
|
|
are 'default' and the keys of OUTTMPL_TYPES (in utils.py).
|
2021-08-07 10:49:17 +02:00
|
|
|
For compatibility with youtube-dl, a single string can also be used
|
2021-01-16 18:12:05 +01:00
|
|
|
outtmpl_na_placeholder: Placeholder for unavailable meta fields.
|
|
|
|
restrictfilenames: Do not allow "&" and spaces in file names
|
|
|
|
trim_file_name: Limit length of filename (extension excluded)
|
2021-02-19 22:33:17 +01:00
|
|
|
windowsfilenames: Force the filenames to be windows compatible
|
2021-09-24 02:21:54 +02:00
|
|
|
ignoreerrors: Do not stop on download/postprocessing errors.
|
|
|
|
Can be 'only_download' to ignore only download errors.
|
|
|
|
Default is 'only_download' for CLI, but False for API
|
2021-04-21 08:00:43 +02:00
|
|
|
skip_playlist_after_errors: Number of allowed failures until the rest of
|
|
|
|
the playlist is skipped
|
2015-06-12 15:20:12 +02:00
|
|
|
force_generic_extractor: Force downloader to use the generic extractor
|
2019-10-13 18:00:48 +02:00
|
|
|
overwrites: Overwrite all video and metadata files if True,
|
|
|
|
overwrite only non-video files if None
|
|
|
|
and don't overwrite any file if False
|
2021-08-07 10:49:17 +02:00
|
|
|
For compatibility with youtube-dl,
|
|
|
|
"nooverwrites" may also be used instead
|
2013-06-18 22:14:21 +02:00
|
|
|
playliststart: Playlist item to start at.
|
|
|
|
playlistend: Playlist item to end at.
|
2015-01-25 04:24:55 +01:00
|
|
|
playlist_items: Specific indices of playlist to download.
|
2014-07-11 05:11:11 +02:00
|
|
|
playlistreverse: Download playlist items in reverse order.
|
2017-01-31 10:03:31 +01:00
|
|
|
playlistrandom: Download playlist items in random order.
|
2013-06-18 22:14:21 +02:00
|
|
|
matchtitle: Download only matching titles.
|
|
|
|
rejecttitle: Reject downloads for matching titles.
|
2013-11-24 06:08:11 +01:00
|
|
|
logger: Log messages to a logging.Logger instance.
|
2013-06-18 22:14:21 +02:00
|
|
|
logtostderr: Log messages to stderr instead of stdout.
|
2021-10-08 21:11:59 +02:00
|
|
|
consoletitle: Display progress in console window's titlebar.
|
2013-06-18 22:14:21 +02:00
|
|
|
writedescription: Write the video description to a .description file
|
|
|
|
writeinfojson: Write the video description to a .info.json file
|
2021-03-18 16:27:20 +01:00
|
|
|
clean_infojson: Remove private fields from the infojson
|
2021-08-07 10:49:17 +02:00
|
|
|
getcomments: Extract video comments. This will not be written to disk
|
2021-01-27 16:02:51 +01:00
|
|
|
unless writeinfojson is also given
|
2013-10-14 07:18:58 +02:00
|
|
|
writeannotations: Write the video annotations to a .annotations.xml file
|
2013-06-18 22:14:21 +02:00
|
|
|
writethumbnail: Write the thumbnail image to a file
|
2021-02-04 23:53:04 +01:00
|
|
|
allow_playlist_files: Whether to write playlists' description, infojson etc
|
|
|
|
also to disk when using the 'write*' options
|
2015-01-25 03:11:12 +01:00
|
|
|
write_all_thumbnails: Write all thumbnail formats to files
|
2020-10-27 11:37:21 +01:00
|
|
|
writelink: Write an internet shortcut file, depending on the
|
|
|
|
current platform (.url/.webloc/.desktop)
|
|
|
|
writeurllink: Write a Windows internet shortcut file (.url)
|
|
|
|
writewebloclink: Write a macOS internet shortcut file (.webloc)
|
|
|
|
writedesktoplink: Write a Linux internet shortcut file (.desktop)
|
2013-06-18 22:14:21 +02:00
|
|
|
writesubtitles: Write the video subtitles to a file
|
2015-11-16 15:15:25 +01:00
|
|
|
writeautomaticsub: Write the automatically generated subtitles to a file
|
2021-07-21 20:02:21 +02:00
|
|
|
allsubtitles: Deprecated - Use subtitleslangs = ['all']
|
2021-04-19 23:17:09 +02:00
|
|
|
Downloads all the subtitles of the video
|
2013-09-14 11:14:40 +02:00
|
|
|
(requires writesubtitles or writeautomaticsub)
|
2013-06-18 22:14:21 +02:00
|
|
|
listsubtitles: Lists all available subtitles for the video
|
2015-02-15 18:03:41 +01:00
|
|
|
subtitlesformat: The format code for subtitles
|
2021-04-19 23:17:09 +02:00
|
|
|
subtitleslangs: List of languages of the subtitles to download (can be regex).
|
|
|
|
The list may contain "all" to refer to all the available
|
|
|
|
subtitles. The language can be prefixed with a "-" to
|
|
|
|
exclude it from the requested languages. Eg: ['all', '-live_chat']
|
2013-06-18 22:14:21 +02:00
|
|
|
keepvideo: Keep the video file after post-processing
|
|
|
|
daterange: A DateRange object, download only if the upload_date is in the range.
|
|
|
|
skip_download: Skip the actual download of the video file
|
2013-09-22 11:09:25 +02:00
|
|
|
cachedir: Location of the cache files in the filesystem.
|
2014-09-03 12:41:05 +02:00
|
|
|
False to disable filesystem cache.
|
2013-09-30 22:26:25 +02:00
|
|
|
noplaylist: Download single video instead of a playlist if in doubt.
|
2013-10-06 06:06:30 +02:00
|
|
|
age_limit: An integer representing the user's age in years.
|
|
|
|
Unsuitable videos for the given age are skipped.
|
2013-12-16 03:09:49 +01:00
|
|
|
min_views: An integer representing the minimum view count the video
|
|
|
|
must have in order to not be skipped.
|
|
|
|
Videos without view count information are always
|
|
|
|
downloaded. None for no limit.
|
|
|
|
max_views: An integer representing the maximum view count.
|
|
|
|
Videos that are more popular than that are not
|
|
|
|
downloaded.
|
|
|
|
Videos without view count information are always
|
|
|
|
downloaded. None for no limit.
|
|
|
|
download_archive: File name of a file where all downloads are recorded.
|
2013-10-06 04:27:09 +02:00
|
|
|
Videos already present in the file are not downloaded
|
|
|
|
again.
|
2021-01-18 20:17:48 +01:00
|
|
|
break_on_existing: Stop the download process after attempting to download a
|
|
|
|
file that is in the archive.
|
|
|
|
break_on_reject: Stop the download process when encountering a video that
|
|
|
|
has been filtered out.
|
2021-11-28 22:11:55 +01:00
|
|
|
break_per_url: Whether break_on_reject and break_on_existing
|
|
|
|
should act on each input URL as opposed to for the entire queue
|
2021-01-18 20:17:48 +01:00
|
|
|
cookiefile: File name where cookies should be read from and dumped to
|
2021-12-27 02:28:44 +01:00
|
|
|
cookiesfrombrowser: A tuple containing the name of the browser, the profile
|
|
|
|
name/pathfrom where cookies are loaded, and the name of the
|
|
|
|
keyring. Eg: ('chrome', ) or ('vivaldi', 'default', 'BASICTEXT')
|
2022-01-21 07:12:30 +01:00
|
|
|
legacyserverconnect: Explicitly allow HTTPS connection to servers that do not
|
|
|
|
support RFC 5746 secure renegotiation
|
2021-12-27 02:28:44 +01:00
|
|
|
nocheckcertificate: Do not verify SSL certificates
|
2014-03-21 00:33:53 +01:00
|
|
|
prefer_insecure: Use HTTP instead of HTTPS to retrieve information.
|
|
|
|
At the moment, this is only supported by YouTube.
|
2013-11-24 15:03:25 +01:00
|
|
|
proxy: URL of the proxy server to use
|
2016-07-03 17:23:48 +02:00
|
|
|
geo_verification_proxy: URL of the proxy to use for IP address verification
|
2018-05-19 18:53:24 +02:00
|
|
|
on geo-restricted sites.
|
2013-12-01 11:42:02 +01:00
|
|
|
socket_timeout: Time to wait for unresponsive hosts, in seconds
|
2013-12-09 04:08:51 +01:00
|
|
|
bidi_workaround: Work around buggy terminals without bidirectional text
|
|
|
|
support, using fridibi
|
2013-12-29 15:28:32 +01:00
|
|
|
debug_printtraffic:Print out sent and received HTTP traffic
|
2021-12-01 01:16:15 +01:00
|
|
|
include_ads: Download ads as well (deprecated)
|
2014-01-22 14:16:43 +01:00
|
|
|
default_search: Prepend this string if an input url is not valid.
|
|
|
|
'auto' for elaborate guessing
|
2014-03-30 06:02:41 +02:00
|
|
|
encoding: Use this encoding instead of the system-specified.
|
2014-08-21 11:52:07 +02:00
|
|
|
extract_flat: Do not resolve URLs, return the immediate result.
|
2014-10-24 14:48:12 +02:00
|
|
|
Pass in 'in_playlist' to only show this behavior for
|
|
|
|
playlist items.
|
2021-11-28 19:57:44 +01:00
|
|
|
wait_for_video: If given, wait for scheduled streams to become available.
|
|
|
|
The value should be a tuple containing the range
|
|
|
|
(min_secs, max_secs) to wait between retries
|
2014-12-15 01:06:25 +01:00
|
|
|
postprocessors: A list of dictionaries, each with an entry
|
2014-12-15 01:26:18 +01:00
|
|
|
* key: The name of the postprocessor. See
|
2021-02-24 19:45:56 +01:00
|
|
|
yt_dlp/postprocessor/__init__.py for a list.
|
2022-01-23 22:34:19 +01:00
|
|
|
* when: When to run the postprocessor. Allowed values are
|
|
|
|
the entries of utils.POSTPROCESS_WHEN
|
2021-04-11 00:18:07 +02:00
|
|
|
Assumed to be 'post_process' if not given
|
2021-10-09 02:23:15 +02:00
|
|
|
post_hooks: Deprecated - Register a custom postprocessor instead
|
|
|
|
A list of functions that get called as the final step
|
2020-12-29 16:03:07 +01:00
|
|
|
for each video file, after all postprocessors have been
|
|
|
|
called. The filename will be passed as the only argument.
|
2014-12-15 01:26:18 +01:00
|
|
|
progress_hooks: A list of functions that get called on download
|
|
|
|
progress, with a dictionary with the entries
|
2015-02-17 21:37:48 +01:00
|
|
|
* status: One of "downloading", "error", or "finished".
|
2015-01-25 06:15:51 +01:00
|
|
|
Check this first and ignore unknown values.
|
2021-07-21 19:28:43 +02:00
|
|
|
* info_dict: The extracted info_dict
|
2014-12-15 01:26:18 +01:00
|
|
|
|
2015-02-17 21:37:48 +01:00
|
|
|
If status is one of "downloading", or "finished", the
|
2015-01-25 06:15:51 +01:00
|
|
|
following properties may also be present:
|
|
|
|
* filename: The final filename (always present)
|
2015-02-17 21:37:48 +01:00
|
|
|
* tmpfilename: The filename we're currently writing to
|
2014-12-15 01:26:18 +01:00
|
|
|
* downloaded_bytes: Bytes on disk
|
|
|
|
* total_bytes: Size of the whole file, None if unknown
|
2015-02-17 21:37:48 +01:00
|
|
|
* total_bytes_estimate: Guess of the eventual file size,
|
|
|
|
None if unavailable.
|
|
|
|
* elapsed: The number of seconds since download started.
|
2014-12-15 01:26:18 +01:00
|
|
|
* eta: The estimated time in seconds, None if unknown
|
|
|
|
* speed: The download speed in bytes/second, None if
|
|
|
|
unknown
|
2015-02-17 21:37:48 +01:00
|
|
|
* fragment_index: The counter of the currently
|
|
|
|
downloaded video fragment.
|
|
|
|
* fragment_count: The number of fragments (= individual
|
|
|
|
files that will be merged)
|
2014-12-15 01:26:18 +01:00
|
|
|
|
|
|
|
Progress hooks are guaranteed to be called at least once
|
|
|
|
(with status "finished") if the download is successful.
|
2021-10-08 21:11:59 +02:00
|
|
|
postprocessor_hooks: A list of functions that get called on postprocessing
|
|
|
|
progress, with a dictionary with the entries
|
|
|
|
* status: One of "started", "processing", or "finished".
|
|
|
|
Check this first and ignore unknown values.
|
|
|
|
* postprocessor: Name of the postprocessor
|
|
|
|
* info_dict: The extracted info_dict
|
|
|
|
|
|
|
|
Progress hooks are guaranteed to be called at least twice
|
|
|
|
(with status "started" and "finished") if the processing is successful.
|
2015-01-10 01:59:14 +01:00
|
|
|
merge_output_format: Extension to use when merging formats.
|
2021-01-28 06:18:36 +01:00
|
|
|
final_ext: Expected final extension; used to detect when the file was
|
2021-11-09 23:42:25 +01:00
|
|
|
already downloaded and converted
|
2015-01-10 05:45:51 +01:00
|
|
|
fixup: Automatically correct known faults of the file.
|
|
|
|
One of:
|
|
|
|
- "never": do nothing
|
|
|
|
- "warn": only emit a warning
|
|
|
|
- "detect_or_warn": check whether we can do anything
|
2015-01-23 18:39:12 +01:00
|
|
|
about it, warn otherwise (default)
|
2018-05-19 18:53:24 +02:00
|
|
|
source_address: Client-side IP address to bind to.
|
2016-01-10 19:27:22 +01:00
|
|
|
call_home: Boolean, true iff we are allowed to contact the
|
2021-02-24 19:45:56 +01:00
|
|
|
yt-dlp servers for debugging. (BROKEN)
|
2021-02-27 13:41:23 +01:00
|
|
|
sleep_interval_requests: Number of seconds to sleep between requests
|
|
|
|
during extraction
|
2016-08-08 22:46:52 +02:00
|
|
|
sleep_interval: Number of seconds to sleep before each download when
|
|
|
|
used alone or a lower bound of a range for randomized
|
|
|
|
sleep before each download (minimum possible number
|
|
|
|
of seconds to sleep) when used along with
|
|
|
|
max_sleep_interval.
|
|
|
|
max_sleep_interval:Upper bound of a range for randomized sleep before each
|
|
|
|
download (maximum possible number of seconds to sleep).
|
|
|
|
Must only be used along with sleep_interval.
|
|
|
|
Actual sleep time will be a random float from range
|
|
|
|
[sleep_interval; max_sleep_interval].
|
2021-02-27 13:41:23 +01:00
|
|
|
sleep_interval_subtitles: Number of seconds to sleep before each subtitle download
|
2015-01-25 02:38:47 +01:00
|
|
|
listformats: Print an overview of available video formats and exit.
|
|
|
|
list_thumbnails: Print a table of all thumbnails and exit.
|
2015-02-10 03:32:21 +01:00
|
|
|
match_filter: A function that gets called with the info_dict of
|
|
|
|
every video.
|
|
|
|
If it returns a message, the video is ignored.
|
|
|
|
If it returns None, the video is downloaded.
|
|
|
|
match_filter_func in utils.py is one example for this.
|
2015-02-10 04:22:10 +01:00
|
|
|
no_color: Do not emit color codes in output.
|
2017-02-18 19:53:41 +01:00
|
|
|
geo_bypass: Bypass geographic restriction via faking X-Forwarded-For
|
2018-05-19 18:53:24 +02:00
|
|
|
HTTP header
|
2017-02-18 19:53:41 +01:00
|
|
|
geo_bypass_country:
|
2017-02-04 12:49:58 +01:00
|
|
|
Two-letter ISO 3166-2 country code that will be used for
|
|
|
|
explicit geographic restriction bypassing via faking
|
2018-05-19 18:53:24 +02:00
|
|
|
X-Forwarded-For HTTP header
|
2018-05-02 02:18:01 +02:00
|
|
|
geo_bypass_ip_block:
|
|
|
|
IP range in CIDR notation that will be used similarly to
|
2018-05-19 18:53:24 +02:00
|
|
|
geo_bypass_country
|
2014-12-15 01:26:18 +01:00
|
|
|
|
2015-02-17 12:09:12 +01:00
|
|
|
The following options determine which downloader is picked:
|
2021-04-10 17:08:33 +02:00
|
|
|
external_downloader: A dictionary of protocol keys and the executable of the
|
|
|
|
external downloader to use for it. The allowed protocols
|
|
|
|
are default|http|ftp|m3u8|dash|rtsp|rtmp|mms.
|
|
|
|
Set the value to 'native' to use the native downloader
|
|
|
|
hls_prefer_native: Deprecated - Use external_downloader = {'m3u8': 'native'}
|
|
|
|
or {'m3u8': 'ffmpeg'} instead.
|
|
|
|
Use the native HLS downloader instead of ffmpeg/avconv
|
2016-04-21 19:02:17 +02:00
|
|
|
if True, otherwise use ffmpeg/avconv if False, otherwise
|
|
|
|
use downloader suggested by extractor if None.
|
2021-05-11 10:00:48 +02:00
|
|
|
compat_opts: Compatibility options. See "Differences in default behavior".
|
2021-07-06 23:21:29 +02:00
|
|
|
The following options do not work when used through the API:
|
2021-10-09 02:23:15 +02:00
|
|
|
filename, abort-on-error, multistreams, no-live-chat, format-sort
|
2021-11-14 23:33:41 +01:00
|
|
|
no-clean-infojson, no-playlist-metafiles, no-keep-subs, no-attach-info-json.
|
2021-07-31 08:08:39 +02:00
|
|
|
Refer to __init__.py for their implementation
|
2021-10-08 21:11:59 +02:00
|
|
|
progress_template: Dictionary of templates for progress outputs.
|
|
|
|
Allowed keys are 'download', 'postprocess',
|
|
|
|
'download-title' (console title) and 'postprocess-title'.
|
|
|
|
The template is mapped on a dictionary with keys 'progress' and 'info'
|
2013-10-22 14:49:34 +02:00
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
The following parameters are not used by YoutubeDL itself, they are used by
|
2021-02-24 19:45:56 +01:00
|
|
|
the downloader (see yt_dlp/downloader/common.py):
|
2021-06-23 01:11:09 +02:00
|
|
|
nopart, updatetime, buffersize, ratelimit, throttledratelimit, min_filesize,
|
2021-12-23 03:29:03 +01:00
|
|
|
max_filesize, test, noresizebuffer, retries, file_access_retries, fragment_retries,
|
|
|
|
continuedl, noprogress, xattr_set_filesize, hls_use_mpegts, http_chunk_size,
|
2021-11-09 23:42:25 +01:00
|
|
|
external_downloader_args, concurrent_fragment_downloads.
|
2014-01-08 17:53:34 +01:00
|
|
|
|
|
|
|
The following options are used by the post processors:
|
2018-06-28 20:09:14 +02:00
|
|
|
prefer_ffmpeg: If False, use avconv instead of ffmpeg if both are available,
|
2021-01-26 18:57:32 +01:00
|
|
|
otherwise prefer ffmpeg. (avconv support is deprecated)
|
2019-04-01 20:29:44 +02:00
|
|
|
ffmpeg_location: Location of the ffmpeg/avconv binary; either the path
|
|
|
|
to the binary or its containing directory.
|
2021-01-20 17:07:40 +01:00
|
|
|
postprocessor_args: A dictionary of postprocessor/executable keys (in lower case)
|
2021-08-07 10:49:17 +02:00
|
|
|
and a list of additional command-line arguments for the
|
|
|
|
postprocessor/executable. The dict can also have "PP+EXE" keys
|
|
|
|
which are used when the given exe is used by the given PP.
|
|
|
|
Use 'default' as the name for arguments to be passed to all PP
|
|
|
|
For compatibility with youtube-dl, a single list of args
|
|
|
|
can also be used
|
2021-02-24 17:01:01 +01:00
|
|
|
|
|
|
|
The following options are used by the extractors:
|
2021-03-01 00:48:37 +01:00
|
|
|
extractor_retries: Number of times to retry for known errors
|
|
|
|
dynamic_mpd: Whether to process dynamic DASH manifests (default: True)
|
2021-02-24 17:01:01 +01:00
|
|
|
hls_split_discontinuity: Split HLS playlists to different formats at
|
2021-03-01 00:48:37 +01:00
|
|
|
discontinuities such as ad breaks (default: False)
|
2021-06-25 16:05:41 +02:00
|
|
|
extractor_args: A dictionary of arguments to be passed to the extractors.
|
|
|
|
See "EXTRACTOR ARGUMENTS" for details.
|
|
|
|
Eg: {'youtube': {'skip': ['dash', 'hls']}}
|
2022-01-23 20:51:39 +01:00
|
|
|
mark_watched: Mark videos watched (even with --simulate). Only for YouTube
|
2021-06-25 16:05:41 +02:00
|
|
|
youtube_include_dash_manifest: Deprecated - Use extractor_args instead.
|
|
|
|
If True (default), DASH manifests and related
|
2021-03-01 00:48:37 +01:00
|
|
|
data will be downloaded and processed by extractor.
|
|
|
|
You can reduce network I/O by disabling it if you don't
|
|
|
|
care about DASH. (only for youtube)
|
2021-06-25 16:05:41 +02:00
|
|
|
youtube_include_hls_manifest: Deprecated - Use extractor_args instead.
|
|
|
|
If True (default), HLS manifests and related
|
2021-03-01 00:48:37 +01:00
|
|
|
data will be downloaded and processed by extractor.
|
|
|
|
You can reduce network I/O by disabling it if you don't
|
|
|
|
care about HLS. (only for youtube)
|
2013-06-18 22:14:21 +02:00
|
|
|
"""
|
|
|
|
|
2017-06-08 17:53:14 +02:00
|
|
|
_NUMERIC_FIELDS = set((
|
|
|
|
'width', 'height', 'tbr', 'abr', 'asr', 'vbr', 'fps', 'filesize', 'filesize_approx',
|
2021-09-17 20:23:55 +02:00
|
|
|
'timestamp', 'release_timestamp',
|
2017-06-08 17:53:14 +02:00
|
|
|
'duration', 'view_count', 'like_count', 'dislike_count', 'repost_count',
|
|
|
|
'average_rating', 'comment_count', 'age_limit',
|
|
|
|
'start_time', 'end_time',
|
|
|
|
'chapter_number', 'season_number', 'episode_number',
|
|
|
|
'track_number', 'disc_number', 'release_year',
|
|
|
|
))
|
|
|
|
|
2021-10-15 15:20:28 +02:00
|
|
|
# File extensions grouped by media kind ('storyboards' covers mhtml
# storyboard images). How these groups are consumed is outside this chunk.
_format_selection_exts = {
    'audio': {'m4a', 'mp3', 'ogg', 'aac'},
    'video': {'mp4', 'flv', 'webm', '3gp'},
    'storyboards': {'mhtml'},
}
|
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
# Class-level defaults; __init__ replaces each with a per-instance value
# of the same shape.
params = None  # dictionary of options (documented in the class docstring)
_ies = {}  # ie_key -> InfoExtractor class/instance registry
_pps = {k: [] for k in POSTPROCESS_WHEN}  # post-processors keyed by "when" stage
_printed_messages = set()  # messages already emitted via only_once
_first_webpage_request = True
_download_retcode = None
_num_downloads = None
_playlist_level = 0  # current nesting depth of playlist extraction
_playlist_urls = set()  # presumably used to detect playlist recursion -- confirm in extraction code
_screen_file = None
|
|
|
|
|
2014-10-28 12:54:29 +01:00
|
|
|
def __init__(self, params=None, auto_init=True):
    """Create a FileDownloader object with the given options.
    @param auto_init    Whether to load the default extractors and print header (if verbose).
                        Set to 'no_verbose_header' to not print the header
    """
    if params is None:
        params = {}
    # Per-instance registries and counters (shadow the class-level defaults)
    self._ies = {}
    self._ies_instances = {}
    self._pps = {k: [] for k in POSTPROCESS_WHEN}
    self._printed_messages = set()
    self._first_webpage_request = True
    self._post_hooks = []
    self._progress_hooks = []
    self._postprocessor_hooks = []
    self._download_retcode = 0
    self._num_downloads = 0
    self._num_videos = 0
    # 'logtostderr' redirects all screen output to stderr
    self._screen_file = [sys.stdout, sys.stderr][params.get('logtostderr', False)]
    self._err_file = sys.stderr
    self.params = params
    self.cache = Cache(self)

    # Enable ANSI escape-sequence processing on Windows consoles, then decide
    # per-stream whether colored output is possible
    windows_enable_vt_mode()
    self._allow_colors = {
        'screen': not self.params.get('no_color') and supports_terminal_sequences(self._screen_file),
        'err': not self.params.get('no_color') and supports_terminal_sequences(self._err_file),
    }

    if sys.version_info < (3, 6):
        self.report_warning(
            'Python version %d.%d is not supported! Please update to Python 3.6 or above' % sys.version_info[:2])

    if self.params.get('allow_unplayable_formats'):
        self.report_warning(
            f'You have asked for {self._format_err("UNPLAYABLE", self.Styles.EMPHASIS)} formats to be listed/downloaded. '
            'This is a developer option intended for debugging. \n'
            ' If you experience any issues while using this option, '
            f'{self._format_err("DO NOT", self.Styles.ERROR)} open a bug report')

    def check_deprecated(param, option, suggestion):
        # Warn when a deprecated option is set; returns True if it was set
        if self.params.get(param) is not None:
            self.report_warning('%s is deprecated. Use %s instead' % (option, suggestion))
            return True
        return False

    if check_deprecated('cn_verification_proxy', '--cn-verification-proxy', '--geo-verification-proxy'):
        if self.params.get('geo_verification_proxy') is None:
            self.params['geo_verification_proxy'] = self.params['cn_verification_proxy']

    check_deprecated('autonumber', '--auto-number', '-o "%(autonumber)s-%(title)s.%(ext)s"')
    check_deprecated('usetitle', '--title', '-o "%(title)s-%(id)s.%(ext)s"')
    check_deprecated('useid', '--id', '-o "%(id)s.%(ext)s"')

    # Emit warnings queued up by the option parser / embedding code
    for msg in self.params.get('_warnings', []):
        self.report_warning(msg)
    for msg in self.params.get('_deprecation_warnings', []):
        self.deprecation_warning(msg)

    if 'list-formats' in self.params.get('compat_opts', []):
        self.params['listformats_table'] = False

    if 'overwrites' not in self.params and self.params.get('nooverwrites') is not None:
        # nooverwrites was unnecessarily changed to overwrites
        # in 0c3d0f51778b153f65c21906031c2e091fcfb641
        # This ensures compatibility with both keys
        self.params['overwrites'] = not self.params['nooverwrites']
    elif self.params.get('overwrites') is None:
        self.params.pop('overwrites', None)
    else:
        self.params['nooverwrites'] = not self.params['overwrites']

    self.params.setdefault('forceprint', {})
    self.params.setdefault('print_to_file', {})

    # Compatibility with older syntax
    if not isinstance(params['forceprint'], dict):
        self.params['forceprint'] = {'video': params['forceprint']}

    if self.params.get('bidi_workaround', False):
        # Pipe console output through an external bidi filter (bidiv/fribidi)
        # connected via a pty; _bidi_workaround() uses these handles later
        try:
            import pty
            master, slave = pty.openpty()
            width = compat_get_terminal_size().columns
            if width is None:
                width_args = []
            else:
                width_args = ['-w', str(width)]
            sp_kwargs = dict(
                stdin=subprocess.PIPE,
                stdout=slave,
                stderr=self._err_file)
            try:
                self._output_process = Popen(['bidiv'] + width_args, **sp_kwargs)
            except OSError:
                # Fall back to fribidi when bidiv is unavailable
                self._output_process = Popen(['fribidi', '-c', 'UTF-8'] + width_args, **sp_kwargs)
            self._output_channel = os.fdopen(master, 'rb')
        except OSError as ose:
            if ose.errno == errno.ENOENT:
                self.report_warning(
                    'Could not find fribidi executable, ignoring --bidi-workaround. '
                    'Make sure that fribidi is an executable file in one of the directories in your $PATH.')
            else:
                raise

    if (sys.platform != 'win32'
            and sys.getfilesystemencoding() in ['ascii', 'ANSI_X3.4-1968']
            and not self.params.get('restrictfilenames', False)):
        # Unicode filesystem API will throw errors (#1474, #13027)
        self.report_warning(
            'Assuming --restrict-filenames since file system encoding '
            'cannot encode all characters. '
            'Set the LC_ALL environment variable to fix this.')
        self.params['restrictfilenames'] = True

    self.outtmpl_dict = self.parse_outtmpl()

    # Creating format selector here allows us to catch syntax errors before the extraction
    self.format_selector = (
        self.params.get('format') if self.params.get('format') in (None, '-')
        else self.params['format'] if callable(self.params['format'])
        else self.build_format_selector(self.params['format']))

    self._setup_opener()

    if auto_init:
        if auto_init != 'no_verbose_header':
            self.print_debug_header()
        self.add_default_info_extractors()

    # Register user-supplied hooks of all three kinds
    hooks = {
        'post_hooks': self.add_post_hook,
        'progress_hooks': self.add_progress_hook,
        'postprocessor_hooks': self.add_postprocessor_hook,
    }
    for opt, fn in hooks.items():
        for ph in self.params.get(opt, []):
            fn(ph)

    # Instantiate configured post-processors; 'when' selects the stage
    for pp_def_raw in self.params.get('postprocessors', []):
        pp_def = dict(pp_def_raw)
        when = pp_def.pop('when', 'post_process')
        self.add_post_processor(
            get_postprocessor(pp_def.pop('key'))(self, **compat_kwargs(pp_def)),
            when=when)

    register_socks_protocols()

    def preload_download_archive(fn):
        """Preload the archive, if any is specified"""
        if fn is None:
            return False
        self.write_debug(f'Loading archive file {fn!r}')
        try:
            with locked_file(fn, 'r', encoding='utf-8') as archive_file:
                for line in archive_file:
                    self.archive.add(line.strip())
        except IOError as ioe:
            # A missing archive file is fine; anything else is a real error
            if ioe.errno != errno.ENOENT:
                raise
            return False
        return True

    self.archive = set()
    preload_download_archive(self.params.get('download_archive'))
|
|
|
|
|
2014-11-23 10:49:19 +01:00
|
|
|
def warn_if_short_id(self, argv):
    """Warn when a CLI argument looks like an 11-char video ID starting with '-'."""
    short_id_re = re.compile(r'^-[0-9A-Za-z_-]{10}$')
    suspect = [i for i, a in enumerate(argv) if short_id_re.match(a)]
    if not suspect:
        return
    # Suggest moving the dash-prefixed IDs behind a '--' separator
    kept = [a for i, a in enumerate(argv) if i not in suspect]
    moved = [argv[i] for i in suspect]
    correct_argv = ['yt-dlp'] + kept + ['--'] + moved
    self.report_warning(
        'Long argument string detected. '
        'Use -- to separate parameters and URLs, like this:\n%s' %
        args_to_str(correct_argv))
|
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
def add_info_extractor(self, ie):
    """Register an InfoExtractor (class or instance) at the end of the list."""
    key = ie.ie_key()
    self._ies[key] = ie
    # Bare classes are instantiated lazily elsewhere; only instances are
    # cached and bound to this downloader
    if isinstance(ie, type):
        return
    self._ies_instances[key] = ie
    ie.set_downloader(self)
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-08-23 01:56:45 +02:00
|
|
|
def _get_info_extractor_class(self, ie_key):
|
|
|
|
ie = self._ies.get(ie_key)
|
|
|
|
if ie is None:
|
|
|
|
ie = get_info_extractor(ie_key)
|
|
|
|
self.add_info_extractor(ie)
|
|
|
|
return ie
|
|
|
|
|
2013-07-08 15:14:27 +02:00
|
|
|
def get_info_extractor(self, ie_key):
    """
    Get an instance of an IE with name ie_key, it will try to get one from
    the _ies list, if there's no instance it will create a new one and add
    it to the extractor list.
    """
    instance = self._ies_instances.get(ie_key)
    if instance is None:
        instance = get_info_extractor(ie_key)()
        self.add_info_extractor(instance)
    return instance
|
|
|
|
|
2013-06-27 23:51:06 +02:00
|
|
|
def add_default_info_extractors(self):
    """Register every extractor class returned by gen_extractor_classes()."""
    for extractor in gen_extractor_classes():
        self.add_info_extractor(extractor)
|
|
|
|
|
2021-04-11 00:18:07 +02:00
|
|
|
def add_post_processor(self, pp, when='post_process'):
    """Append a PostProcessor to the chain for the given 'when' stage."""
    self._pps[when].append(pp)
    pp.set_downloader(self)
|
|
|
|
|
2020-12-29 16:03:07 +01:00
|
|
|
def add_post_hook(self, ph):
    """Register a post hook"""
    self._post_hooks.append(ph)
|
|
|
|
|
2013-12-23 10:37:27 +01:00
|
|
|
def add_progress_hook(self, ph):
    """Register a download progress hook"""
    self._progress_hooks.append(ph)
|
2013-09-23 18:09:28 +02:00
|
|
|
|
2021-10-08 21:11:59 +02:00
|
|
|
def add_postprocessor_hook(self, ph):
    """Register a postprocessing progress hook, propagating it to all
    already-registered post-processors."""
    self._postprocessor_hooks.append(ph)
    for pp in itertools.chain.from_iterable(self._pps.values()):
        pp.add_progress_hook(ph)
|
2021-10-08 21:11:59 +02:00
|
|
|
|
2013-12-09 18:29:07 +01:00
|
|
|
def _bidi_workaround(self, message):
    # Pass `message` through the external bidi filter process created in
    # __init__ (only when --bidi-workaround is active); otherwise return
    # the message unchanged.
    if not hasattr(self, '_output_channel'):
        return message

    assert hasattr(self, '_output_process')
    assert isinstance(message, compat_str)
    line_count = message.count('\n') + 1
    self._output_process.stdin.write((message + '\n').encode('utf-8'))
    self._output_process.stdin.flush()
    # Read back exactly as many lines as were written
    res = ''.join(self._output_channel.readline().decode('utf-8')
                  for _ in range(line_count))
    # Strip the trailing newline that was appended above
    return res[:-len('\n')]
|
2013-12-09 18:29:07 +01:00
|
|
|
|
2021-07-21 14:36:34 +02:00
|
|
|
def _write_string(self, message, out=None, only_once=False):
|
|
|
|
if only_once:
|
|
|
|
if message in self._printed_messages:
|
|
|
|
return
|
|
|
|
self._printed_messages.add(message)
|
|
|
|
write_string(message, out=out, encoding=self.params.get('encoding'))
|
2014-04-07 19:57:42 +02:00
|
|
|
|
2021-05-04 17:39:36 +02:00
|
|
|
def to_stdout(self, message, skip_eol=False, quiet=False):
    """Print message to stdout (or hand it to the configured logger)"""
    logger = self.params.get('logger')
    if logger:
        logger.debug(message)
        return
    # quiet messages are still shown (on stderr) in verbose mode
    if quiet and not self.params.get('verbose'):
        return
    terminator = '' if skip_eol else '\n'
    self._write_string(
        '%s%s' % (self._bidi_workaround(message), terminator),
        self._err_file if quiet else self._screen_file)
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-07-21 14:36:34 +02:00
|
|
|
def to_stderr(self, message, only_once=False):
    """Print message to stderr (or hand it to the configured logger)"""
    assert isinstance(message, compat_str)
    logger = self.params.get('logger')
    if logger:
        logger.error(message)
    else:
        self._write_string('%s\n' % self._bidi_workaround(message), self._err_file, only_once=only_once)
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2013-11-17 11:39:52 +01:00
|
|
|
def to_console_title(self, message):
    # Set the terminal/console window title (no-op unless --console-title)
    if not self.params.get('consoletitle', False):
        return
    message = remove_terminal_sequences(message)
    if compat_os_name == 'nt':
        # SetConsoleTitleW only makes sense when we actually own a console window
        if ctypes.windll.kernel32.GetConsoleWindow():
            # c_wchar_p() might not be necessary if `message` is
            # already of type unicode()
            ctypes.windll.kernel32.SetConsoleTitleW(ctypes.c_wchar_p(message))
    elif 'TERM' in os.environ:
        # xterm-style OSC 0 escape sequence sets the window title
        self._write_string('\033]0;%s\007' % message, self._screen_file)
|
2013-11-17 11:39:52 +01:00
|
|
|
|
2013-11-17 21:05:14 +01:00
|
|
|
def save_console_title(self):
    """Push the current console title onto the terminal's title stack."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate'):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # xterm escape: save window title on stack
        self._write_string('\033[22;0t', self._screen_file)
|
2013-11-17 21:05:14 +01:00
|
|
|
|
|
|
|
def restore_console_title(self):
    """Pop the previously saved console title from the terminal's title stack."""
    if not self.params.get('consoletitle', False) or self.params.get('simulate'):
        return
    if compat_os_name != 'nt' and 'TERM' in os.environ:
        # xterm escape: restore window title from stack
        self._write_string('\033[23;0t', self._screen_file)
|
2013-11-17 21:05:14 +01:00
|
|
|
|
|
|
|
def __enter__(self):
|
|
|
|
self.save_console_title()
|
|
|
|
return self
|
|
|
|
|
|
|
|
def __exit__(self, *args):
|
|
|
|
self.restore_console_title()
|
2014-01-25 12:02:43 +01:00
|
|
|
|
2013-11-22 19:57:52 +01:00
|
|
|
if self.params.get('cookiefile') is not None:
|
2018-12-09 00:00:32 +01:00
|
|
|
self.cookiejar.save(ignore_discard=True, ignore_expires=True)
|
2013-11-17 21:05:14 +01:00
|
|
|
|
2021-12-21 12:32:13 +01:00
|
|
|
def trouble(self, message=None, tb=None, is_error=True):
    """Determine action to take when a download problem appears.

    Depending on if the downloader has been configured to ignore
    download errors or not, this method may throw an exception or
    not when errors are found, after printing the message.

    @param tb          If given, is additional traceback information
    @param is_error    Whether to raise error according to ignorerrors
    """
    if message is not None:
        self.to_stderr(message)
    if self.params.get('verbose'):
        if tb is None:
            if sys.exc_info()[0]:  # if .trouble has been called from an except block
                tb = ''
                # Prefer the traceback of an exception wrapped inside the
                # current one (e.g. the original cause carried in .exc_info)
                if hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
                    tb += ''.join(traceback.format_exception(*sys.exc_info()[1].exc_info))
                tb += encode_compat_str(traceback.format_exc())
            else:
                # No active exception: show the current call stack instead
                tb_data = traceback.format_list(traceback.extract_stack())
                tb = ''.join(tb_data)
        if tb:
            self.to_stderr(tb)
    if not is_error:
        return
    if not self.params.get('ignoreerrors'):
        # Re-raise with the innermost exc_info available so the original
        # cause is preserved in the DownloadError
        if sys.exc_info()[0] and hasattr(sys.exc_info()[1], 'exc_info') and sys.exc_info()[1].exc_info[0]:
            exc_info = sys.exc_info()[1].exc_info
        else:
            exc_info = sys.exc_info()
        raise DownloadError(message, exc_info)
    self._download_retcode = 1
|
|
|
|
|
2021-05-14 09:45:29 +02:00
|
|
|
def to_screen(self, message, skip_eol=False):
    """Print message to stdout if not in quiet mode"""
    quiet = self.params.get('quiet', False)
    self.to_stdout(message, skip_eol, quiet=quiet)
|
|
|
|
|
2021-10-20 18:37:32 +02:00
|
|
|
class Styles(Enum):
    # Color names consumed via `.value` (see _format_text).
    # NOTE: Enum members with equal values are aliases, so since WARNING
    # shares 'yellow' with HEADERS, `Styles.WARNING is Styles.HEADERS`.
    # Harmless here because only the value is ever used.
    HEADERS = 'yellow'
    EMPHASIS = 'light blue'
    ID = 'green'
    DELIM = 'blue'
    ERROR = 'red'
    WARNING = 'yellow'
    SUPPRESS = 'light black'
|
2021-10-20 18:37:32 +02:00
|
|
|
|
2021-11-28 22:25:37 +01:00
|
|
|
def _format_text(self, handle, allow_colors, text, f, fallback=None, *, test_encoding=False):
|
2021-10-20 18:37:32 +02:00
|
|
|
if test_encoding:
|
|
|
|
original_text = text
|
2022-02-17 14:51:59 +01:00
|
|
|
# handle.encoding can be None. See https://github.com/yt-dlp/yt-dlp/issues/2711
|
|
|
|
encoding = self.params.get('encoding') or getattr(handle, 'encoding', None) or 'ascii'
|
2021-10-20 18:37:32 +02:00
|
|
|
text = text.encode(encoding, 'ignore').decode(encoding)
|
|
|
|
if fallback is not None and text != original_text:
|
|
|
|
text = fallback
|
|
|
|
if isinstance(f, self.Styles):
|
2021-11-28 22:22:52 +01:00
|
|
|
f = f.value
|
2021-11-28 22:25:37 +01:00
|
|
|
return format_text(text, f) if allow_colors else text if fallback is None else fallback
|
2021-10-20 18:37:32 +02:00
|
|
|
|
|
|
|
def _format_screen(self, *args, **kwargs):
|
2021-11-28 22:25:37 +01:00
|
|
|
return self._format_text(
|
|
|
|
self._screen_file, self._allow_colors['screen'], *args, **kwargs)
|
2021-10-20 18:37:32 +02:00
|
|
|
|
|
|
|
def _format_err(self, *args, **kwargs):
|
2021-11-28 22:25:37 +01:00
|
|
|
return self._format_text(
|
|
|
|
self._err_file, self._allow_colors['err'], *args, **kwargs)
|
2021-10-08 21:11:59 +02:00
|
|
|
|
2021-07-20 22:05:35 +02:00
|
|
|
def report_warning(self, message, only_once=False):
    '''
    Print the message to stderr, it will be prefixed with 'WARNING:'
    If stderr is a tty file the 'WARNING:' will be colored
    '''
    logger = self.params.get('logger')
    if logger is not None:
        logger.warning(message)
        return
    if self.params.get('no_warnings'):
        return
    self.to_stderr(f'{self._format_err("WARNING:", self.Styles.WARNING)} {message}', only_once)
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-11-29 18:46:06 +01:00
|
|
|
def deprecation_warning(self, message):
    """Log `message` prefixed with 'DeprecationWarning:' (shown only once on stderr)."""
    if self.params.get('logger') is not None:
        # BUG FIX: the f-string prefix was missing, so the logger previously
        # received the literal text '{message}' instead of the message
        self.params['logger'].warning(f'DeprecationWarning: {message}')
    else:
        self.to_stderr(f'{self._format_err("DeprecationWarning:", self.Styles.ERROR)} {message}', True)
|
|
|
|
|
2021-12-21 12:32:13 +01:00
|
|
|
def report_error(self, message, *args, **kwargs):
    '''
    Do the same as trouble, but prefixes the message with 'ERROR:', colored
    in red if stderr is a tty file.
    '''
    prefix = self._format_err('ERROR:', self.Styles.ERROR)
    self.trouble(f'{prefix} {message}', *args, **kwargs)
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-07-21 14:36:34 +02:00
|
|
|
def write_debug(self, message, only_once=False):
    '''Log debug message or Print message to stderr'''
    if not self.params.get('verbose', False):
        return
    message = '[debug] %s' % message
    logger = self.params.get('logger')
    if logger:
        logger.debug(message)
    else:
        self.to_stderr(message, only_once)
|
2021-05-14 09:45:29 +02:00
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
def report_file_already_downloaded(self, file_name):
    """Report file has already been fully downloaded."""
    template = '[download] %s has already been downloaded'
    try:
        self.to_screen(template % file_name)
    except UnicodeEncodeError:
        # file_name may not be representable in the console encoding
        self.to_screen('[download] The file has already been downloaded')
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2019-10-13 18:00:48 +02:00
|
|
|
def report_file_delete(self, file_name):
    """Report that existing file will be deleted."""
    template = 'Deleting existing file %s'
    try:
        self.to_screen(template % file_name)
    except UnicodeEncodeError:
        # file_name may not be representable in the console encoding
        self.to_screen('Deleting existing file')
|
2019-10-13 18:00:48 +02:00
|
|
|
|
2021-08-19 03:49:23 +02:00
|
|
|
def raise_no_formats(self, info, forced=False):
    """Raise (or merely warn about) the absence of downloadable formats."""
    has_drm = info.get('__has_drm')
    if has_drm:
        msg = 'This video is DRM protected'
    else:
        msg = 'No video formats found!'
    expected = self.params.get('ignore_no_formats_error')
    if not forced and expected:
        self.report_warning(msg)
        return
    raise ExtractorError(msg, video_id=info['id'], ie=info['extractor'],
                         expected=has_drm or expected)
|
|
|
|
|
2021-02-03 14:36:09 +01:00
|
|
|
def parse_outtmpl(self):
    """Normalize the 'outtmpl' option into a dict and fill in the defaults."""
    outtmpl_dict = self.params.get('outtmpl', {})
    if not isinstance(outtmpl_dict, dict):
        # A plain string means "the default template"
        outtmpl_dict = {'default': outtmpl_dict}
    # Remove spaces in the default template
    if self.params.get('restrictfilenames'):
        sanitize = lambda tmpl: tmpl.replace(' - ', ' ').replace(' ', '-')
    else:
        sanitize = lambda tmpl: tmpl
    for key, tmpl in DEFAULT_OUTTMPL.items():
        if outtmpl_dict.get(key) is None:
            outtmpl_dict[key] = sanitize(tmpl)
    for key, val in outtmpl_dict.items():
        if isinstance(val, bytes):
            self.report_warning(
                'Parameter outtmpl is bytes, but should be a unicode string. '
                'Put from __future__ import unicode_literals at the top of your code file or consider switching to Python 3.x.')
    return outtmpl_dict
|
|
|
|
|
2021-06-12 17:18:06 +02:00
|
|
|
def get_output_path(self, dir_type='', filename=None):
    """Join the configured 'paths' (home plus the optional dir_type subdir)
    with filename and return the sanitized result."""
    paths = self.params.get('paths', {})
    assert isinstance(paths, dict)
    path = os.path.join(
        expand_path(paths.get('home', '').strip()),
        expand_path(paths.get(dir_type, '').strip()) if dir_type else '',
        filename or '')

    # Temporary fix for #4787
    # 'Treat' all problem characters by passing filename through preferredencoding
    # to workaround encoding issues with subprocess on python2 @ Windows
    if sys.version_info < (3, 0) and sys.platform == 'win32':
        path = encodeFilename(path, True).decode(preferredencoding())
    return sanitize_path(path, force=self.params.get('windowsfilenames'))
|
|
|
|
|
2021-06-08 16:41:00 +02:00
|
|
|
@staticmethod
def _outtmpl_expandpath(outtmpl):
    """Expand '~' and environment variables in an output template while
    keeping the '%%' and '$$' template escapes intact."""
    # expand_path translates '%%' into '%' and '$$' into '$'
    # correspondingly that is not what we want since we need to keep
    # '%%' intact for template dict substitution step. Working around
    # with boundary-alike separator hack.
    sep = ''.join([random.choice(ascii_letters) for _ in range(32)])
    outtmpl = outtmpl.replace('%%', '%{0}%'.format(sep)).replace('$$', '${0}$'.format(sep))

    # outtmpl should be expand_path'ed before template dict substitution
    # because meta fields may contain env variables we don't want to
    # be expanded. For example, for outtmpl "%(title)s.%(ext)s" and
    # title "Hello $PATH", we don't want `$PATH` to be expanded.
    return expand_path(outtmpl).replace(sep, '')
|
|
|
|
|
|
|
|
@staticmethod
def escape_outtmpl(outtmpl):
    ''' Escape any remaining strings like %s, %abc% etc. '''
    def _escape(mobj):
        # 'has_key' is set by STR_FORMAT_RE_TMPL for already-valid %(...)X
        # keys, which must be left untouched; anything else gets an extra
        # '%' so the later %-substitution treats it literally
        prefix = '' if mobj.group('has_key') else '%'
        return prefix + mobj.group(0)

    return re.sub(STR_FORMAT_RE_TMPL.format('', '(?![%(\0])'), _escape, outtmpl)
|
|
|
|
|
|
|
|
@classmethod
def validate_outtmpl(cls, outtmpl):
    ''' @return None or Exception object '''
    expanded = cls._outtmpl_expandpath(outtmpl)
    # Downgrade the custom conversion types (l, j, q, B, U, D, S) to plain
    # 's' so the stdlib %-formatter can understand the template
    normalized = re.sub(
        STR_FORMAT_RE_TMPL.format('[^)]*', '[ljqBUDS]'),
        lambda m: m.group(0)[:-1] + 's',
        expanded)
    try:
        # defaultdict(int) supplies a dummy value for every key the
        # template references; only genuinely malformed specs raise
        cls.escape_outtmpl(normalized) % collections.defaultdict(int)
    except ValueError as err:
        return err
    return None
|
|
|
|
|
2021-10-16 15:01:00 +02:00
|
|
|
@staticmethod
def _copy_infodict(info_dict):
    """Return a shallow copy of info_dict without internal-only entries."""
    copied = dict(info_dict)
    # '__postprocessors' holds internal objects; it is excluded from the copy
    copied.pop('__postprocessors', None)
    return copied
|
|
|
|
|
2021-12-23 02:14:42 +01:00
|
|
|
def prepare_outtmpl(self, outtmpl, info_dict, sanitize=False):
    """ Make the outtmpl and info_dict suitable for substitution: ydl.escape_outtmpl(outtmpl) % info_dict

    @param outtmpl   Output template using the extended %(field)X syntax
                     (negation, +/- maths, >strftime, ,alternates,
                     &replacement, |default, and the l/j/q/B/U/D/S conversions)
    @param info_dict Source of field values; it is copied (via _copy_infodict)
                     before derived fields are injected — except 'epoch',
                     which is set on the caller's dict so repeated calls see
                     the same value
    @param sanitize  Whether to sanitize the output as a filename.
                     For backward compatibility, a function can also be passed
    @return          (outtmpl, TMPL_DICT): the template rewritten to plain
                     %-format keys, and the dict mapping those keys to values
    """

    info_dict.setdefault('epoch', int(time.time()))  # keep epoch consistent once set

    info_dict = self._copy_infodict(info_dict)
    info_dict['duration_string'] = (  # %(duration>%H-%M-%S)s is wrong if duration > 24hrs
        formatSeconds(info_dict['duration'], '-' if sanitize else ':')
        if info_dict.get('duration', None) is not None
        else None)
    info_dict['autonumber'] = self.params.get('autonumber_start', 1) - 1 + self._num_downloads
    info_dict['video_autonumber'] = self._num_videos
    if info_dict.get('resolution') is None:
        info_dict['resolution'] = self.format_resolution(info_dict, default=None)

    # For fields playlist_index, playlist_autonumber and autonumber convert all occurrences
    # of %(field)s to %(field)0Nd for backward compatibility
    field_size_compat_map = {
        'playlist_index': number_of_digits(info_dict.get('_last_playlist_index') or 0),
        'playlist_autonumber': number_of_digits(info_dict.get('n_entries') or 0),
        'autonumber': self.params.get('autonumber_size') or 5,
    }

    TMPL_DICT = {}
    # Matches one whole %(key)X occurrence, including the custom conversions
    EXTERNAL_FORMAT_RE = re.compile(STR_FORMAT_RE_TMPL.format('[^)]*', f'[{STR_FORMAT_TYPES}ljqBUDS]'))
    MATH_FUNCTIONS = {
        '+': float.__add__,
        '-': float.__sub__,
    }
    # Field is of the form key1.key2...
    # where keys (except first) can be string, int or slice
    FIELD_RE = r'\w*(?:\.(?:\w+|{num}|{num}?(?::{num}?){{1,2}}))*'.format(num=r'(?:-?\d+)')
    MATH_FIELD_RE = r'''(?:{field}|{num})'''.format(field=FIELD_RE, num=r'-?\d+(?:.\d+)?')
    MATH_OPERATORS_RE = r'(?:%s)' % '|'.join(map(re.escape, MATH_FUNCTIONS.keys()))
    # Parses the inside of one key: field path, maths chain, strftime spec,
    # alternates, replacement and default — in that order
    INTERNAL_FORMAT_RE = re.compile(r'''(?x)
        (?P<negate>-)?
        (?P<fields>{field})
        (?P<maths>(?:{math_op}{math_field})*)
        (?:>(?P<strf_format>.+?))?
        (?P<alternate>(?<!\\),[^|&)]+)?
        (?:&(?P<replacement>.*?))?
        (?:\|(?P<default>.*?))?
        $'''.format(field=FIELD_RE, math_op=MATH_OPERATORS_RE, math_field=MATH_FIELD_RE))

    def _traverse_infodict(k):
        # Resolve a dotted field path (e.g. 'formats.0.url') against info_dict
        k = k.split('.')
        if k[0] == '':
            k.pop(0)
        return traverse_obj(info_dict, k, is_user_input=True, traverse_string=True)

    def get_value(mdict):
        # Resolve one parsed key spec (a groupdict of INTERNAL_FORMAT_RE)
        # Object traversal
        value = _traverse_infodict(mdict['fields'])
        # Negative
        if mdict['negate']:
            value = float_or_none(value)
            if value is not None:
                value *= -1
        # Do maths
        offset_key = mdict['maths']
        if offset_key:
            value = float_or_none(value)
            operator = None
            # Alternately consume an operator and an operand from the chain
            while offset_key:
                item = re.match(
                    MATH_FIELD_RE if operator else MATH_OPERATORS_RE,
                    offset_key).group(0)
                offset_key = offset_key[len(item):]
                if operator is None:
                    operator = MATH_FUNCTIONS[item]
                    continue
                item, multiplier = (item[1:], -1) if item[0] == '-' else (item, 1)
                offset = float_or_none(item)
                if offset is None:
                    # The operand is itself a field name — look it up
                    offset = float_or_none(_traverse_infodict(item))
                try:
                    value = operator(value, multiplier * offset)
                except (TypeError, ZeroDivisionError):
                    return None
                operator = None
        # Datetime formatting
        if mdict['strf_format']:
            value = strftime_or_none(value, mdict['strf_format'].replace('\\,', ','))

        return value

    na = self.params.get('outtmpl_na_placeholder', 'NA')

    def filename_sanitizer(key, value, restricted=self.params.get('restrictfilenames')):
        # id-like fields (id, *_id, id.*) get the is_id sanitization mode
        return sanitize_filename(str(value), restricted=restricted,
                                 is_id=re.search(r'(^|[_.])id(\.|$)', key))

    sanitizer = sanitize if callable(sanitize) else filename_sanitizer
    sanitize = bool(sanitize)

    def _dumpjson_default(obj):
        # Fallback serializer for the %(...)j conversion
        if isinstance(obj, (set, LazyList)):
            return list(obj)
        return repr(obj)

    def create_key(outer_mobj):
        # re.sub callback: rewrite one %(key)X occurrence into a plain
        # %-format key and record its resolved value in TMPL_DICT
        if not outer_mobj.group('has_key'):
            return outer_mobj.group(0)
        key = outer_mobj.group('key')
        mobj = re.match(INTERNAL_FORMAT_RE, key)
        initial_field = mobj.group('fields') if mobj else ''
        value, replacement, default = None, None, na
        while mobj:  # try each ','-separated alternate until one resolves
            mobj = mobj.groupdict()
            default = mobj['default'] if mobj['default'] is not None else default
            value = get_value(mobj)
            replacement = mobj['replacement']
            if value is None and mobj['alternate']:
                mobj = re.match(INTERNAL_FORMAT_RE, mobj['alternate'][1:])
            else:
                break

        fmt = outer_mobj.group('format')
        if fmt == 's' and value is not None and key in field_size_compat_map.keys():
            # Backward-compat zero-padding for autonumber/playlist_index
            fmt = '0{:d}d'.format(field_size_compat_map[key])

        value = default if value is None else value if replacement is None else replacement

        flags = outer_mobj.group('conversion') or ''
        str_fmt = f'{fmt[:-1]}s'
        if fmt[-1] == 'l':  # list
            delim = '\n' if '#' in flags else ', '
            value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
        elif fmt[-1] == 'j':  # json
            value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
        elif fmt[-1] == 'q':  # quoted
            value = map(str, variadic(value) if '#' in flags else [value])
            value, fmt = ' '.join(map(compat_shlex_quote, value)), str_fmt
        elif fmt[-1] == 'B':  # bytes
            value = f'%{str_fmt}'.encode('utf-8') % str(value).encode('utf-8')
            value, fmt = value.decode('utf-8', 'ignore'), 's'
        elif fmt[-1] == 'U':  # unicode normalized
            value, fmt = unicodedata.normalize(
                # "+" = compatibility equivalence, "#" = NFD
                'NF%s%s' % ('K' if '+' in flags else '', 'D' if '#' in flags else 'C'),
                value), str_fmt
        elif fmt[-1] == 'D':  # decimal suffix
            num_fmt, fmt = fmt[:-1].replace('#', ''), 's'
            value = format_decimal_suffix(value, f'%{num_fmt}f%s' if num_fmt else '%d%s',
                                          factor=1024 if '#' in flags else 1000)
        elif fmt[-1] == 'S':  # filename sanitization
            value, fmt = filename_sanitizer(initial_field, value, restricted='#' in flags), str_fmt
        elif fmt[-1] == 'c':
            if value:
                value = str(value)[0]
            else:
                fmt = str_fmt
        elif fmt[-1] not in 'rs':  # numeric
            value = float_or_none(value)
            if value is None:
                value, fmt = default, 's'

        if sanitize:
            if fmt[-1] == 'r':
                # If value is an object, sanitize might convert it to a string
                # So we convert it to repr first
                value, fmt = repr(value), str_fmt
            if fmt[-1] in 'csr':
                value = sanitizer(initial_field, value)

        # The '\0' markers keep this key from being re-matched/escaped later
        # (escape_outtmpl's pattern explicitly excludes '\0')
        key = '%s\0%s' % (key.replace('%', '%\0'), outer_mobj.group('format'))
        TMPL_DICT[key] = value
        return '{prefix}%({key}){fmt}'.format(key=key, fmt=fmt, prefix=outer_mobj.group('prefix'))

    return EXTERNAL_FORMAT_RE.sub(create_key, outtmpl), TMPL_DICT
|
2021-03-24 23:02:15 +01:00
|
|
|
|
2021-10-08 21:11:59 +02:00
|
|
|
def evaluate_outtmpl(self, outtmpl, info_dict, *args, **kwargs):
|
|
|
|
outtmpl, info_dict = self.prepare_outtmpl(outtmpl, info_dict, *args, **kwargs)
|
|
|
|
return self.escape_outtmpl(outtmpl) % info_dict
|
|
|
|
|
2021-02-03 14:36:09 +01:00
|
|
|
def _prepare_filename(self, info_dict, tmpl_type='default'):
    """Evaluate the output template of the given type into a filename.

    @param tmpl_type  Key into self.outtmpl_dict; falls back to 'default'
    @return           The filename (not yet joined with --paths), or None
                      when the template evaluates empty or raises ValueError
    """
    try:
        outtmpl = self._outtmpl_expandpath(self.outtmpl_dict.get(tmpl_type, self.outtmpl_dict['default']))
        filename = self.evaluate_outtmpl(outtmpl, info_dict, True)  # sanitize=True
        if not filename:
            return None

        if tmpl_type in ('default', 'temp'):
            final_ext, ext = self.params.get('final_ext'), info_dict.get('ext')
            if final_ext and ext and final_ext != ext and filename.endswith(f'.{final_ext}'):
                # The template produced the post-processed ('final') extension;
                # presumably replace_extension swaps it back to the current
                # ext for the intermediate file — TODO confirm semantics
                filename = replace_extension(filename, ext, final_ext)
        else:
            # Non-default template types carry a forced extension
            force_ext = OUTTMPL_TYPES[tmpl_type]
            if force_ext:
                filename = replace_extension(filename, force_ext, info_dict.get('ext'))

        # https://github.com/blackjack4494/youtube-dlc/issues/85
        trim_file_name = self.params.get('trim_file_name', False)
        if trim_file_name:
            # Trim only the stem; keep up to two trailing extensions intact
            no_ext, *ext = filename.rsplit('.', 2)
            filename = join_nonempty(no_ext[:trim_file_name], *ext, delim='.')

        return filename
    except ValueError as err:
        self.report_error('Error in output template: ' + str(err) + ' (encoding: ' + repr(preferredencoding()) + ')')
        return None
|
|
|
|
|
2021-02-03 14:36:09 +01:00
|
|
|
def prepare_filename(self, info_dict, dir_type='', warn=False):
    """Generate the output filename.

    @param info_dict  The video info dict to substitute into the template
    @param dir_type   Which output template/path type to use ('' = default)
    @param warn       Whether to warn (once) when --paths will be ignored
    @return           The full output path, '-' for stdout, or '' / None-ish
                      when no filename could be produced
    """
    filename = self._prepare_filename(info_dict, dir_type or 'default')
    if not filename and dir_type not in ('', 'temp'):
        return ''

    if warn:
        if not self.params.get('paths'):
            pass
        elif filename == '-':
            # Fix: corrected message grammar ("when an outputting" -> "when outputting")
            self.report_warning('--paths is ignored when outputting to stdout', only_once=True)
        elif os.path.isabs(filename):
            self.report_warning('--paths is ignored since an absolute path is given in output template', only_once=True)
    if filename == '-' or not filename:
        return filename

    return self.get_output_path(dir_type, filename)
|
2021-01-23 13:18:12 +01:00
|
|
|
|
2021-05-28 18:38:01 +02:00
|
|
|
def _match_entry(self, info_dict, incomplete=False, silent=False):
    """ Returns None if the file should be downloaded

    @param incomplete  Whether info_dict is only partially extracted
                       (passed through to the user's match_filter)
    @param silent      Suppress the '[download] <reason>' message
    @return            A human-readable skip reason, or None to download
    @raises ExistingVideoReached / RejectedVideoReached when the matching
            break_on_existing / break_on_reject option is enabled
    """

    video_title = info_dict.get('title', info_dict.get('id', 'video'))

    def check_filter():
        # Evaluate all user-configured filters; first failure wins
        if 'title' in info_dict:
            # This can happen when we're just evaluating the playlist
            title = info_dict['title']
            matchtitle = self.params.get('matchtitle', False)
            if matchtitle:
                if not re.search(matchtitle, title, re.IGNORECASE):
                    return '"' + title + '" title did not match pattern "' + matchtitle + '"'
            rejecttitle = self.params.get('rejecttitle', False)
            if rejecttitle:
                if re.search(rejecttitle, title, re.IGNORECASE):
                    return '"' + title + '" title matched reject pattern "' + rejecttitle + '"'
        date = info_dict.get('upload_date')
        if date is not None:
            dateRange = self.params.get('daterange', DateRange())
            if date not in dateRange:
                return '%s upload date is not in range %s' % (date_from_str(date).isoformat(), dateRange)
        view_count = info_dict.get('view_count')
        if view_count is not None:
            min_views = self.params.get('min_views')
            if min_views is not None and view_count < min_views:
                return 'Skipping %s, because it has not reached minimum view count (%d/%d)' % (video_title, view_count, min_views)
            max_views = self.params.get('max_views')
            if max_views is not None and view_count > max_views:
                return 'Skipping %s, because it has exceeded the maximum view count (%d/%d)' % (video_title, view_count, max_views)
        if age_restricted(info_dict.get('age_limit'), self.params.get('age_limit')):
            return 'Skipping "%s" because it is age restricted' % video_title

        match_filter = self.params.get('match_filter')
        if match_filter is not None:
            try:
                ret = match_filter(info_dict, incomplete=incomplete)
            except TypeError:
                # For backward compatibility: older filters take only info_dict
                ret = None if incomplete else match_filter(info_dict)
            if ret is not None:
                return ret
        return None

    # Archive check takes precedence over all other filters
    if self.in_download_archive(info_dict):
        reason = '%s has already been recorded in the archive' % video_title
        break_opt, break_err = 'break_on_existing', ExistingVideoReached
    else:
        reason = check_filter()
        break_opt, break_err = 'break_on_reject', RejectedVideoReached
    if reason is not None:
        if not silent:
            self.to_screen('[download] ' + reason)
        # Abort the whole run if the corresponding --break-* option is set
        if self.params.get(break_opt, False):
            raise break_err()
    return reason
|
2013-10-22 14:49:34 +02:00
|
|
|
|
2013-11-03 11:56:45 +01:00
|
|
|
@staticmethod
|
|
|
|
def add_extra_info(info_dict, extra_info):
|
|
|
|
'''Set the keys from extra_info in info dict if they are missing'''
|
|
|
|
for key, value in extra_info.items():
|
|
|
|
info_dict.setdefault(key, value)
|
|
|
|
|
2021-09-03 19:18:42 +02:00
|
|
|
def extract_info(self, url, download=True, ie_key=None, extra_info=None,
                 process=True, force_generic_extractor=False):
    """
    Return a list with a dictionary for each video extracted.

    Arguments:
    url -- URL to extract

    Keyword arguments:
    download -- whether to download videos during extraction
    ie_key -- extractor key hint
    extra_info -- dictionary containing the extra values to add to each result
    process -- whether to resolve all unresolved references (URLs, playlist items),
               must be True for download to work.
    force_generic_extractor -- force using the generic extractor
    """

    if extra_info is None:
        extra_info = {}

    if not ie_key and force_generic_extractor:
        ie_key = 'Generic'

    if ie_key:
        # Only consider the hinted extractor
        ies = {ie_key: self._get_info_extractor_class(ie_key)}
    else:
        ies = self._ies

    for ie_key, ie in ies.items():
        if not ie.suitable(url):
            continue

        if not ie.working():
            self.report_warning('The program functionality for this site has been marked as broken, '
                                'and will probably not work.')

        # Cheap pre-check: skip extraction entirely if the id derivable from
        # the URL alone is already in the download archive
        temp_id = ie.get_temp_id(url)
        if temp_id is not None and self.in_download_archive({'id': temp_id, 'ie_key': ie_key}):
            self.to_screen(f'[{ie_key}] {temp_id}: has already been recorded in the archive')
            if self.params.get('break_on_existing', False):
                raise ExistingVideoReached()
            break  # skips the for-else below; the function returns None
        return self.__extract_info(url, self.get_info_extractor(ie_key), download, extra_info, process)
    else:
        # for-else: no extractor accepted the URL
        self.report_error('no suitable InfoExtractor for URL %s' % url)
|
|
|
|
|
2021-09-03 23:37:27 +02:00
|
|
|
def __handle_extraction_exceptions(func):
    # Decorator (applied inside the class body) wrapping an extraction method
    # with the shared exception policy:
    #   - user cancellations and LazyList/PagedList IndexErrors propagate
    #   - ReExtractInfo restarts the wrapped call from scratch
    #   - geo-restriction and extractor errors are reported, not re-raised
    #   - any other exception is reported only if 'ignoreerrors' is set,
    #     otherwise re-raised
    @functools.wraps(func)
    def wrapper(self, *args, **kwargs):
        while True:  # only ReExtractInfo loops; every other path breaks/returns
            try:
                return func(self, *args, **kwargs)
            except (DownloadCancelled, LazyList.IndexError, PagedList.IndexError):
                raise
            except ReExtractInfo as e:
                if e.expected:
                    self.to_screen(f'{e}; Re-extracting data')
                else:
                    self.to_stderr('\r')
                    self.report_warning(f'{e}; Re-extracting data')
                continue
            except GeoRestrictedError as e:
                msg = e.msg
                if e.countries:
                    msg += '\nThis video is available in %s.' % ', '.join(
                        map(ISO3166Utils.short2full, e.countries))
                msg += '\nYou might want to use a VPN or a proxy server (with --proxy) to workaround.'
                self.report_error(msg)
            except ExtractorError as e:  # An error we somewhat expected
                self.report_error(str(e), e.format_traceback())
            except Exception as e:
                if self.params.get('ignoreerrors'):
                    self.report_error(str(e), tb=encode_compat_str(traceback.format_exc()))
                else:
                    raise
            break
    return wrapper
|
|
|
|
|
2021-11-28 19:57:44 +01:00
|
|
|
def _wait_for_video(self, ie_result):
    """Block until an upcoming video should be retried (--wait-for-video).

    Returns immediately unless waiting is enabled and ie_result is a video
    with neither formats nor a direct url. Otherwise sleeps until the
    (clamped) release time and then raises ReExtractInfo so the caller's
    retry loop re-extracts; Ctrl+C triggers an immediate retry.
    """
    if (not self.params.get('wait_for_video')
            or ie_result.get('_type', 'video') != 'video'
            or ie_result.get('formats') or ie_result.get('url')):
        return

    format_dur = lambda dur: '%02d:%02d:%02d' % timetuple_from_msec(dur * 1000)[:-1]
    last_msg = ''

    def progress(msg):
        # Overwrite the previous status line in place (pad + carriage return)
        nonlocal last_msg
        self.to_screen(msg + ' ' * (len(last_msg) - len(msg)) + '\r', skip_eol=True)
        last_msg = msg

    min_wait, max_wait = self.params.get('wait_for_video')
    diff = try_get(ie_result, lambda x: x['release_timestamp'] - time.time())
    if diff is None and ie_result.get('live_status') == 'is_upcoming':
        # No known release time: pick a wait within the configured bounds
        diff = random.randrange(min_wait, max_wait) if (max_wait and min_wait) else (max_wait or min_wait)
        self.report_warning('Release time of video is not known')
    elif (diff or 0) <= 0:
        self.report_warning('Video should already be available according to extracted info')
    # Clamp the wait into [min_wait, max_wait]
    diff = min(max(diff or 0, min_wait or 0), max_wait or float('inf'))
    self.to_screen(f'[wait] Waiting for {format_dur(diff)} - Press Ctrl+C to try now')

    wait_till = time.time() + diff
    try:
        while True:
            diff = wait_till - time.time()
            if diff <= 0:
                progress('')
                raise ReExtractInfo('[wait] Wait period ended', expected=True)
            progress(f'[wait] Remaining time until next attempt: {self._format_screen(format_dur(diff), self.Styles.EMPHASIS)}')
            time.sleep(1)
    except KeyboardInterrupt:
        # Ctrl+C means "try now", not "abort"
        progress('')
        raise ReExtractInfo('[wait] Interrupted by user', expected=True)
    except BaseException as e:
        if not isinstance(e, ReExtractInfo):
            # Terminate the in-place status line before propagating
            self.to_screen('')
        raise
|
|
|
|
|
2020-11-21 15:50:42 +01:00
|
|
|
@__handle_extraction_exceptions
def __extract_info(self, url, ie, download, extra_info, process):
    # Run a single extractor on `url` and (optionally) process the result.
    # The decorator supplies error handling and the ReExtractInfo retry loop.
    ie_result = ie.extract(url)
    if ie_result is None:  # Finished already (backwards compatibility; listformats and friends should be moved here)
        return
    if isinstance(ie_result, list):
        # Backwards compatibility: old IE result format
        ie_result = {
            '_type': 'compat_list',
            'entries': ie_result,
        }
    if extra_info.get('original_url'):
        # Preserve the URL the user originally supplied across redirections
        ie_result.setdefault('original_url', extra_info['original_url'])
    self.add_default_extra_info(ie_result, ie, url)
    if process:
        # May block via --wait-for-video before resolving references
        self._wait_for_video(ie_result)
        return self.process_ie_result(ie_result, download, extra_info)
    else:
        return ie_result
|
2013-10-22 14:49:34 +02:00
|
|
|
|
2014-03-23 16:06:03 +01:00
|
|
|
def add_default_extra_info(self, ie_result, ie, url):
    """Fill in the standard URL- and extractor-identity fields on ie_result.

    Existing values are never overwritten (add_extra_info uses setdefault
    semantics).
    """
    if url is not None:
        self.add_extra_info(ie_result, {'webpage_url': url, 'original_url': url})
    # Read back webpage_url: it may have just been defaulted from `url`
    webpage_url = ie_result.get('webpage_url')
    if webpage_url:
        self.add_extra_info(ie_result, {
            'webpage_url_basename': url_basename(webpage_url),
            'webpage_url_domain': get_domain(webpage_url),
        })
    if ie is not None:
        self.add_extra_info(ie_result, {'extractor': ie.IE_NAME, 'extractor_key': ie.ie_key()})
|
2014-03-23 16:06:03 +01:00
|
|
|
|
2021-08-18 23:40:32 +02:00
|
|
|
def process_ie_result(self, ie_result, download=True, extra_info=None):
    """
    Take the result of the ie(may be modified) and resolve all unresolved
    references (URLs, playlist items).

    It will also download the videos if 'download'.
    Returns the resolved ie_result.
    """
    if extra_info is None:
        extra_info = {}
    result_type = ie_result.get('_type', 'video')

    if result_type in ('url', 'url_transparent'):
        ie_result['url'] = sanitize_url(ie_result['url'])
        if ie_result.get('original_url'):
            extra_info.setdefault('original_url', ie_result['original_url'])

        extract_flat = self.params.get('extract_flat', False)
        if ((extract_flat == 'in_playlist' and 'playlist' in extra_info)
                or extract_flat is True):
            # Flat extraction: don't resolve the URL; just print/archive it
            info_copy = ie_result.copy()
            ie = try_get(ie_result.get('ie_key'), self.get_info_extractor)
            if ie and not ie_result.get('id'):
                info_copy['id'] = ie.get_temp_id(ie_result['url'])
            self.add_default_extra_info(info_copy, ie, ie_result['url'])
            self.add_extra_info(info_copy, extra_info)
            info_copy, _ = self.pre_process(info_copy)
            self.__forced_printings(info_copy, self.prepare_filename(info_copy), incomplete=True)
            if self.params.get('force_write_download_archive', False):
                self.record_download_archive(info_copy)
            return ie_result

    if result_type == 'video':
        self.add_extra_info(ie_result, extra_info)
        ie_result = self.process_video_result(ie_result, download=download)
        additional_urls = (ie_result or {}).get('additional_urls')
        if additional_urls:
            # TODO: Improve MetadataParserPP to allow setting a list
            if isinstance(additional_urls, compat_str):
                additional_urls = [additional_urls]
            self.to_screen(
                '[info] %s: %d additional URL(s) requested' % (ie_result['id'], len(additional_urls)))
            self.write_debug('Additional URLs: "%s"' % '", "'.join(additional_urls))
            ie_result['additional_entries'] = [
                self.extract_info(
                    url, download, extra_info=extra_info,
                    force_generic_extractor=self.params.get('force_generic_extractor'))
                for url in additional_urls
            ]
            return ie_result
    elif result_type == 'url':
        # We have to add extra_info to the results because it may be
        # contained in a playlist
        return self.extract_info(
            ie_result['url'], download,
            ie_key=ie_result.get('ie_key'),
            extra_info=extra_info)
    elif result_type == 'url_transparent':
        # Use the information from the embedding page
        info = self.extract_info(
            ie_result['url'], ie_key=ie_result.get('ie_key'),
            extra_info=extra_info, download=False, process=False)

        # extract_info may return None when ignoreerrors is enabled and
        # extraction failed with an error, don't crash and return early
        # in this case
        if not info:
            return info

        # Outer (embedding-page) values win over inner ones, except for the
        # identity fields below which must come from the resolved result
        force_properties = dict(
            (k, v) for k, v in ie_result.items() if v is not None)
        for f in ('_type', 'url', 'id', 'extractor', 'extractor_key', 'ie_key'):
            if f in force_properties:
                del force_properties[f]
        new_result = info.copy()
        new_result.update(force_properties)

        # Extracted info may not be a video result (i.e.
        # info.get('_type', 'video') != video) but rather an url or
        # url_transparent. In such cases outer metadata (from ie_result)
        # should be propagated to inner one (info). For this to happen
        # _type of info should be overridden with url_transparent. This
        # fixes issue from https://github.com/ytdl-org/youtube-dl/pull/11163.
        if new_result.get('_type') == 'url':
            new_result['_type'] = 'url_transparent'

        return self.process_ie_result(
            new_result, download=download, extra_info=extra_info)
    elif result_type in ('playlist', 'multi_video'):
        # Protect from infinite recursion due to recursively nested playlists
        # (see https://github.com/ytdl-org/youtube-dl/issues/27833)
        webpage_url = ie_result['webpage_url']
        if webpage_url in self._playlist_urls:
            # Fix: parenthesized the fallback — previously '%' bound tighter
            # than 'or', so a None title was formatted into the message and
            # the id fallback could never take effect
            self.to_screen(
                '[download] Skipping already downloaded playlist: %s'
                % (ie_result.get('title') or ie_result.get('id')))
            return

        self._playlist_level += 1
        self._playlist_urls.add(webpage_url)
        self._sanitize_thumbnails(ie_result)
        try:
            return self.__process_playlist(ie_result, download)
        finally:
            self._playlist_level -= 1
            if not self._playlist_level:
                self._playlist_urls.clear()
    elif result_type == 'compat_list':
        self.report_warning(
            'Extractor %s returned a compat_list result. '
            'It needs to be updated.' % ie_result.get('extractor'))

        def _fixup(r):
            # Stamp playlist-level metadata onto each legacy entry
            self.add_extra_info(r, {
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            })
            return r
        ie_result['entries'] = [
            self.process_ie_result(_fixup(r), download, extra_info)
            for r in ie_result['entries']
        ]
        return ie_result
    else:
        raise Exception('Invalid result type: %s' % result_type)
|
|
|
|
|
2021-03-09 03:17:21 +01:00
|
|
|
def _ensure_dir_exists(self, path):
|
|
|
|
return make_dir(path, self.report_error)
|
|
|
|
|
2022-01-13 12:01:08 +01:00
|
|
|
@staticmethod
|
|
|
|
def _playlist_infodict(ie_result, **kwargs):
|
|
|
|
return {
|
|
|
|
**ie_result,
|
|
|
|
'playlist': ie_result.get('title') or ie_result.get('id'),
|
|
|
|
'playlist_id': ie_result.get('id'),
|
|
|
|
'playlist_title': ie_result.get('title'),
|
|
|
|
'playlist_uploader': ie_result.get('uploader'),
|
|
|
|
'playlist_uploader_id': ie_result.get('uploader_id'),
|
|
|
|
'playlist_index': 0,
|
|
|
|
**kwargs,
|
|
|
|
}
|
|
|
|
|
2021-01-16 13:40:15 +01:00
|
|
|
    def __process_playlist(self, ie_result, download):
        """Process a 'playlist'/'multi_video' ie_result.

        Resolves which entries are requested (playliststart/playlistend/
        playlist_items), writes playlist-level metadata files, downloads each
        entry, and finally runs playlist post-processors.

        @param ie_result   Playlist info dict; must contain 'entries' (a list,
                           generator, LazyList or PagedList)
        @param download    Whether to actually download each entry
        @returns           ie_result with 'entries' replaced by per-entry
                           results, or None if writing a metadata file failed
        @raises EntryNotInPlaylist  if 'entries' is missing, or an index from a
                           previous partial extraction cannot be found again
        """
        # We process each entry in the playlist
        playlist = ie_result.get('title') or ie_result.get('id')
        self.to_screen('[download] Downloading playlist: %s' % playlist)

        if 'entries' not in ie_result:
            raise EntryNotInPlaylist('There are no entries')

        # Sentinel distinguishing "slot not extracted" from a legitimate None
        # entry in the padded entries list below.
        MissingEntry = object()
        # 'requested_entries' present => this info dict came from a previous
        # (possibly partial) run; pad 'entries' so positions match the
        # original 1-based indices.
        incomplete_entries = bool(ie_result.get('requested_entries'))
        if incomplete_entries:
            def fill_missing_entries(entries, indices):
                # Build a list long enough for the highest requested index,
                # placing each known entry at its 1-based position.
                ret = [MissingEntry] * max(indices)
                for i, entry in zip(indices, entries):
                    ret[i - 1] = entry
                return ret
            ie_result['entries'] = fill_missing_entries(ie_result['entries'], ie_result['requested_entries'])

        playlist_results = []

        playliststart = self.params.get('playliststart', 1)
        playlistend = self.params.get('playlistend')
        # For backwards compatibility, interpret -1 as whole list
        if playlistend == -1:
            playlistend = None

        playlistitems_str = self.params.get('playlist_items')
        playlistitems = None
        if playlistitems_str is not None:
            def iter_playlistitems(format):
                # Parse a spec like "1-3,7,10-13" into individual indices.
                for string_segment in format.split(','):
                    if '-' in string_segment:
                        start, end = string_segment.split('-')
                        for item in range(int(start), int(end) + 1):
                            yield int(item)
                    else:
                        yield int(string_segment)
            playlistitems = orderedSet(iter_playlistitems(playlistitems_str))

        ie_entries = ie_result['entries']
        if isinstance(ie_entries, list):
            # Fully materialized playlist: count is known up front.
            playlist_count = len(ie_entries)
            msg = f'Collected {playlist_count} videos; downloading %d of them'
            ie_result['playlist_count'] = ie_result.get('playlist_count') or playlist_count

            def get_entry(i):
                return ie_entries[i - 1]
        else:
            # Lazy source: entries are fetched on demand; count unknown.
            msg = 'Downloading %d videos'
            if not isinstance(ie_entries, (PagedList, LazyList)):
                ie_entries = LazyList(ie_entries)
            elif isinstance(ie_entries, InAdvancePagedList):
                if ie_entries._pagesize == 1:
                    playlist_count = ie_entries._pagecount

            def get_entry(i):
                # Wrap the lazy lookup so extraction errors are handled the
                # same way as for regular entry extraction.
                return YoutubeDL.__handle_extraction_exceptions(
                    lambda self, i: ie_entries[i - 1]
                )(self, i)

        # First pass: collect the requested entries (without downloading).
        # 'broken' records that iteration stopped early due to --break-on-*.
        entries, broken = [], False
        items = playlistitems if playlistitems is not None else itertools.count(playliststart)
        for i in items:
            if i == 0:
                continue
            if playlistitems is None and playlistend is not None and playlistend < i:
                break
            entry = None
            try:
                entry = get_entry(i)
                if entry is MissingEntry:
                    raise EntryNotInPlaylist()
            except (IndexError, EntryNotInPlaylist):
                if incomplete_entries:
                    raise EntryNotInPlaylist(f'Entry {i} cannot be found')
                elif not playlistitems:
                    # Ran past the end of a lazy playlist.
                    break
            entries.append(entry)
            try:
                # Pre-filter so --break-on-existing / --break-on-reject can
                # stop collection early (silently: no per-entry messages yet).
                if entry is not None:
                    self._match_entry(entry, incomplete=True, silent=True)
            except (ExistingVideoReached, RejectedVideoReached):
                broken = True
                break
        ie_result['entries'] = entries

        # Save playlist_index before re-ordering
        entries = [
            ((playlistitems[i - 1] if playlistitems else i + playliststart - 1), entry)
            for i, entry in enumerate(entries, 1)
            if entry is not None]
        n_entries = len(entries)

        # Only trust the local count as the full playlist size if nothing
        # truncated or filtered the iteration above.
        if not (ie_result.get('playlist_count') or broken or playlistitems or playlistend):
            ie_result['playlist_count'] = n_entries

        if not playlistitems and (playliststart != 1 or playlistend):
            playlistitems = list(range(playliststart, playliststart + n_entries))
        ie_result['requested_entries'] = playlistitems

        # Playlist-level metadata files (info.json, description, thumbnails).
        # A None return from a _write_* helper signals a fatal write error.
        _infojson_written = False
        write_playlist_files = self.params.get('allow_playlist_files', True)
        if write_playlist_files and self.params.get('list_thumbnails'):
            self.list_thumbnails(ie_result)
        if write_playlist_files and not self.params.get('simulate'):
            ie_copy = self._playlist_infodict(ie_result, n_entries=n_entries)
            _infojson_written = self._write_info_json(
                'playlist', ie_result, self.prepare_filename(ie_copy, 'pl_infojson'))
            if _infojson_written is None:
                return
            if self._write_description('playlist', ie_result,
                                       self.prepare_filename(ie_copy, 'pl_description')) is None:
                return
            # TODO: This should be passed to ThumbnailsConvertor if necessary
            self._write_thumbnails('playlist', ie_copy, self.prepare_filename(ie_copy, 'pl_thumbnail'))

        if self.params.get('playlistreverse', False):
            entries = entries[::-1]
        if self.params.get('playlistrandom', False):
            random.shuffle(entries)

        x_forwarded_for = ie_result.get('__x_forwarded_for_ip')

        self.to_screen('[%s] playlist %s: %s' % (ie_result['extractor'], playlist, msg % n_entries))
        failures = 0
        max_failures = self.params.get('skip_playlist_after_errors') or float('inf')
        # Second pass: actually process/download each collected entry.
        for i, entry_tuple in enumerate(entries, 1):
            playlist_index, entry = entry_tuple
            if 'playlist-index' in self.params.get('compat_opts', []):
                # youtube-dl compatible playlist_index numbering
                playlist_index = playlistitems[i - 1] if playlistitems else i + playliststart - 1
            self.to_screen('[download] Downloading video %s of %s' % (i, n_entries))
            # This __x_forwarded_for_ip thing is a bit ugly but requires
            # minimal changes
            if x_forwarded_for:
                entry['__x_forwarded_for_ip'] = x_forwarded_for
            # Playlist-level metadata propagated into each entry's info dict.
            extra = {
                'n_entries': n_entries,
                '_last_playlist_index': max(playlistitems) if playlistitems else (playlistend or n_entries),
                'playlist_count': ie_result.get('playlist_count'),
                'playlist_index': playlist_index,
                'playlist_autonumber': i,
                'playlist': playlist,
                'playlist_id': ie_result.get('id'),
                'playlist_title': ie_result.get('title'),
                'playlist_uploader': ie_result.get('uploader'),
                'playlist_uploader_id': ie_result.get('uploader_id'),
                'extractor': ie_result['extractor'],
                'webpage_url': ie_result['webpage_url'],
                'webpage_url_basename': url_basename(ie_result['webpage_url']),
                'webpage_url_domain': get_domain(ie_result['webpage_url']),
                'extractor_key': ie_result['extractor_key'],
            }

            # Re-run match filters non-silently so skip reasons are reported.
            if self._match_entry(entry, incomplete=True) is not None:
                continue

            entry_result = self.__process_iterable_entry(entry, download, extra)
            if not entry_result:
                failures += 1
                if failures >= max_failures:
                    self.report_error(
                        'Skipping the remaining entries in playlist "%s" since %d items failed extraction' % (playlist, failures))
                    break
            playlist_results.append(entry_result)
        ie_result['entries'] = playlist_results

        # Write the updated info to json
        if _infojson_written and self._write_info_json(
                'updated playlist', ie_result,
                self.prepare_filename(ie_copy, 'pl_infojson'), overwrite=True) is None:
            return

        ie_result = self.run_all_pps('playlist', ie_result)
        self.to_screen(f'[download] Finished downloading playlist: {playlist}')
        return ie_result
|
|
|
|
|
2020-11-21 15:50:42 +01:00
|
|
|
    @__handle_extraction_exceptions
    def __process_iterable_entry(self, entry, download, extra_info):
        """Process a single playlist entry via process_ie_result.

        The decorator converts extraction errors into a falsy result (and the
        appropriate reporting) instead of letting them propagate, so a single
        bad entry does not abort the whole playlist.
        """
        return self.process_ie_result(
            entry, download=download, extra_info=extra_info)
|
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
    def _build_format_filter(self, filter_spec):
        """Return a predicate ``f(format_dict) -> bool-ish`` for *filter_spec*.

        Two grammars are tried in order:
          1. numeric:  KEY OP[?] VALUE   (e.g. ``height<=720``, ``filesize>10M``)
          2. string:   KEY [!]OP[?] VALUE  (e.g. ``ext=mp4``, ``vcodec!^=avc``)
        A trailing/embedded ``?`` (none_inclusive) makes the filter accept
        formats where the key is missing.

        @raises ValueError   for an unparsable numeric value
        @raises SyntaxError  if the spec matches neither grammar
        """

        # Numeric comparison operators for the first grammar.
        OPERATORS = {
            '<': operator.lt,
            '<=': operator.le,
            '>': operator.gt,
            '>=': operator.ge,
            '=': operator.eq,
            '!=': operator.ne,
        }
        operator_rex = re.compile(r'''(?x)\s*
            (?P<key>width|height|tbr|abr|vbr|asr|filesize|filesize_approx|fps)\s*
            (?P<op>%s)(?P<none_inclusive>\s*\?)?\s*
            (?P<value>[0-9.]+(?:[kKmMgGtTpPeEzZyY]i?[Bb]?)?)\s*
            ''' % '|'.join(map(re.escape, OPERATORS.keys())))
        m = operator_rex.fullmatch(filter_spec)
        if m:
            try:
                comparison_value = int(m.group('value'))
            except ValueError:
                # Not a plain integer: try it as a size ("10M"), then with an
                # explicit byte suffix ("10MB") before giving up.
                comparison_value = parse_filesize(m.group('value'))
                if comparison_value is None:
                    comparison_value = parse_filesize(m.group('value') + 'B')
                if comparison_value is None:
                    raise ValueError(
                        'Invalid value %r in format specification %r' % (
                            m.group('value'), filter_spec))
            op = OPERATORS[m.group('op')]

        if not m:
            # Fall back to the string-comparison grammar.
            STR_OPERATORS = {
                '=': operator.eq,
                '^=': lambda attr, value: attr.startswith(value),
                '$=': lambda attr, value: attr.endswith(value),
                '*=': lambda attr, value: value in attr,
                # '~=' compares against a compiled regex (see below).
                '~=': lambda attr, value: value.search(attr) is not None
            }
            str_operator_rex = re.compile(r'''(?x)\s*
                (?P<key>[a-zA-Z0-9._-]+)\s*
                (?P<negation>!\s*)?(?P<op>%s)\s*(?P<none_inclusive>\?\s*)?
                (?P<quote>["'])?
                (?P<value>(?(quote)(?:(?!(?P=quote))[^\\]|\\.)+|[\w.-]+))
                (?(quote)(?P=quote))\s*
                ''' % '|'.join(map(re.escape, STR_OPERATORS.keys())))
            m = str_operator_rex.fullmatch(filter_spec)
            if m:
                if m.group('op') == '~=':
                    comparison_value = re.compile(m.group('value'))
                else:
                    # Unescape backslash-escaped quotes/backslashes from a
                    # quoted value.
                    comparison_value = re.sub(r'''\\([\\"'])''', r'\1', m.group('value'))
                str_op = STR_OPERATORS[m.group('op')]
                if m.group('negation'):
                    op = lambda attr, value: not str_op(attr, value)
                else:
                    op = str_op

        if not m:
            raise SyntaxError('Invalid filter specification %r' % filter_spec)

        def _filter(f):
            # Missing key: keep the format only if the spec used '?'.
            actual_value = f.get(m.group('key'))
            if actual_value is None:
                return m.group('none_inclusive')
            return op(actual_value, comparison_value)
        return _filter
|
|
|
|
|
2021-10-24 11:16:07 +02:00
|
|
|
    def _check_formats(self, formats):
        """Yield only the formats from *formats* that are actually downloadable.

        Each format is probed with a small test download into a temporary file
        (``self.dl(..., test=True)``); formats whose probe fails are skipped
        with a message instead of being yielded.
        """
        for f in formats:
            self.to_screen('[info] Testing format %s' % f['format_id'])
            path = self.get_output_path('temp')
            if not self._ensure_dir_exists(f'{path}/'):
                continue
            # delete=False: the downloader writes to the file by name, so it
            # must survive close(); we remove it ourselves in the finally.
            temp_file = tempfile.NamedTemporaryFile(suffix='.tmp', delete=False, dir=path or None)
            temp_file.close()
            try:
                success, _ = self.dl(temp_file.name, f, test=True)
            except (DownloadError, IOError, OSError, ValueError) + network_exceptions:
                success = False
            finally:
                if os.path.exists(temp_file.name):
                    try:
                        os.remove(temp_file.name)
                    except OSError:
                        self.report_warning('Unable to delete temporary file "%s"' % temp_file.name)
            if success:
                yield f
            else:
                self.to_screen('[info] Unable to download format %s. Skipping...' % f['format_id'])
|
|
|
|
|
2017-07-22 19:12:01 +02:00
|
|
|
def _default_format_spec(self, info_dict, download=True):
|
|
|
|
|
2017-10-11 18:45:03 +02:00
|
|
|
def can_merge():
|
|
|
|
merger = FFmpegMergerPP(self)
|
|
|
|
return merger.available and merger.can_merge()
|
|
|
|
|
Change defaults
* Enabled --ignore by default
* Disabled --video-multistreams and --audio-multistreams by default
* Changed default format selection to 'bv*+ba/b' when --audio-multistreams is disabled
* Changed default format sort order to 'res,fps,codec,size,br,asr,proto,ext,has_audio,source,format_id'
* Changed default output template to '%(title)s [%(id)s].%(ext)s'
* Enabled `--list-formats-as-table` by default
2021-01-04 17:40:47 +01:00
|
|
|
prefer_best = (
|
2021-08-07 02:01:51 +02:00
|
|
|
not self.params.get('simulate')
|
Change defaults
* Enabled --ignore by default
* Disabled --video-multistreams and --audio-multistreams by default
* Changed default format selection to 'bv*+ba/b' when --audio-multistreams is disabled
* Changed default format sort order to 'res,fps,codec,size,br,asr,proto,ext,has_audio,source,format_id'
* Changed default output template to '%(title)s [%(id)s].%(ext)s'
* Enabled `--list-formats-as-table` by default
2021-01-04 17:40:47 +01:00
|
|
|
and download
|
|
|
|
and (
|
|
|
|
not can_merge()
|
2021-01-07 12:41:39 +01:00
|
|
|
or info_dict.get('is_live', False)
|
2021-02-03 14:36:09 +01:00
|
|
|
or self.outtmpl_dict['default'] == '-'))
|
2021-05-11 10:00:48 +02:00
|
|
|
compat = (
|
|
|
|
prefer_best
|
|
|
|
or self.params.get('allow_multiple_audio_streams', False)
|
|
|
|
or 'format-spec' in self.params.get('compat_opts', []))
|
Change defaults
* Enabled --ignore by default
* Disabled --video-multistreams and --audio-multistreams by default
* Changed default format selection to 'bv*+ba/b' when --audio-multistreams is disabled
* Changed default format sort order to 'res,fps,codec,size,br,asr,proto,ext,has_audio,source,format_id'
* Changed default output template to '%(title)s [%(id)s].%(ext)s'
* Enabled `--list-formats-as-table` by default
2021-01-04 17:40:47 +01:00
|
|
|
|
|
|
|
return (
|
2021-05-11 10:00:48 +02:00
|
|
|
'best/bestvideo+bestaudio' if prefer_best
|
|
|
|
else 'bestvideo*+bestaudio/best' if not compat
|
Change defaults
* Enabled --ignore by default
* Disabled --video-multistreams and --audio-multistreams by default
* Changed default format selection to 'bv*+ba/b' when --audio-multistreams is disabled
* Changed default format sort order to 'res,fps,codec,size,br,asr,proto,ext,has_audio,source,format_id'
* Changed default output template to '%(title)s [%(id)s].%(ext)s'
* Enabled `--list-formats-as-table` by default
2021-01-04 17:40:47 +01:00
|
|
|
else 'bestvideo+bestaudio/best')
|
2017-07-22 19:12:01 +02:00
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
def build_format_selector(self, format_spec):
|
|
|
|
def syntax_error(note, start):
|
|
|
|
message = (
|
|
|
|
'Invalid format specification: '
|
|
|
|
'{0}\n\t{1}\n\t{2}^'.format(note, format_spec, ' ' * start[1]))
|
|
|
|
return SyntaxError(message)
|
|
|
|
|
|
|
|
PICKFIRST = 'PICKFIRST'
|
|
|
|
MERGE = 'MERGE'
|
|
|
|
SINGLE = 'SINGLE'
|
2015-06-29 12:42:02 +02:00
|
|
|
GROUP = 'GROUP'
|
2015-06-28 22:08:29 +02:00
|
|
|
FormatSelector = collections.namedtuple('FormatSelector', ['type', 'selector', 'filters'])
|
|
|
|
|
Change defaults
* Enabled --ignore by default
* Disabled --video-multistreams and --audio-multistreams by default
* Changed default format selection to 'bv*+ba/b' when --audio-multistreams is disabled
* Changed default format sort order to 'res,fps,codec,size,br,asr,proto,ext,has_audio,source,format_id'
* Changed default output template to '%(title)s [%(id)s].%(ext)s'
* Enabled `--list-formats-as-table` by default
2021-01-04 17:40:47 +01:00
|
|
|
allow_multiple_streams = {'audio': self.params.get('allow_multiple_audio_streams', False),
|
|
|
|
'video': self.params.get('allow_multiple_video_streams', False)}
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
2021-10-24 11:16:07 +02:00
|
|
|
check_formats = self.params.get('check_formats') == 'selected'
|
2021-05-04 17:54:00 +02:00
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
def _parse_filter(tokens):
|
|
|
|
filter_parts = []
|
|
|
|
for type, string, start, _, _ in tokens:
|
|
|
|
if type == tokenize.OP and string == ']':
|
|
|
|
return ''.join(filter_parts)
|
|
|
|
else:
|
|
|
|
filter_parts.append(string)
|
|
|
|
|
2015-08-04 22:29:23 +02:00
|
|
|
def _remove_unused_ops(tokens):
|
2015-11-20 18:21:46 +01:00
|
|
|
# Remove operators that we don't use and join them with the surrounding strings
|
2015-08-04 22:29:23 +02:00
|
|
|
# for example: 'mp4' '-' 'baseline' '-' '16x9' is converted to 'mp4-baseline-16x9'
|
|
|
|
ALLOWED_OPS = ('/', '+', ',', '(', ')')
|
|
|
|
last_string, last_start, last_end, last_line = None, None, None, None
|
|
|
|
for type, string, start, end, line in tokens:
|
|
|
|
if type == tokenize.OP and string == '[':
|
|
|
|
if last_string:
|
|
|
|
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
|
|
|
last_string = None
|
|
|
|
yield type, string, start, end, line
|
|
|
|
# everything inside brackets will be handled by _parse_filter
|
|
|
|
for type, string, start, end, line in tokens:
|
|
|
|
yield type, string, start, end, line
|
|
|
|
if type == tokenize.OP and string == ']':
|
|
|
|
break
|
|
|
|
elif type == tokenize.OP and string in ALLOWED_OPS:
|
|
|
|
if last_string:
|
|
|
|
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
|
|
|
last_string = None
|
|
|
|
yield type, string, start, end, line
|
|
|
|
elif type in [tokenize.NAME, tokenize.NUMBER, tokenize.OP]:
|
|
|
|
if not last_string:
|
|
|
|
last_string = string
|
|
|
|
last_start = start
|
|
|
|
last_end = end
|
|
|
|
else:
|
|
|
|
last_string += string
|
|
|
|
if last_string:
|
|
|
|
yield tokenize.NAME, last_string, last_start, last_end, last_line
|
|
|
|
|
2015-06-30 19:45:42 +02:00
|
|
|
def _parse_format_selection(tokens, inside_merge=False, inside_choice=False, inside_group=False):
|
2015-06-28 22:08:29 +02:00
|
|
|
selectors = []
|
|
|
|
current_selector = None
|
|
|
|
for type, string, start, _, _ in tokens:
|
|
|
|
# ENCODING is only defined in python 3.x
|
|
|
|
if type == getattr(tokenize, 'ENCODING', None):
|
|
|
|
continue
|
|
|
|
elif type in [tokenize.NAME, tokenize.NUMBER]:
|
|
|
|
current_selector = FormatSelector(SINGLE, string, [])
|
|
|
|
elif type == tokenize.OP:
|
2015-06-30 19:45:42 +02:00
|
|
|
if string == ')':
|
|
|
|
if not inside_group:
|
|
|
|
# ')' will be handled by the parentheses group
|
|
|
|
tokens.restore_last_token()
|
2015-06-28 22:08:29 +02:00
|
|
|
break
|
2015-06-30 19:45:42 +02:00
|
|
|
elif inside_merge and string in ['/', ',']:
|
2015-06-29 12:42:02 +02:00
|
|
|
tokens.restore_last_token()
|
|
|
|
break
|
2015-06-30 19:45:42 +02:00
|
|
|
elif inside_choice and string == ',':
|
|
|
|
tokens.restore_last_token()
|
|
|
|
break
|
|
|
|
elif string == ',':
|
2015-07-10 22:46:25 +02:00
|
|
|
if not current_selector:
|
|
|
|
raise syntax_error('"," must follow a format selector', start)
|
2015-06-28 22:08:29 +02:00
|
|
|
selectors.append(current_selector)
|
|
|
|
current_selector = None
|
|
|
|
elif string == '/':
|
2015-08-03 23:04:11 +02:00
|
|
|
if not current_selector:
|
|
|
|
raise syntax_error('"/" must follow a format selector', start)
|
2015-06-28 22:08:29 +02:00
|
|
|
first_choice = current_selector
|
2015-06-30 19:45:42 +02:00
|
|
|
second_choice = _parse_format_selection(tokens, inside_choice=True)
|
2015-07-04 21:30:26 +02:00
|
|
|
current_selector = FormatSelector(PICKFIRST, (first_choice, second_choice), [])
|
2015-06-28 22:08:29 +02:00
|
|
|
elif string == '[':
|
|
|
|
if not current_selector:
|
|
|
|
current_selector = FormatSelector(SINGLE, 'best', [])
|
|
|
|
format_filter = _parse_filter(tokens)
|
|
|
|
current_selector.filters.append(format_filter)
|
2015-06-29 12:42:02 +02:00
|
|
|
elif string == '(':
|
|
|
|
if current_selector:
|
|
|
|
raise syntax_error('Unexpected "("', start)
|
2015-06-30 19:45:42 +02:00
|
|
|
group = _parse_format_selection(tokens, inside_group=True)
|
|
|
|
current_selector = FormatSelector(GROUP, group, [])
|
2015-06-28 22:08:29 +02:00
|
|
|
elif string == '+':
|
2015-08-04 09:07:44 +02:00
|
|
|
if not current_selector:
|
|
|
|
raise syntax_error('Unexpected "+"', start)
|
|
|
|
selector_1 = current_selector
|
|
|
|
selector_2 = _parse_format_selection(tokens, inside_merge=True)
|
|
|
|
if not selector_2:
|
|
|
|
raise syntax_error('Expected a selector', start)
|
|
|
|
current_selector = FormatSelector(MERGE, (selector_1, selector_2), [])
|
2015-06-28 22:08:29 +02:00
|
|
|
else:
|
|
|
|
raise syntax_error('Operator not recognized: "{0}"'.format(string), start)
|
|
|
|
elif type == tokenize.ENDMARKER:
|
|
|
|
break
|
|
|
|
if current_selector:
|
|
|
|
selectors.append(current_selector)
|
|
|
|
return selectors
|
|
|
|
|
2021-04-10 16:40:30 +02:00
|
|
|
def _merge(formats_pair):
|
|
|
|
format_1, format_2 = formats_pair
|
|
|
|
|
|
|
|
formats_info = []
|
|
|
|
formats_info.extend(format_1.get('requested_formats', (format_1,)))
|
|
|
|
formats_info.extend(format_2.get('requested_formats', (format_2,)))
|
|
|
|
|
|
|
|
if not allow_multiple_streams['video'] or not allow_multiple_streams['audio']:
|
2021-06-13 00:46:42 +02:00
|
|
|
get_no_more = {'video': False, 'audio': False}
|
2021-04-10 16:40:30 +02:00
|
|
|
for (i, fmt_info) in enumerate(formats_info):
|
2021-06-13 00:46:42 +02:00
|
|
|
if fmt_info.get('acodec') == fmt_info.get('vcodec') == 'none':
|
|
|
|
formats_info.pop(i)
|
|
|
|
continue
|
|
|
|
for aud_vid in ['audio', 'video']:
|
2021-04-10 16:40:30 +02:00
|
|
|
if not allow_multiple_streams[aud_vid] and fmt_info.get(aud_vid[0] + 'codec') != 'none':
|
|
|
|
if get_no_more[aud_vid]:
|
|
|
|
formats_info.pop(i)
|
2021-07-31 12:29:52 +02:00
|
|
|
break
|
2021-04-10 16:40:30 +02:00
|
|
|
get_no_more[aud_vid] = True
|
|
|
|
|
|
|
|
if len(formats_info) == 1:
|
|
|
|
return formats_info[0]
|
|
|
|
|
|
|
|
video_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('vcodec') != 'none']
|
|
|
|
audio_fmts = [fmt_info for fmt_info in formats_info if fmt_info.get('acodec') != 'none']
|
|
|
|
|
|
|
|
the_only_video = video_fmts[0] if len(video_fmts) == 1 else None
|
|
|
|
the_only_audio = audio_fmts[0] if len(audio_fmts) == 1 else None
|
|
|
|
|
|
|
|
output_ext = self.params.get('merge_output_format')
|
|
|
|
if not output_ext:
|
|
|
|
if the_only_video:
|
|
|
|
output_ext = the_only_video['ext']
|
|
|
|
elif the_only_audio and not video_fmts:
|
|
|
|
output_ext = the_only_audio['ext']
|
|
|
|
else:
|
|
|
|
output_ext = 'mkv'
|
|
|
|
|
2021-10-12 13:17:18 +02:00
|
|
|
filtered = lambda *keys: filter(None, (traverse_obj(fmt, *keys) for fmt in formats_info))
|
|
|
|
|
2021-04-10 16:40:30 +02:00
|
|
|
new_dict = {
|
|
|
|
'requested_formats': formats_info,
|
2021-10-12 13:17:18 +02:00
|
|
|
'format': '+'.join(filtered('format')),
|
|
|
|
'format_id': '+'.join(filtered('format_id')),
|
2021-04-10 16:40:30 +02:00
|
|
|
'ext': output_ext,
|
2021-10-12 13:17:18 +02:00
|
|
|
'protocol': '+'.join(map(determine_protocol, formats_info)),
|
2021-11-10 17:11:41 +01:00
|
|
|
'language': '+'.join(orderedSet(filtered('language'))) or None,
|
|
|
|
'format_note': '+'.join(orderedSet(filtered('format_note'))) or None,
|
|
|
|
'filesize_approx': sum(filtered('filesize', 'filesize_approx')) or None,
|
2021-10-12 13:17:18 +02:00
|
|
|
'tbr': sum(filtered('tbr', 'vbr', 'abr')),
|
2021-04-10 16:40:30 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
if the_only_video:
|
|
|
|
new_dict.update({
|
|
|
|
'width': the_only_video.get('width'),
|
|
|
|
'height': the_only_video.get('height'),
|
|
|
|
'resolution': the_only_video.get('resolution') or self.format_resolution(the_only_video),
|
|
|
|
'fps': the_only_video.get('fps'),
|
2021-10-22 22:37:20 +02:00
|
|
|
'dynamic_range': the_only_video.get('dynamic_range'),
|
2021-04-10 16:40:30 +02:00
|
|
|
'vcodec': the_only_video.get('vcodec'),
|
|
|
|
'vbr': the_only_video.get('vbr'),
|
|
|
|
'stretched_ratio': the_only_video.get('stretched_ratio'),
|
|
|
|
})
|
|
|
|
|
|
|
|
if the_only_audio:
|
|
|
|
new_dict.update({
|
|
|
|
'acodec': the_only_audio.get('acodec'),
|
|
|
|
'abr': the_only_audio.get('abr'),
|
2021-10-12 13:17:18 +02:00
|
|
|
'asr': the_only_audio.get('asr'),
|
2021-04-10 16:40:30 +02:00
|
|
|
})
|
|
|
|
|
|
|
|
return new_dict
|
|
|
|
|
2021-05-04 17:54:00 +02:00
|
|
|
def _check_formats(formats):
|
2021-06-27 04:05:58 +02:00
|
|
|
if not check_formats:
|
|
|
|
yield from formats
|
2021-07-07 17:35:58 +02:00
|
|
|
return
|
2021-10-24 11:16:07 +02:00
|
|
|
yield from self._check_formats(formats)
|
2021-05-04 17:54:00 +02:00
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
def _build_selector_function(selector):
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
if isinstance(selector, list): # ,
|
2015-06-28 22:08:29 +02:00
|
|
|
fs = [_build_selector_function(s) for s in selector]
|
|
|
|
|
2016-07-15 19:55:43 +02:00
|
|
|
def selector_function(ctx):
|
2015-06-28 22:08:29 +02:00
|
|
|
for f in fs:
|
2021-06-27 04:05:58 +02:00
|
|
|
yield from f(ctx)
|
2015-06-28 22:08:29 +02:00
|
|
|
return selector_function
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
|
|
|
elif selector.type == GROUP: # ()
|
2015-06-29 12:42:02 +02:00
|
|
|
selector_function = _build_selector_function(selector.selector)
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
|
|
|
elif selector.type == PICKFIRST: # /
|
2015-06-28 22:08:29 +02:00
|
|
|
fs = [_build_selector_function(s) for s in selector.selector]
|
|
|
|
|
2016-07-15 19:55:43 +02:00
|
|
|
def selector_function(ctx):
|
2015-06-28 22:08:29 +02:00
|
|
|
for f in fs:
|
2016-07-15 19:55:43 +02:00
|
|
|
picked_formats = list(f(ctx))
|
2015-06-28 22:08:29 +02:00
|
|
|
if picked_formats:
|
|
|
|
return picked_formats
|
|
|
|
return []
|
|
|
|
|
2021-06-27 04:05:58 +02:00
|
|
|
elif selector.type == MERGE: # +
|
|
|
|
selector_1, selector_2 = map(_build_selector_function, selector.selector)
|
|
|
|
|
|
|
|
def selector_function(ctx):
|
2021-12-20 07:06:46 +01:00
|
|
|
for pair in itertools.product(selector_1(ctx), selector_2(ctx)):
|
2021-06-27 04:05:58 +02:00
|
|
|
yield _merge(pair)
|
|
|
|
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
elif selector.type == SINGLE: # atom
|
2021-04-26 07:19:22 +02:00
|
|
|
format_spec = selector.selector or 'best'
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
2021-04-10 16:40:30 +02:00
|
|
|
# TODO: Add allvideo, allaudio etc by generalizing the code with best/worst selector
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
if format_spec == 'all':
|
|
|
|
def selector_function(ctx):
|
2021-11-19 01:06:28 +01:00
|
|
|
yield from _check_formats(ctx['formats'][::-1])
|
2021-04-10 16:40:30 +02:00
|
|
|
elif format_spec == 'mergeall':
|
|
|
|
def selector_function(ctx):
|
2021-11-19 02:00:25 +01:00
|
|
|
formats = list(_check_formats(ctx['formats']))
|
2021-04-10 18:59:58 +02:00
|
|
|
if not formats:
|
|
|
|
return
|
2021-04-13 07:23:25 +02:00
|
|
|
merged_format = formats[-1]
|
|
|
|
for f in formats[-2::-1]:
|
2021-04-10 16:40:30 +02:00
|
|
|
merged_format = _merge((merged_format, f))
|
|
|
|
yield merged_format
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
|
|
|
else:
|
2021-05-04 17:54:00 +02:00
|
|
|
format_fallback, format_reverse, format_idx = False, True, 1
|
2021-04-02 18:42:42 +02:00
|
|
|
mobj = re.match(
|
|
|
|
r'(?P<bw>best|worst|b|w)(?P<type>video|audio|v|a)?(?P<mod>\*)?(?:\.(?P<n>[1-9]\d*))?$',
|
|
|
|
format_spec)
|
|
|
|
if mobj is not None:
|
|
|
|
format_idx = int_or_none(mobj.group('n'), default=1)
|
2021-05-04 17:54:00 +02:00
|
|
|
format_reverse = mobj.group('bw')[0] == 'b'
|
2021-04-02 18:42:42 +02:00
|
|
|
format_type = (mobj.group('type') or [None])[0]
|
|
|
|
not_format_type = {'v': 'a', 'a': 'v'}.get(format_type)
|
|
|
|
format_modified = mobj.group('mod') is not None
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
|
|
|
format_fallback = not format_type and not format_modified # for b, w
|
2021-06-12 22:02:19 +02:00
|
|
|
_filter_f = (
|
2021-04-02 18:42:42 +02:00
|
|
|
(lambda f: f.get('%scodec' % format_type) != 'none')
|
|
|
|
if format_type and format_modified # bv*, ba*, wv*, wa*
|
|
|
|
else (lambda f: f.get('%scodec' % not_format_type) == 'none')
|
|
|
|
if format_type # bv, ba, wv, wa
|
|
|
|
else (lambda f: f.get('vcodec') != 'none' and f.get('acodec') != 'none')
|
|
|
|
if not format_modified # b, w
|
2021-06-12 22:02:19 +02:00
|
|
|
else lambda f: True) # b*, w*
|
|
|
|
filter_f = lambda f: _filter_f(f) and (
|
|
|
|
f.get('vcodec') != 'none' or f.get('acodec') != 'none')
|
2015-06-28 22:08:29 +02:00
|
|
|
else:
|
2021-10-15 15:20:28 +02:00
|
|
|
if format_spec in self._format_selection_exts['audio']:
|
2021-10-03 23:25:11 +02:00
|
|
|
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none'
|
2021-10-15 15:20:28 +02:00
|
|
|
elif format_spec in self._format_selection_exts['video']:
|
2021-10-03 23:25:11 +02:00
|
|
|
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') != 'none' and f.get('vcodec') != 'none'
|
2021-10-15 15:20:28 +02:00
|
|
|
elif format_spec in self._format_selection_exts['storyboards']:
|
2021-10-03 23:25:11 +02:00
|
|
|
filter_f = lambda f: f.get('ext') == format_spec and f.get('acodec') == 'none' and f.get('vcodec') == 'none'
|
|
|
|
else:
|
2021-10-09 02:23:15 +02:00
|
|
|
filter_f = lambda f: f.get('format_id') == format_spec # id
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
|
|
|
|
def selector_function(ctx):
|
|
|
|
formats = list(ctx['formats'])
|
|
|
|
matches = list(filter(filter_f, formats)) if filter_f is not None else formats
|
2021-05-04 17:54:00 +02:00
|
|
|
if format_fallback and ctx['incomplete_formats'] and not matches:
|
Better Format Selection
* Added options: --video-multistreams, --no-video-multistreams, --audio-multistreams, --no-audio-multistreams
* New format selectors: best*, worst*, bestvideo*, bestaudio*, worstvideo*, worstaudio*
* Added b,w,v,a as alias for best, worst, video and audio respectively in format selection
* Changed video format sorting to show video only files and video+audio files together.
2020-11-05 16:35:36 +01:00
|
|
|
# for extractors with incomplete formats (audio only (soundcloud)
|
|
|
|
# or video only (imgur)) best/worst will fallback to
|
|
|
|
# best/worst {video,audio}-only format
|
2021-05-04 17:54:00 +02:00
|
|
|
matches = formats
|
2021-06-27 04:05:58 +02:00
|
|
|
matches = LazyList(_check_formats(matches[::-1 if format_reverse else 1]))
|
|
|
|
try:
|
2021-05-04 17:54:00 +02:00
|
|
|
yield matches[format_idx - 1]
|
2021-06-27 04:05:58 +02:00
|
|
|
except IndexError:
|
|
|
|
return
|
2015-01-23 00:04:05 +01:00
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
filters = [self._build_format_filter(f) for f in selector.filters]
|
2015-01-23 00:04:05 +01:00
|
|
|
|
2016-07-15 19:55:43 +02:00
|
|
|
def final_selector(ctx):
|
2021-12-20 07:06:46 +01:00
|
|
|
ctx_copy = dict(ctx)
|
2015-06-28 22:08:29 +02:00
|
|
|
for _filter in filters:
|
2016-07-15 19:55:43 +02:00
|
|
|
ctx_copy['formats'] = list(filter(_filter, ctx_copy['formats']))
|
|
|
|
return selector_function(ctx_copy)
|
2015-06-28 22:08:29 +02:00
|
|
|
return final_selector
|
2015-01-23 00:04:05 +01:00
|
|
|
|
2015-06-28 22:08:29 +02:00
|
|
|
stream = io.BytesIO(format_spec.encode('utf-8'))
|
2015-06-29 12:42:02 +02:00
|
|
|
try:
|
2015-08-04 22:29:23 +02:00
|
|
|
tokens = list(_remove_unused_ops(compat_tokenize_tokenize(stream.readline)))
|
2015-06-29 12:42:02 +02:00
|
|
|
except tokenize.TokenError:
|
|
|
|
raise syntax_error('Missing closing/opening brackets or parenthesis', (0, len(format_spec)))
|
|
|
|
|
|
|
|
class TokenIterator(object):
|
|
|
|
def __init__(self, tokens):
|
|
|
|
self.tokens = tokens
|
|
|
|
self.counter = 0
|
|
|
|
|
|
|
|
def __iter__(self):
|
|
|
|
return self
|
|
|
|
|
|
|
|
def __next__(self):
|
|
|
|
if self.counter >= len(self.tokens):
|
|
|
|
raise StopIteration()
|
|
|
|
value = self.tokens[self.counter]
|
|
|
|
self.counter += 1
|
|
|
|
return value
|
|
|
|
|
|
|
|
next = __next__
|
|
|
|
|
|
|
|
def restore_last_token(self):
|
|
|
|
self.counter -= 1
|
|
|
|
|
|
|
|
parsed_selector = _parse_format_selection(iter(TokenIterator(tokens)))
|
2015-06-28 22:08:29 +02:00
|
|
|
return _build_selector_function(parsed_selector)
|
2013-10-21 13:19:58 +02:00
|
|
|
|
2015-01-24 18:52:26 +01:00
|
|
|
def _calc_headers(self, info_dict):
    """Return the HTTP headers to use when downloading info_dict.

    Starts from the global default headers, overlays any per-entry
    'http_headers', then adds a Cookie header (from the cookiejar) and an
    X-Forwarded-For header (from the private '__x_forwarded_for_ip' field)
    when applicable.
    """
    headers = std_headers.copy()
    headers.update(info_dict.get('http_headers') or {})

    cookie_header = self._calc_cookies(info_dict)
    if cookie_header:
        headers['Cookie'] = cookie_header

    # Only inject the forwarded IP if the extractor/user did not already set one
    forwarded_ip = info_dict.get('__x_forwarded_for_ip')
    if forwarded_ip and 'X-Forwarded-For' not in headers:
        headers['X-Forwarded-For'] = forwarded_ip

    return headers
|
|
|
|
|
|
|
|
def _calc_cookies(self, info_dict):
    """Return the Cookie header value the cookiejar would send for this URL,
    or None if no cookies apply."""
    request = sanitized_Request(info_dict['url'])
    self.cookiejar.add_cookie_header(request)
    return request.get_header('Cookie')
|
2015-01-24 18:52:26 +01:00
|
|
|
|
2021-10-24 11:16:07 +02:00
|
|
|
def _sort_thumbnails(self, thumbnails):
|
|
|
|
thumbnails.sort(key=lambda t: (
|
|
|
|
t.get('preference') if t.get('preference') is not None else -1,
|
|
|
|
t.get('width') if t.get('width') is not None else -1,
|
|
|
|
t.get('height') if t.get('height') is not None else -1,
|
|
|
|
t.get('id') if t.get('id') is not None else '',
|
|
|
|
t.get('url')))
|
|
|
|
|
2021-06-12 17:21:00 +02:00
|
|
|
def _sanitize_thumbnails(self, info_dict):
    """Normalize info_dict's thumbnails in place.

    Promotes a lone 'thumbnail' field to a one-element 'thumbnails' list,
    sorts the list, fills in missing 'id'/'resolution', sanitizes URLs and,
    when 'check_formats' is True, lazily drops thumbnails whose URL does
    not respond to a HEAD request.
    """
    thumbnails = info_dict.get('thumbnails')
    if thumbnails is None:
        single_thumbnail = info_dict.get('thumbnail')
        if single_thumbnail:
            thumbnails = [{'url': single_thumbnail}]
            info_dict['thumbnails'] = thumbnails
    if not thumbnails:
        return

    def check_thumbnails(candidates):
        # Generator: yield only thumbnails whose URL answers a HEAD request
        for t in candidates:
            self.to_screen(f'[info] Testing thumbnail {t["id"]}')
            try:
                self.urlopen(HEADRequest(t['url']))
            except network_exceptions as err:
                self.to_screen(f'[info] Unable to connect to thumbnail {t["id"]} URL {t["url"]!r} - {err}. Skipping...')
                continue
            yield t

    self._sort_thumbnails(thumbnails)
    for index, thumb in enumerate(thumbnails):
        if thumb.get('id') is None:
            thumb['id'] = '%d' % index
        if thumb.get('width') and thumb.get('height'):
            thumb['resolution'] = '%dx%d' % (thumb['width'], thumb['height'])
        thumb['url'] = sanitize_url(thumb['url'])

    if self.params.get('check_formats') is True:
        # Best thumbnails are tested first; the LazyList keeps worst-to-best order
        info_dict['thumbnails'] = LazyList(check_thumbnails(thumbnails[::-1]), reverse=True)
    else:
        info_dict['thumbnails'] = thumbnails
|
2021-05-23 13:58:15 +02:00
|
|
|
|
2013-07-02 10:08:58 +02:00
|
|
|
def process_video_result(self, info_dict, download=True):
    """Sanitize and complete a single-video info_dict, select the requested
    formats and (when `download` is true) hand each selected format to
    process_info(). Returns the (mutated) info_dict.
    """
    assert info_dict.get('_type', 'video') == 'video'
    self._num_videos += 1

    # --- Mandatory fields: 'id' and 'title' must be present and non-empty ---
    if 'id' not in info_dict:
        raise ExtractorError('Missing "id" field in extractor result', ie=info_dict['extractor'])
    elif not info_dict.get('id'):
        raise ExtractorError('Extractor failed to obtain "id"', ie=info_dict['extractor'])

    # Preserve the original title before any live-timestamp suffixing below
    info_dict['fulltitle'] = info_dict.get('title')
    if 'title' not in info_dict:
        raise ExtractorError('Missing "title" field in extractor result',
                             video_id=info_dict['id'], ie=info_dict['extractor'])
    elif not info_dict.get('title'):
        self.report_warning('Extractor failed to obtain "title". Creating a generic title instead')
        info_dict['title'] = f'{info_dict["extractor"]} video #{info_dict["id"]}'

    def report_force_conversion(field, field_not, conversion):
        # Warn that an extractor returned a wrongly-typed field that we coerced
        self.report_warning(
            '"%s" field is not %s - forcing %s conversion, there is an error in extractor'
            % (field, field_not, conversion))

    def sanitize_string_field(info, string_field):
        # Coerce a non-string field to str (no-op when absent or already str)
        field = info.get(string_field)
        if field is None or isinstance(field, compat_str):
            return
        report_force_conversion(string_field, 'a string', 'string')
        info[string_field] = compat_str(field)

    def sanitize_numeric_fields(info):
        # Coerce every known numeric field to int (or None on failure)
        for numeric_field in self._NUMERIC_FIELDS:
            field = info.get(numeric_field)
            if field is None or isinstance(field, compat_numeric_types):
                continue
            report_force_conversion(numeric_field, 'numeric', 'int')
            info[numeric_field] = int_or_none(field)

    sanitize_string_field(info_dict, 'id')
    sanitize_numeric_fields(info_dict)

    if 'playlist' not in info_dict:
        # It isn't part of a playlist
        info_dict['playlist'] = None
        info_dict['playlist_index'] = None

    self._sanitize_thumbnails(info_dict)

    # Keep the single 'thumbnail' field consistent with the thumbnails list
    thumbnail = info_dict.get('thumbnail')
    thumbnails = info_dict.get('thumbnails')
    if thumbnail:
        info_dict['thumbnail'] = sanitize_url(thumbnail)
    elif thumbnails:
        info_dict['thumbnail'] = thumbnails[-1]['url']

    if info_dict.get('display_id') is None and 'id' in info_dict:
        info_dict['display_id'] = info_dict['id']

    if info_dict.get('duration') is not None:
        info_dict['duration_string'] = formatSeconds(info_dict['duration'])

    # Derive YYYYMMDD date fields from their corresponding unix timestamps
    for ts_key, date_key in (
            ('timestamp', 'upload_date'),
            ('release_timestamp', 'release_date'),
            ('modified_timestamp', 'modified_date'),
    ):
        if info_dict.get(date_key) is None and info_dict.get(ts_key) is not None:
            # Working around out-of-range timestamp values (e.g. negative ones on Windows,
            # see http://bugs.python.org/issue1646728)
            try:
                upload_date = datetime.datetime.utcfromtimestamp(info_dict[ts_key])
                info_dict[date_key] = upload_date.strftime('%Y%m%d')
            except (ValueError, OverflowError, OSError):
                pass

    # --- Reconcile 'live_status' with the boolean 'is_live'/'was_live' flags ---
    live_keys = ('is_live', 'was_live')
    live_status = info_dict.get('live_status')
    if live_status is None:
        for key in live_keys:
            if info_dict.get(key) is False:
                continue
            if info_dict.get(key):
                live_status = key
                break
        if all(info_dict.get(key) is False for key in live_keys):
            live_status = 'not_live'
    if live_status:
        info_dict['live_status'] = live_status
        for key in live_keys:
            if info_dict.get(key) is None:
                info_dict[key] = (live_status == key)

    # Auto generate title fields corresponding to the *_number fields when missing
    # in order to always have clean titles. This is very common for TV series.
    for field in ('chapter', 'season', 'episode'):
        if info_dict.get('%s_number' % field) is not None and not info_dict.get(field):
            info_dict[field] = '%s %d' % (field.capitalize(), info_dict['%s_number' % field])

    # Sanitize URLs and fill missing extensions for subtitles and captions
    for cc_kind in ('subtitles', 'automatic_captions'):
        cc = info_dict.get(cc_kind)
        if cc:
            for _, subtitle in cc.items():
                for subtitle_format in subtitle:
                    if subtitle_format.get('url'):
                        subtitle_format['url'] = sanitize_url(subtitle_format['url'])
                    if subtitle_format.get('ext') is None:
                        subtitle_format['ext'] = determine_ext(subtitle_format['url']).lower()

    automatic_captions = info_dict.get('automatic_captions')
    subtitles = info_dict.get('subtitles')

    info_dict['requested_subtitles'] = self.process_subtitles(
        info_dict['id'], subtitles, automatic_captions)

    if info_dict.get('formats') is None:
        # There's only one format available
        formats = [info_dict]
    else:
        formats = info_dict['formats']

    info_dict['__has_drm'] = any(f.get('has_drm') for f in formats)
    if not self.params.get('allow_unplayable_formats'):
        formats = [f for f in formats if not f.get('has_drm')]

    if info_dict.get('is_live'):
        # Keep only formats matching the --live-from-start preference
        get_from_start = bool(self.params.get('live_from_start'))
        formats = [f for f in formats if bool(f.get('is_from_start')) == get_from_start]
        if not get_from_start:
            info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')

    if not formats:
        self.raise_no_formats(info_dict)

    def is_wellformed(f):
        # A format without a URL cannot be downloaded; drop it with a warning
        url = f.get('url')
        if not url:
            self.report_warning(
                '"url" field is missing or empty - skipping format, '
                'there is an error in extractor')
            return False
        if isinstance(url, bytes):
            sanitize_string_field(f, 'url')
        return True

    # Filter out malformed formats for better extraction robustness
    formats = list(filter(is_wellformed, formats))

    formats_dict = {}

    # We check that all the formats have the format and format_id fields
    for i, format in enumerate(formats):
        sanitize_string_field(format, 'format_id')
        sanitize_numeric_fields(format)
        format['url'] = sanitize_url(format['url'])
        if not format.get('format_id'):
            format['format_id'] = compat_str(i)
        else:
            # Sanitize format_id from characters used in format selector expression
            format['format_id'] = re.sub(r'[\s,/+\[\]()]', '_', format['format_id'])
        format_id = format['format_id']
        if format_id not in formats_dict:
            formats_dict[format_id] = []
        formats_dict[format_id].append(format)

    # Make sure all formats have unique format_id
    common_exts = set(itertools.chain(*self._format_selection_exts.values()))
    for format_id, ambiguous_formats in formats_dict.items():
        ambigious_id = len(ambiguous_formats) > 1
        for i, format in enumerate(ambiguous_formats):
            if ambigious_id:
                format['format_id'] = '%s-%d' % (format_id, i)
            if format.get('ext') is None:
                format['ext'] = determine_ext(format['url']).lower()
            # Ensure there is no conflict between id and ext in format selection
            # See https://github.com/yt-dlp/yt-dlp/issues/1282
            if format['format_id'] != format['ext'] and format['format_id'] in common_exts:
                format['format_id'] = 'f%s' % format['format_id']

    # Fill in derived per-format fields (format string, protocol, resolution, ...)
    for i, format in enumerate(formats):
        if format.get('format') is None:
            format['format'] = '{id} - {res}{note}'.format(
                id=format['format_id'],
                res=self.format_resolution(format),
                note=format_field(format, 'format_note', ' (%s)'),
            )
        if format.get('protocol') is None:
            format['protocol'] = determine_protocol(format)
        if format.get('resolution') is None:
            format['resolution'] = self.format_resolution(format, default=None)
        if format.get('dynamic_range') is None and format.get('vcodec') != 'none':
            format['dynamic_range'] = 'SDR'
        if (info_dict.get('duration') and format.get('tbr')
                and not format.get('filesize') and not format.get('filesize_approx')):
            # tbr is in kbit/s; duration * tbr * 1024/8 gives approximate bytes
            format['filesize_approx'] = info_dict['duration'] * format['tbr'] * (1024 / 8)

        # Add HTTP headers, so that external programs can use them from the
        # json output
        full_format_info = info_dict.copy()
        full_format_info.update(format)
        format['http_headers'] = self._calc_headers(full_format_info)
    # Remove private housekeeping stuff
    if '__x_forwarded_for_ip' in info_dict:
        del info_dict['__x_forwarded_for_ip']

    if self.params.get('check_formats') is True:
        formats = LazyList(self._check_formats(formats[::-1]), reverse=True)

    if not formats or formats[0] is not info_dict:
        # only set the 'formats' fields if the original info_dict list them
        # otherwise we end up with a circular reference, the first (and unique)
        # element in the 'formats' field in info_dict is info_dict itself,
        # which can't be exported to json
        info_dict['formats'] = formats

    info_dict, _ = self.pre_process(info_dict)

    if self._match_entry(info_dict) is not None:
        return info_dict

    self.post_extract(info_dict)
    info_dict, _ = self.pre_process(info_dict, 'after_filter')

    # The pre-processors may have modified the formats
    formats = info_dict.get('formats', [info_dict])

    # --- Listing modes (-F, --list-thumbnails, --list-subs) ---
    list_only = self.params.get('simulate') is None and (
        self.params.get('list_thumbnails') or self.params.get('listformats') or self.params.get('listsubtitles'))
    interactive_format_selection = not list_only and self.format_selector == '-'
    if self.params.get('list_thumbnails'):
        self.list_thumbnails(info_dict)
    if self.params.get('listsubtitles'):
        if 'automatic_captions' in info_dict:
            self.list_subtitles(
                info_dict['id'], automatic_captions, 'automatic captions')
        self.list_subtitles(info_dict['id'], subtitles, 'subtitles')
    if self.params.get('listformats') or interactive_format_selection:
        self.list_formats(info_dict)
    if list_only:
        # Without this printing, -F --print-json will not work
        self.__forced_printings(info_dict, self.prepare_filename(info_dict), incomplete=True)
        return

    format_selector = self.format_selector
    if format_selector is None:
        req_format = self._default_format_spec(info_dict, download=download)
        self.write_debug('Default format spec: %s' % req_format)
        format_selector = self.build_format_selector(req_format)

    # Loop so interactive selection (-f -) can retry on bad/unmatched input
    while True:
        if interactive_format_selection:
            req_format = input(
                self._format_screen('\nEnter format selector: ', self.Styles.EMPHASIS))
            try:
                format_selector = self.build_format_selector(req_format)
            except SyntaxError as err:
                self.report_error(err, tb=False, is_error=False)
                continue

        # While in format selection we may need to have an access to the original
        # format set in order to calculate some metrics or do some processing.
        # For now we need to be able to guess whether original formats provided
        # by extractor are incomplete or not (i.e. whether extractor provides only
        # video-only or audio-only formats) for proper formats selection for
        # extractors with such incomplete formats (see
        # https://github.com/ytdl-org/youtube-dl/pull/5556).
        # Since formats may be filtered during format selection and may not match
        # the original formats the results may be incorrect. Thus original formats
        # or pre-calculated metrics should be passed to format selection routines
        # as well.
        # We will pass a context object containing all necessary additional data
        # instead of just formats.
        # This fixes incorrect format selection issue (see
        # https://github.com/ytdl-org/youtube-dl/issues/10083).
        incomplete_formats = (
            # All formats are video-only or
            all(f.get('vcodec') != 'none' and f.get('acodec') == 'none' for f in formats)
            # all formats are audio-only
            or all(f.get('vcodec') == 'none' and f.get('acodec') != 'none' for f in formats))

        ctx = {
            'formats': formats,
            'incomplete_formats': incomplete_formats,
        }

        formats_to_download = list(format_selector(ctx))
        if interactive_format_selection and not formats_to_download:
            self.report_error('Requested format is not available', tb=False, is_error=False)
            continue
        break

    if not formats_to_download:
        if not self.params.get('ignore_no_formats_error'):
            raise ExtractorError('Requested format is not available', expected=True,
                                 video_id=info_dict['id'], ie=info_dict['extractor'])
        self.report_warning('Requested format is not available')
        # Process what we can, even without any available formats.
        formats_to_download = [{}]

    best_format = formats_to_download[-1]
    if download:
        if best_format:
            self.to_screen(
                f'[info] {info_dict["id"]}: Downloading {len(formats_to_download)} format(s): '
                + ', '.join([f['format_id'] for f in formats_to_download]))
        max_downloads_reached = False
        for i, fmt in enumerate(formats_to_download):
            # Each format gets its own copy of the info_dict overlaid with
            # the format's fields before being processed/downloaded
            formats_to_download[i] = new_info = self._copy_infodict(info_dict)
            new_info.update(fmt)
            try:
                self.process_info(new_info)
            except MaxDownloadsReached:
                max_downloads_reached = True
            # Remove copied info
            for key, val in tuple(new_info.items()):
                if info_dict.get(key) == val:
                    new_info.pop(key)
            if max_downloads_reached:
                break

        # Record in the download archive only if every format agreed to it
        write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download)
        assert write_archive.issubset({True, False, 'ignore'})
        if True in write_archive and False not in write_archive:
            self.record_download_archive(info_dict)

        info_dict['requested_downloads'] = formats_to_download
        info_dict = self.run_all_pps('after_video', info_dict)
        if max_downloads_reached:
            raise MaxDownloadsReached()

    # We update the info dict with the selected best quality format (backwards compatibility)
    info_dict.update(best_format)
    return info_dict
|
|
|
|
|
2015-02-22 11:37:27 +01:00
|
|
|
def process_subtitles(self, video_id, normal_subtitles, automatic_captions):
    """Select the requested subtitles and their format"""
    # Pool together normal subtitles and automatic captions, preferring
    # normal subtitles when the same language exists in both
    available_subs = {}
    if normal_subtitles and self.params.get('writesubtitles'):
        available_subs.update(normal_subtitles)
    if automatic_captions and self.params.get('writeautomaticsub'):
        for lang, cap_info in automatic_captions.items():
            if lang not in available_subs:
                available_subs[lang] = cap_info

    if (not self.params.get('writesubtitles') and not
            self.params.get('writeautomaticsub') or not
            available_subs):
        return None

    all_sub_langs = available_subs.keys()
    if self.params.get('allsubtitles', False):
        requested_langs = all_sub_langs
    elif self.params.get('subtitleslangs', False):
        # A list is used so that the order of languages will be the same as
        # given in subtitleslangs. See https://github.com/yt-dlp/yt-dlp/issues/1041
        requested_langs = []
        for lang_re in self.params.get('subtitleslangs'):
            # A leading '-' means "remove matching languages from the selection"
            discard = lang_re[0] == '-'
            if discard:
                lang_re = lang_re[1:]
            if lang_re == 'all':
                if discard:
                    requested_langs = []
                else:
                    requested_langs.extend(all_sub_langs)
                continue
            # Each entry is a regex matched against the full language code
            current_langs = filter(re.compile(lang_re + '$').match, all_sub_langs)
            if discard:
                for lang in current_langs:
                    while lang in requested_langs:
                        requested_langs.remove(lang)
            else:
                requested_langs.extend(current_langs)
        requested_langs = orderedSet(requested_langs)
    elif 'en' in available_subs:
        requested_langs = ['en']
    else:
        # No preference given and no English: fall back to the first language
        requested_langs = [list(all_sub_langs)[0]]
    if requested_langs:
        self.write_debug('Downloading subtitles: %s' % ', '.join(requested_langs))

    # 'subtitlesformat' is a '/'-separated preference list, e.g. 'srt/best'
    formats_query = self.params.get('subtitlesformat', 'best')
    formats_preference = formats_query.split('/') if formats_query else []
    subs = {}
    for lang in requested_langs:
        formats = available_subs.get(lang)
        if formats is None:
            self.report_warning('%s subtitles not available for %s' % (lang, video_id))
            continue
        for ext in formats_preference:
            if ext == 'best':
                f = formats[-1]
                break
            matches = list(filter(lambda f: f['ext'] == ext, formats))
            if matches:
                f = matches[-1]
                break
        else:
            # No preferred extension matched: take the last (best) format
            f = formats[-1]
            self.report_warning(
                'No subtitle format found matching "%s" for language %s, '
                'using %s' % (formats_query, lang, f['ext']))
        subs[lang] = f
    return subs
|
|
|
|
|
2022-01-23 22:34:19 +01:00
|
|
|
def _forceprint(self, key, info_dict):
    """Evaluate and emit the --print/--print-to-file templates registered
    under `key` for this info_dict. Does nothing when info_dict is None.
    """
    if info_dict is None:
        return
    info_copy = info_dict.copy()
    # Expose pre-rendered tables as pseudo-fields usable inside the templates
    info_copy['formats_table'] = self.render_formats_table(info_dict)
    info_copy['thumbnails_table'] = self.render_thumbnails_table(info_dict)
    info_copy['subtitles_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('subtitles'))
    info_copy['automatic_captions_table'] = self.render_subtitles_table(info_dict.get('id'), info_dict.get('automatic_captions'))

    def format_tmpl(tmpl):
        # Shorthand expansion: 'field=' -> 'field = %(field)r',
        # bare 'field' -> '%(field)s'; anything else is a full template
        mobj = re.match(r'\w+(=?)$', tmpl)
        if mobj and mobj.group(1):
            return f'{tmpl[:-1]} = %({tmpl[:-1]})r'
        elif mobj:
            return f'%({tmpl})s'
        return tmpl

    for tmpl in self.params['forceprint'].get(key, []):
        self.to_stdout(self.evaluate_outtmpl(format_tmpl(tmpl), info_copy))

    for tmpl, file_tmpl in self.params['print_to_file'].get(key, []):
        filename = self.evaluate_outtmpl(file_tmpl, info_dict)
        tmpl = format_tmpl(tmpl)
        # Fix: report the actual destination file instead of a literal
        # "(unknown)" placeholder
        self.to_screen(f'[info] Writing {tmpl!r} to: {filename}')
        if self._ensure_dir_exists(filename):
            with io.open(filename, 'a', encoding='utf-8') as f:
                f.write(self.evaluate_outtmpl(tmpl, info_copy) + '\n')
|
2022-01-02 11:52:00 +01:00
|
|
|
|
2019-09-24 21:08:46 +02:00
|
|
|
def __forced_printings(self, info_dict, filename, incomplete):
    """Emit the legacy --force-* fields (title, id, url, json, ...) to stdout."""

    def emit_mandatory(field, actual_field=None):
        # Mandatory fields are printed even when falsy, unless the
        # info_dict is known to be incomplete and the field is missing
        actual_field = field if actual_field is None else actual_field
        wanted = self.params.get(f'force{field}', False)
        if wanted and (not incomplete or info_dict.get(actual_field) is not None):
            self.to_stdout(info_dict[actual_field])

    def emit_optional(field):
        wanted = self.params.get(f'force{field}', False)
        if wanted and info_dict.get(field) is not None:
            self.to_stdout(info_dict[field])

    # Work on a copy so the synthesized fields don't leak back to the caller
    info_dict = info_dict.copy()
    if filename is not None:
        info_dict['filename'] = filename

    requested = info_dict.get('requested_formats')
    if requested is not None:
        # For RTMP URLs, also include the playpath
        info_dict['urls'] = '\n'.join(f['url'] + f.get('play_path', '') for f in requested)
    elif 'url' in info_dict:
        info_dict['urls'] = info_dict['url'] + info_dict.get('play_path', '')

    needs_post_extract = (
        self.params.get('forcejson')
        or self.params['forceprint'].get('video')
        or self.params['print_to_file'].get('video'))
    if needs_post_extract:
        self.post_extract(info_dict)
    self._forceprint('video', info_dict)

    emit_mandatory('title')
    emit_mandatory('id')
    emit_mandatory('url', 'urls')
    emit_optional('thumbnail')
    emit_optional('description')
    emit_optional('filename')
    if self.params.get('forceduration') and info_dict.get('duration') is not None:
        self.to_stdout(formatSeconds(info_dict['duration']))
    emit_mandatory('format')

    if self.params.get('forcejson'):
        self.to_stdout(json.dumps(self.sanitize_info(info_dict)))
|
2019-09-24 21:08:46 +02:00
|
|
|
|
2021-05-04 17:54:00 +02:00
|
|
|
def dl(self, name, info, subtitle=False, test=False):
    """Hand *info* to a suitable FileDownloader and start the download."""
    if not info.get('url'):
        self.raise_no_formats(info, True)

    if not test:
        params = self.params
    else:
        verbose = self.params.get('verbose')
        # Throw-away parameter set for test downloads: quiet, without
        # partial files and without a .ytdl resume file
        params = {
            'test': True,
            'quiet': self.params.get('quiet') or not verbose,
            'verbose': verbose,
            'noprogress': not verbose,
            'nopart': True,
            'skip_unavailable_fragments': False,
            'keep_fragments': False,
            'overwrites': True,
            '_no_ytdl_file': True,
        }

    fd = get_suitable_downloader(info, params, to_stdout=(name == '-'))(self, params)
    if not test:
        for hook in self._progress_hooks:
            fd.add_progress_hook(hook)

    def redacted_url(f):
        # data: URLs can be huge - log only their scheme part
        url = f['url']
        return url.split(',')[0] + ',<data>' if url.startswith('data:') else url

    urls = '", "'.join(redacted_url(f) for f in info.get('requested_formats', []) or [info])
    self.write_debug('Invoking downloader on "%s"' % urls)

    # Note: Ideally info should be a deep-copied so that hooks cannot modify it.
    # But it may contain objects that are not deep-copyable
    new_info = self._copy_infodict(info)
    if new_info.get('http_headers') is None:
        new_info['http_headers'] = self._calc_headers(new_info)
    return fd.download(name, new_info, subtitle)
|
|
|
|
|
2022-01-11 09:54:25 +01:00
|
|
|
def existing_file(self, filepaths, *, default_overwrite=True):
    """Return the first of *filepaths* that already exists on disk, or None.

    When overwriting is enabled, the existing candidates are deleted
    instead and None is returned so the download can proceed.
    """
    # De-duplicate while keeping order, then keep only paths present on disk
    unique_paths = list(dict.fromkeys(filepaths))
    found = [path for path in unique_paths if os.path.exists(path)]
    if found and not self.params.get('overwrites', default_overwrite):
        return found[0]

    for path in found:
        self.report_file_delete(path)
        os.remove(path)
    return None
|
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
def process_info(self, info_dict):
    """Process a single resolved IE result. (Modifies it in-place)

    Writes side files (description, subtitles, thumbnails, info.json,
    annotations, shortcut links), runs the before_dl pre-processors,
    downloads (and possibly merges) the media, then runs fixups,
    post-processors and post hooks.
    """

    assert info_dict.get('_type', 'video') == 'video'
    original_infodict = info_dict

    if 'format' not in info_dict and 'ext' in info_dict:
        info_dict['format'] = info_dict['ext']

    # This is mostly just for backward compatibility of process_info
    # As a side-effect, this allows for format-specific filters
    if self._match_entry(info_dict) is not None:
        info_dict['__write_download_archive'] = 'ignore'
        return

    # Does nothing under normal operation - for backward compatibility of process_info
    self.post_extract(info_dict)

    # info_dict['_filename'] needs to be set for backward compatibility
    info_dict['_filename'] = full_filename = self.prepare_filename(info_dict, warn=True)
    temp_filename = self.prepare_filename(info_dict, 'temp')
    files_to_move = {}

    self._num_downloads += 1

    # Forced printings
    self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))

    if self.params.get('simulate'):
        info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
        return

    if full_filename is None:
        return
    if not self._ensure_dir_exists(encodeFilename(full_filename)):
        return
    if not self._ensure_dir_exists(encodeFilename(temp_filename)):
        return

    if self._write_description('video', info_dict,
                               self.prepare_filename(info_dict, 'description')) is None:
        return

    sub_files = self._write_subtitles(info_dict, temp_filename)
    if sub_files is None:
        return
    files_to_move.update(dict(sub_files))

    thumb_files = self._write_thumbnails(
        'video', info_dict, temp_filename, self.prepare_filename(info_dict, 'thumbnail'))
    if thumb_files is None:
        return
    files_to_move.update(dict(thumb_files))

    infofn = self.prepare_filename(info_dict, 'infojson')
    _infojson_written = self._write_info_json('video', info_dict, infofn)
    if _infojson_written:
        info_dict['infojson_filename'] = infofn
        # For backward compatibility, even though it was a private field
        info_dict['__infojson_filename'] = infofn
    elif _infojson_written is None:
        return

    # Note: Annotations are deprecated
    annofn = None
    if self.params.get('writeannotations', False):
        annofn = self.prepare_filename(info_dict, 'annotation')
    if annofn:
        if not self._ensure_dir_exists(encodeFilename(annofn)):
            return
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)):
            self.to_screen('[info] Video annotations are already present')
        elif not info_dict.get('annotations'):
            self.report_warning('There are no annotations to write.')
        else:
            try:
                self.to_screen('[info] Writing video annotations to: ' + annofn)
                with io.open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile:
                    annofile.write(info_dict['annotations'])
            except (KeyError, TypeError):
                self.report_warning('There are no annotations to write.')
            except (OSError, IOError):
                self.report_error('Cannot write annotations file: ' + annofn)
                return

    # Write internet shortcut files
    def _write_link_file(link_type):
        url = try_get(info_dict['webpage_url'], iri_to_uri)
        if not url:
            self.report_warning(
                f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown')
            return True
        linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext'))
        if not self._ensure_dir_exists(encodeFilename(linkfn)):
            return False
        # BUG FIX: skip only when overwriting is DISABLED. The condition was
        # inverted (missing `not`) compared to the identical annotations check above
        if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)):
            self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present')
            return True
        try:
            self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}')
            with io.open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8',
                         newline='\r\n' if link_type == 'url' else '\n') as linkfile:
                template_vars = {'url': url}
                if link_type == 'desktop':
                    template_vars['filename'] = linkfn[:-(len(link_type) + 1)]
                linkfile.write(LINK_TEMPLATES[link_type] % template_vars)
        except (OSError, IOError):
            self.report_error(f'Cannot write internet shortcut {linkfn}')
            return False
        return True

    write_links = {
        'url': self.params.get('writeurllink'),
        'webloc': self.params.get('writewebloclink'),
        'desktop': self.params.get('writedesktoplink'),
    }
    if self.params.get('writelink'):
        link_type = ('webloc' if sys.platform == 'darwin'
                     else 'desktop' if sys.platform.startswith('linux')
                     else 'url')
        write_links[link_type] = True

    if any(should_write and not _write_link_file(link_type)
           for link_type, should_write in write_links.items()):
        return

    def replace_info_dict(new_info):
        # Keep modifying the same object so the in-place contract holds
        nonlocal info_dict
        if new_info == info_dict:
            return
        info_dict.clear()
        info_dict.update(new_info)

    try:
        new_info, files_to_move = self.pre_process(info_dict, 'before_dl', files_to_move)
        replace_info_dict(new_info)
    except PostProcessingError as err:
        self.report_error('Preprocessing: %s' % str(err))
        return

    if self.params.get('skip_download'):
        info_dict['filepath'] = temp_filename
        info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
        info_dict['__files_to_move'] = files_to_move
        replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
        info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
    else:
        # Download
        info_dict.setdefault('__postprocessors', [])
        try:

            def existing_video_file(*filepaths):
                # Check both the converted (final_ext) and raw filenames
                ext = info_dict.get('ext')
                converted = lambda file: replace_extension(file, self.params.get('final_ext') or ext, ext)
                file = self.existing_file(itertools.chain(*zip(map(converted, filepaths), filepaths)),
                                          default_overwrite=False)
                if file:
                    info_dict['ext'] = os.path.splitext(file)[1][1:]
                return file

            success = True
            if info_dict.get('requested_formats') is not None:

                def compatible_formats(formats):
                    # TODO: some formats actually allow this (mkv, webm, ogg, mp4), but not all of them.
                    video_formats = [format for format in formats if format.get('vcodec') != 'none']
                    audio_formats = [format for format in formats if format.get('acodec') != 'none']
                    if len(video_formats) > 2 or len(audio_formats) > 2:
                        return False

                    # Check extension
                    exts = set(format.get('ext') for format in formats)
                    COMPATIBLE_EXTS = (
                        set(('mp3', 'mp4', 'm4a', 'm4p', 'm4b', 'm4r', 'm4v', 'ismv', 'isma')),
                        set(('webm',)),
                    )
                    for ext_sets in COMPATIBLE_EXTS:
                        if ext_sets.issuperset(exts):
                            return True
                    # TODO: Check acodec/vcodec
                    return False

                requested_formats = info_dict['requested_formats']
                old_ext = info_dict['ext']
                if self.params.get('merge_output_format') is None:
                    if not compatible_formats(requested_formats):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'Requested formats are incompatible for merge and will be merged into mkv')
                    if (info_dict['ext'] == 'webm'
                            and info_dict.get('thumbnails')
                            # check with type instead of pp_key, __name__, or isinstance
                            # since we dont want any custom PPs to trigger this
                            and any(type(pp) == EmbedThumbnailPP for pp in self._pps['post_process'])):
                        info_dict['ext'] = 'mkv'
                        self.report_warning(
                            'webm doesn\'t support embedding a thumbnail, mkv will be used')
                new_ext = info_dict['ext']

                def correct_ext(filename, ext=new_ext):
                    if filename == '-':
                        return filename
                    filename_real_ext = os.path.splitext(filename)[1][1:]
                    filename_wo_ext = (
                        os.path.splitext(filename)[0]
                        if filename_real_ext in (old_ext, new_ext)
                        else filename)
                    return '%s.%s' % (filename_wo_ext, ext)

                # Ensure filename always has a correct extension for successful merge
                full_filename = correct_ext(full_filename)
                temp_filename = correct_ext(temp_filename)
                dl_filename = existing_video_file(full_filename, temp_filename)
                info_dict['__real_download'] = False

                downloaded = []
                merger = FFmpegMergerPP(self)

                fd = get_suitable_downloader(info_dict, self.params, to_stdout=temp_filename == '-')
                if dl_filename is not None:
                    self.report_file_already_downloaded(dl_filename)
                elif fd:
                    # A single downloader handles all formats at once
                    # (ffmpeg downloads them into one file directly)
                    for f in (requested_formats if fd != FFmpegFD else []):
                        f['filepath'] = fname = prepend_extension(
                            correct_ext(temp_filename, info_dict['ext']),
                            'f%s' % f['format_id'], info_dict['ext'])
                        downloaded.append(fname)
                    info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
                    success, real_download = self.dl(temp_filename, info_dict)
                    info_dict['__real_download'] = real_download
                else:
                    if self.params.get('allow_unplayable_formats'):
                        self.report_warning(
                            'You have requested merging of multiple formats '
                            'while also allowing unplayable formats to be downloaded. '
                            'The formats won\'t be merged to prevent data corruption.')
                    elif not merger.available:
                        msg = 'You have requested merging of multiple formats but ffmpeg is not installed'
                        if not self.params.get('ignoreerrors'):
                            self.report_error(f'{msg}. Aborting due to --abort-on-error')
                            return
                        self.report_warning(f'{msg}. The formats won\'t be merged')

                    if temp_filename == '-':
                        reason = ('using a downloader other than ffmpeg' if FFmpegFD.can_merge_formats(info_dict, self.params)
                                  else 'but the formats are incompatible for simultaneous download' if merger.available
                                  else 'but ffmpeg is not installed')
                        self.report_warning(
                            f'You have requested downloading multiple formats to stdout {reason}. '
                            'The formats will be streamed one after the other')
                        fname = temp_filename
                    # Download each requested format separately
                    for f in requested_formats:
                        new_info = dict(info_dict)
                        del new_info['requested_formats']
                        new_info.update(f)
                        if temp_filename != '-':
                            fname = prepend_extension(
                                correct_ext(temp_filename, new_info['ext']),
                                'f%s' % f['format_id'], new_info['ext'])
                            if not self._ensure_dir_exists(fname):
                                return
                            f['filepath'] = fname
                            downloaded.append(fname)
                        partial_success, real_download = self.dl(fname, new_info)
                        info_dict['__real_download'] = info_dict['__real_download'] or real_download
                        success = success and partial_success

                if downloaded and merger.available and not self.params.get('allow_unplayable_formats'):
                    info_dict['__postprocessors'].append(merger)
                    info_dict['__files_to_merge'] = downloaded
                    # Even if there were no downloads, it is being merged only now
                    info_dict['__real_download'] = True
                else:
                    for file in downloaded:
                        files_to_move[file] = None
            else:
                # Just a single file
                dl_filename = existing_video_file(full_filename, temp_filename)
                if dl_filename is None or dl_filename == temp_filename:
                    # dl_filename == temp_filename could mean that the file was partially downloaded with --no-part.
                    # So we should try to resume the download
                    success, real_download = self.dl(temp_filename, info_dict)
                    info_dict['__real_download'] = real_download
                else:
                    self.report_file_already_downloaded(dl_filename)

            dl_filename = dl_filename or temp_filename
            info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))

        except network_exceptions as err:
            self.report_error('unable to download video data: %s' % error_to_compat_str(err))
            return
        except (OSError, IOError) as err:
            raise UnavailableVideoError(err)
        except (ContentTooShortError, ) as err:
            self.report_error('content too short (expected %s bytes and served %s)' % (err.expected, err.downloaded))
            return

        if success and full_filename != '-':

            def fixup():
                do_fixup = True
                fixup_policy = self.params.get('fixup')
                vid = info_dict['id']

                if fixup_policy in ('ignore', 'never'):
                    return
                elif fixup_policy == 'warn':
                    do_fixup = False
                elif fixup_policy != 'force':
                    assert fixup_policy in ('detect_or_warn', None)
                    if not info_dict.get('__real_download'):
                        do_fixup = False

                def ffmpeg_fixup(cndn, msg, cls):
                    if not cndn:
                        return
                    if not do_fixup:
                        self.report_warning(f'{vid}: {msg}')
                        return
                    pp = cls(self)
                    if pp.available:
                        info_dict['__postprocessors'].append(pp)
                    else:
                        self.report_warning(f'{vid}: {msg}. Install ffmpeg to fix this automatically')

                stretched_ratio = info_dict.get('stretched_ratio')
                ffmpeg_fixup(
                    stretched_ratio not in (1, None),
                    f'Non-uniform pixel ratio {stretched_ratio}',
                    FFmpegFixupStretchedPP)

                ffmpeg_fixup(
                    (info_dict.get('requested_formats') is None
                     and info_dict.get('container') == 'm4a_dash'
                     and info_dict.get('ext') == 'm4a'),
                    'writing DASH m4a. Only some players support this container',
                    FFmpegFixupM4aPP)

                downloader = get_suitable_downloader(info_dict, self.params) if 'protocol' in info_dict else None
                downloader = downloader.__name__ if downloader else None

                if info_dict.get('requested_formats') is None:  # Not necessary if doing merger
                    ffmpeg_fixup(downloader == 'HlsFD',
                                 'Possible MPEG-TS in MP4 container or malformed AAC timestamps',
                                 FFmpegFixupM3u8PP)
                    ffmpeg_fixup(info_dict.get('is_live') and downloader == 'DashSegmentsFD',
                                 'Possible duplicate MOOV atoms', FFmpegFixupDuplicateMoovPP)

                ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed timestamps detected', FFmpegFixupTimestampPP)
                ffmpeg_fixup(downloader == 'WebSocketFragmentFD', 'Malformed duration detected', FFmpegFixupDurationPP)

            fixup()
            try:
                replace_info_dict(self.post_process(dl_filename, info_dict, files_to_move))
            except PostProcessingError as err:
                self.report_error('Postprocessing: %s' % str(err))
                return
            try:
                for ph in self._post_hooks:
                    ph(info_dict['filepath'])
            except Exception as err:
                self.report_error('post hooks: %s' % str(err))
                return
            info_dict['__write_download_archive'] = True

    if self.params.get('force_write_download_archive'):
        info_dict['__write_download_archive'] = True

    # Make sure the info_dict was modified in-place
    assert info_dict is original_infodict

    max_downloads = self.params.get('max_downloads')
    if max_downloads is not None and self._num_downloads >= int(max_downloads):
        raise MaxDownloadsReached()
|
2013-06-18 22:14:21 +02:00
|
|
|
|
2021-11-09 23:49:33 +01:00
|
|
|
def __download_wrapper(self, func):
    """Return *func* wrapped with the common download error handling."""
    @functools.wraps(func)
    def wrapped(*args, **kwargs):
        try:
            res = func(*args, **kwargs)
        except UnavailableVideoError as err:
            self.report_error(err)
        except MaxDownloadsReached as err:
            # NOTE: handler order matters here - keep this before DownloadCancelled
            self.to_screen(f'[info] {err}')
            raise
        except DownloadCancelled as err:
            self.to_screen(f'[info] {err}')
            if not self.params.get('break_per_url'):
                raise
        else:
            if self.params.get('dump_single_json', False):
                self.post_extract(res)
                self.to_stdout(json.dumps(self.sanitize_info(res)))
    return wrapped
|
|
|
|
|
2013-06-18 22:14:21 +02:00
|
|
|
def download(self, url_list):
    """Download a given list of URLs."""
    url_list = variadic(url_list)  # Passing a single URL is a common mistake
    outtmpl = self.outtmpl_dict['default']
    # A static output template combined with several URLs would make every
    # download write to the same file
    collides = (
        len(url_list) > 1
        and outtmpl != '-'
        and '%' not in outtmpl
        and self.params.get('max_downloads') != 1)
    if collides:
        raise SameFileError(outtmpl)

    wrapped_extract = self.__download_wrapper(self.extract_info)
    for url in url_list:
        wrapped_extract(
            url, force_generic_extractor=self.params.get('force_generic_extractor', False))

    return self._download_retcode
|
|
|
|
|
2013-11-22 14:57:53 +01:00
|
|
|
def download_with_info_file(self, info_filename):
    """Download using a previously written .info.json file ("-" reads stdin)."""
    input_file = fileinput.FileInput(
        [info_filename], mode='r', openhook=fileinput.hook_encoded('utf-8'))
    with contextlib.closing(input_file) as f:
        # FileInput doesn't have a read method, we can't call json.load
        contents = '\n'.join(f)
    info = self.sanitize_info(json.loads(contents), self.params.get('clean_infojson', True))
    try:
        self.__download_wrapper(self.process_ie_result)(info, download=True)
    except (DownloadError, EntryNotInPlaylist, ReExtractInfo) as e:
        if not isinstance(e, EntryNotInPlaylist):
            self.to_stderr('\r')
        webpage_url = info.get('webpage_url')
        if webpage_url is None:
            raise
        # Fall back to a fresh extraction of the original page
        self.report_warning(f'The info failed to download: {e}; trying with URL {webpage_url}')
        return self.download([webpage_url])
    return self._download_retcode
|
2013-11-22 14:57:53 +01:00
|
|
|
|
2015-04-30 20:44:34 +02:00
|
|
|
@staticmethod
def sanitize_info(info_dict, remove_private_keys=False):
    ''' Sanitize the infodict for converting to json '''
    if info_dict is None:
        return info_dict
    info_dict.setdefault('epoch', int(time.time()))
    info_dict.setdefault('_type', 'video')

    if not remove_private_keys:
        def reject(k, v):
            return False
    else:
        private_keys = {
            'requested_downloads', 'requested_formats', 'requested_subtitles', 'requested_entries',
            'entries', 'filepath', 'infojson_filename', 'original_url', 'playlist_autonumber',
        }

        def reject(k, v):
            # Drop nulls, internal fields (except '_type') and known private keys
            return v is None or (k.startswith('_') and k != '_type') or k in private_keys

    def sanitize(obj):
        # Recursively convert everything to json-serializable builtins
        if isinstance(obj, dict):
            return {k: sanitize(v) for k, v in obj.items() if not reject(k, v)}
        if isinstance(obj, (list, tuple, set, LazyList)):
            return [sanitize(item) for item in obj]
        if obj is None or isinstance(obj, (str, int, float, bool)):
            return obj
        return repr(obj)

    return sanitize(info_dict)
|
2015-04-30 20:44:34 +02:00
|
|
|
|
2021-08-05 00:07:16 +02:00
|
|
|
@staticmethod
def filter_requested_info(info_dict, actually_filter=True):
    ''' Alias of sanitize_info for backward compatibility '''
    # Retained only so external callers using the old name keep working;
    # `actually_filter` maps to sanitize_info's `remove_private_keys`
    return YoutubeDL.sanitize_info(info_dict, actually_filter)
|
|
|
|
|
2022-01-10 19:27:59 +01:00
|
|
|
@staticmethod
|
|
|
|
def post_extract(info_dict):
|
|
|
|
def actual_post_extract(info_dict):
|
|
|
|
if info_dict.get('_type') in ('playlist', 'multi_video'):
|
|
|
|
for video_dict in info_dict.get('entries', {}):
|
|
|
|
actual_post_extract(video_dict or {})
|
|
|
|
return
|
|
|
|
|
2022-02-22 12:43:30 +01:00
|
|
|
post_extractor = info_dict.pop('__post_extractor', None) or (lambda: {})
|
|
|
|
info_dict.update(post_extractor())
|
2022-01-10 19:27:59 +01:00
|
|
|
|
|
|
|
actual_post_extract(info_dict or {})
|
|
|
|
|
2021-03-18 16:24:53 +01:00
|
|
|
def run_pp(self, pp, infodict):
    """Run a single postprocessor and clean up the files it marks obsolete."""
    files_to_delete = []
    infodict.setdefault('__files_to_move', {})
    try:
        files_to_delete, infodict = pp.run(infodict)
    except PostProcessingError as e:
        # Must be True and not 'only_download'
        if self.params.get('ignoreerrors') is not True:
            raise
        self.report_error(e)
        return infodict

    if not files_to_delete:
        return infodict

    if self.params.get('keepvideo', False):
        # Keep the originals, but still allow them to be moved into place
        for leftover in files_to_delete:
            infodict['__files_to_move'].setdefault(leftover, '')
        return infodict

    for old_filename in set(files_to_delete):
        self.to_screen('Deleting original file %s (pass -k to keep)' % old_filename)
        try:
            os.remove(encodeFilename(old_filename))
        except (IOError, OSError):
            self.report_warning('Unable to remove downloaded original file')
        if old_filename in infodict['__files_to_move']:
            del infodict['__files_to_move'][old_filename]
    return infodict
|
2021-01-26 11:20:20 +01:00
|
|
|
|
2022-01-10 19:27:59 +01:00
|
|
|
def run_all_pps(self, key, info, *, additional_pps=None):
|
2022-01-23 22:34:19 +01:00
|
|
|
self._forceprint(key, info)
|
2022-01-10 19:27:59 +01:00
|
|
|
for pp in (additional_pps or []) + self._pps[key]:
|
2022-01-10 20:01:19 +01:00
|
|
|
info = self.run_pp(pp, info)
|
2022-01-10 19:27:59 +01:00
|
|
|
return info
|
2021-02-28 15:56:08 +01:00
|
|
|
|
2021-04-11 00:18:07 +02:00
|
|
|
def pre_process(self, ie_info, key='pre_process', files_to_move=None):
|
2021-01-26 11:20:20 +01:00
|
|
|
info = dict(ie_info)
|
2021-04-11 00:18:07 +02:00
|
|
|
info['__files_to_move'] = files_to_move or {}
|
2022-01-10 19:27:59 +01:00
|
|
|
info = self.run_all_pps(key, info)
|
2021-04-11 00:18:07 +02:00
|
|
|
return info, info.pop('__files_to_move', None)
|
2021-01-26 11:20:20 +01:00
|
|
|
|
2022-01-03 14:36:26 +01:00
|
|
|
def post_process(self, filename, info, files_to_move=None):
|
2013-06-18 22:14:21 +02:00
|
|
|
"""Run all the postprocessors on the given file."""
|
|
|
|
info['filepath'] = filename
|
2021-03-18 16:24:53 +01:00
|
|
|
info['__files_to_move'] = files_to_move or {}
|
2022-01-10 19:27:59 +01:00
|
|
|
info = self.run_all_pps('post_process', info, additional_pps=info.get('__postprocessors'))
|
2021-03-18 16:24:53 +01:00
|
|
|
info = self.run_pp(MoveFilesAfterDownloadPP(self), info)
|
|
|
|
del info['__files_to_move']
|
2022-01-10 19:27:59 +01:00
|
|
|
return self.run_all_pps('after_move', info)
|
2013-10-06 04:27:09 +02:00
|
|
|
|
2013-11-25 15:46:54 +01:00
|
|
|
def _make_archive_id(self, info_dict):
|
2019-02-01 23:44:31 +01:00
|
|
|
video_id = info_dict.get('id')
|
|
|
|
if not video_id:
|
|
|
|
return
|
2013-11-25 15:46:54 +01:00
|
|
|
# Future-proof against any change in case
|
|
|
|
# and backwards compatibility with prior versions
|
2019-02-01 23:44:31 +01:00
|
|
|
extractor = info_dict.get('extractor_key') or info_dict.get('ie_key') # key in a playlist
|
2013-11-22 22:46:46 +01:00
|
|
|
if extractor is None:
|
2019-02-07 19:08:48 +01:00
|
|
|
url = str_or_none(info_dict.get('url'))
|
|
|
|
if not url:
|
|
|
|
return
|
2019-02-01 23:44:31 +01:00
|
|
|
# Try to find matching extractor for the URL and take its ie_key
|
2021-08-23 01:56:45 +02:00
|
|
|
for ie_key, ie in self._ies.items():
|
2019-02-07 19:08:48 +01:00
|
|
|
if ie.suitable(url):
|
2021-08-23 01:56:45 +02:00
|
|
|
extractor = ie_key
|
2019-02-01 23:44:31 +01:00
|
|
|
break
|
|
|
|
else:
|
|
|
|
return
|
2021-01-21 13:06:42 +01:00
|
|
|
return '%s %s' % (extractor.lower(), video_id)
|
2013-11-25 15:46:54 +01:00
|
|
|
|
|
|
|
def in_download_archive(self, info_dict):
|
|
|
|
fn = self.params.get('download_archive')
|
|
|
|
if fn is None:
|
|
|
|
return False
|
|
|
|
|
|
|
|
vid_id = self._make_archive_id(info_dict)
|
2019-02-01 23:44:31 +01:00
|
|
|
if not vid_id:
|
2013-11-22 22:46:46 +01:00
|
|
|
return False # Incomplete video information
|
2013-11-25 15:46:54 +01:00
|
|
|
|
2020-09-19 03:18:23 +02:00
|
|
|
return vid_id in self.archive
|
2013-10-06 04:27:09 +02:00
|
|
|
|
|
|
|
def record_download_archive(self, info_dict):
|
|
|
|
fn = self.params.get('download_archive')
|
|
|
|
if fn is None:
|
|
|
|
return
|
2013-11-25 15:46:54 +01:00
|
|
|
vid_id = self._make_archive_id(info_dict)
|
|
|
|
assert vid_id
|
2022-01-03 13:57:43 +01:00
|
|
|
self.write_debug(f'Adding to archive: {vid_id}')
|
2013-10-06 04:27:09 +02:00
|
|
|
with locked_file(fn, 'a', encoding='utf-8') as archive_file:
|
2014-01-05 01:52:03 +01:00
|
|
|
archive_file.write(vid_id + '\n')
|
2020-09-19 03:18:23 +02:00
|
|
|
self.archive.add(vid_id)
|
2013-07-02 10:08:58 +02:00
|
|
|
|
2013-10-21 14:09:38 +02:00
|
|
|
@staticmethod
|
2013-10-28 11:31:12 +01:00
|
|
|
def format_resolution(format, default='unknown'):
|
2021-10-02 20:43:42 +02:00
|
|
|
if format.get('vcodec') == 'none' and format.get('acodec') != 'none':
|
2013-11-25 22:34:56 +01:00
|
|
|
return 'audio only'
|
2013-12-24 11:56:02 +01:00
|
|
|
if format.get('resolution') is not None:
|
|
|
|
return format['resolution']
|
2021-03-15 18:17:29 +01:00
|
|
|
if format.get('width') and format.get('height'):
|
2021-11-23 16:08:30 +01:00
|
|
|
return '%dx%d' % (format['width'], format['height'])
|
2021-03-15 18:17:29 +01:00
|
|
|
elif format.get('height'):
|
2021-11-23 16:08:30 +01:00
|
|
|
return '%sp' % format['height']
|
2021-03-15 18:17:29 +01:00
|
|
|
elif format.get('width'):
|
2021-11-23 16:08:30 +01:00
|
|
|
return '%dx?' % format['width']
|
|
|
|
return default
|
2013-10-21 14:09:38 +02:00
|
|
|
|
2022-01-10 19:41:12 +01:00
|
|
|
def _list_format_headers(self, *headers):
|
|
|
|
if self.params.get('listformats_table', True) is not False:
|
|
|
|
return [self._format_screen(header, self.Styles.HEADERS) for header in headers]
|
|
|
|
return headers
|
|
|
|
|
2014-04-30 02:02:41 +02:00
|
|
|
    def _format_note(self, fdict):
        """Build a short human-readable note for one format dict — container,
        codecs, bitrates, fps, filesize — used by the legacy (non-table)
        formats listing. Returns '' when nothing noteworthy is present."""
        res = ''
        if fdict.get('ext') in ['f4f', 'f4m']:
            res += '(unsupported)'
        if fdict.get('language'):
            if res:
                res += ' '
            res += '[%s]' % fdict['language']
        if fdict.get('format_note') is not None:
            if res:
                res += ' '
            res += fdict['format_note']
        if fdict.get('tbr') is not None:
            if res:
                res += ', '
            res += '%4dk' % fdict['tbr']
        if fdict.get('container') is not None:
            if res:
                res += ', '
            res += '%s container' % fdict['container']
        # Video part: either a real vcodec, or a bare 'video@' prefix when
        # only bitrates are known; the vbr value itself is appended below.
        if (fdict.get('vcodec') is not None
                and fdict.get('vcodec') != 'none'):
            if res:
                res += ', '
            res += fdict['vcodec']
            if fdict.get('vbr') is not None:
                res += '@'
        elif fdict.get('vbr') is not None and fdict.get('abr') is not None:
            res += 'video@'
        if fdict.get('vbr') is not None:
            res += '%4dk' % fdict['vbr']
        if fdict.get('fps') is not None:
            if res:
                res += ', '
            res += '%sfps' % fdict['fps']
        # Audio part: codec name, or 'video only' for acodec == 'none'.
        if fdict.get('acodec') is not None:
            if res:
                res += ', '
            if fdict['acodec'] == 'none':
                res += 'video only'
            else:
                res += '%-5s' % fdict['acodec']
        elif fdict.get('abr') is not None:
            if res:
                res += ', '
            res += 'audio'
        if fdict.get('abr') is not None:
            res += '@%3dk' % fdict['abr']
        if fdict.get('asr') is not None:
            res += ' (%5dHz)' % fdict['asr']
        # Exact filesize wins over the approximate one ('~' prefix).
        if fdict.get('filesize') is not None:
            if res:
                res += ', '
            res += format_bytes(fdict['filesize'])
        elif fdict.get('filesize_approx') is not None:
            if res:
                res += ', '
            res += '~' + format_bytes(fdict['filesize_approx'])
        return res
2013-11-16 01:08:43 +01:00
|
|
|
|
2022-01-10 19:41:12 +01:00
|
|
|
def render_formats_table(self, info_dict):
|
2021-12-23 02:42:26 +01:00
|
|
|
if not info_dict.get('formats') and not info_dict.get('url'):
|
2022-01-10 19:41:12 +01:00
|
|
|
return None
|
2021-12-23 02:42:26 +01:00
|
|
|
|
2013-10-30 01:09:26 +01:00
|
|
|
formats = info_dict.get('formats', [info_dict])
|
2022-01-10 19:41:12 +01:00
|
|
|
if not self.params.get('listformats_table', True) is not False:
|
2020-12-13 15:29:09 +01:00
|
|
|
table = [
|
|
|
|
[
|
|
|
|
format_field(f, 'format_id'),
|
|
|
|
format_field(f, 'ext'),
|
|
|
|
self.format_resolution(f),
|
2022-01-10 19:41:12 +01:00
|
|
|
self._format_note(f)
|
|
|
|
] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
|
|
|
|
return render_table(['format code', 'extension', 'resolution', 'note'], table, extra_gap=1)
|
|
|
|
|
|
|
|
delim = self._format_screen('\u2502', self.Styles.DELIM, '|', test_encoding=True)
|
|
|
|
table = [
|
|
|
|
[
|
|
|
|
self._format_screen(format_field(f, 'format_id'), self.Styles.ID),
|
|
|
|
format_field(f, 'ext'),
|
|
|
|
format_field(f, func=self.format_resolution, ignore=('audio only', 'images')),
|
|
|
|
format_field(f, 'fps', '\t%d'),
|
|
|
|
format_field(f, 'dynamic_range', '%s', ignore=(None, 'SDR')).replace('HDR', ''),
|
|
|
|
delim,
|
|
|
|
format_field(f, 'filesize', ' \t%s', func=format_bytes) + format_field(f, 'filesize_approx', '~\t%s', func=format_bytes),
|
|
|
|
format_field(f, 'tbr', '\t%dk'),
|
|
|
|
shorten_protocol_name(f.get('protocol', '')),
|
|
|
|
delim,
|
|
|
|
format_field(f, 'vcodec', default='unknown').replace(
|
|
|
|
'none', 'images' if f.get('acodec') == 'none'
|
|
|
|
else self._format_screen('audio only', self.Styles.SUPPRESS)),
|
|
|
|
format_field(f, 'vbr', '\t%dk'),
|
|
|
|
format_field(f, 'acodec', default='unknown').replace(
|
|
|
|
'none', '' if f.get('vcodec') == 'none'
|
|
|
|
else self._format_screen('video only', self.Styles.SUPPRESS)),
|
|
|
|
format_field(f, 'abr', '\t%dk'),
|
|
|
|
format_field(f, 'asr', '\t%dHz'),
|
|
|
|
join_nonempty(
|
|
|
|
self._format_screen('UNSUPPORTED', 'light red') if f.get('ext') in ('f4f', 'f4m') else None,
|
|
|
|
format_field(f, 'language', '[%s]'),
|
|
|
|
join_nonempty(format_field(f, 'format_note'),
|
|
|
|
format_field(f, 'container', ignore=(None, f.get('ext'))),
|
|
|
|
delim=', '),
|
|
|
|
delim=' '),
|
|
|
|
] for f in formats if f.get('preference') is None or f['preference'] >= -1000]
|
|
|
|
header_line = self._list_format_headers(
|
|
|
|
'ID', 'EXT', 'RESOLUTION', '\tFPS', 'HDR', delim, '\tFILESIZE', '\tTBR', 'PROTO',
|
|
|
|
delim, 'VCODEC', '\tVBR', 'ACODEC', '\tABR', '\tASR', 'MORE INFO')
|
|
|
|
|
|
|
|
return render_table(
|
|
|
|
header_line, table, hide_empty=True,
|
|
|
|
delim=self._format_screen('\u2500', self.Styles.DELIM, '-', test_encoding=True))
|
|
|
|
|
|
|
|
def render_thumbnails_table(self, info_dict):
|
2022-01-23 20:51:39 +01:00
|
|
|
thumbnails = list(info_dict.get('thumbnails') or [])
|
2015-01-25 02:38:47 +01:00
|
|
|
if not thumbnails:
|
2022-01-10 19:41:12 +01:00
|
|
|
return None
|
|
|
|
return render_table(
|
2021-10-20 18:37:32 +02:00
|
|
|
self._list_format_headers('ID', 'Width', 'Height', 'URL'),
|
2022-01-19 23:57:36 +01:00
|
|
|
[[t.get('id'), t.get('width', 'unknown'), t.get('height', 'unknown'), t['url']] for t in thumbnails])
|
2021-05-12 21:37:58 +02:00
|
|
|
|
2022-01-10 19:41:12 +01:00
|
|
|
def render_subtitles_table(self, video_id, subtitles):
|
2021-05-12 21:37:58 +02:00
|
|
|
def _row(lang, formats):
|
2021-06-25 19:40:31 +02:00
|
|
|
exts, names = zip(*((f['ext'], f.get('name') or 'unknown') for f in reversed(formats)))
|
2021-05-12 21:37:58 +02:00
|
|
|
if len(set(names)) == 1:
|
2021-05-17 12:41:07 +02:00
|
|
|
names = [] if names[0] == 'unknown' else names[:1]
|
2021-05-12 21:37:58 +02:00
|
|
|
return [lang, ', '.join(names), ', '.join(exts)]
|
|
|
|
|
2022-01-10 19:41:12 +01:00
|
|
|
if not subtitles:
|
|
|
|
return None
|
|
|
|
return render_table(
|
2021-10-20 18:37:32 +02:00
|
|
|
self._list_format_headers('Language', 'Name', 'Formats'),
|
2021-05-12 21:37:58 +02:00
|
|
|
[_row(lang, formats) for lang, formats in subtitles.items()],
|
2022-01-10 19:41:12 +01:00
|
|
|
hide_empty=True)
|
|
|
|
|
|
|
|
def __list_table(self, video_id, name, func, *args):
|
|
|
|
table = func(*args)
|
|
|
|
if not table:
|
|
|
|
self.to_screen(f'{video_id} has no {name}')
|
|
|
|
return
|
|
|
|
self.to_screen(f'[info] Available {name} for {video_id}:')
|
|
|
|
self.to_stdout(table)
|
|
|
|
|
|
|
|
    def list_formats(self, info_dict):
        # Print the available-formats table (or a "has no formats" notice).
        self.__list_table(info_dict['id'], 'formats', self.render_formats_table, info_dict)
|
|
|
|
|
|
|
|
    def list_thumbnails(self, info_dict):
        # Print the available-thumbnails table (or a "has no thumbnails" notice).
        self.__list_table(info_dict['id'], 'thumbnails', self.render_thumbnails_table, info_dict)
|
|
|
|
|
|
|
|
    def list_subtitles(self, video_id, subtitles, name='subtitles'):
        # Print the subtitles table; *name* also covers automatic captions.
        self.__list_table(video_id, name, self.render_subtitles_table, video_id, subtitles)
|
2015-02-15 18:03:41 +01:00
|
|
|
|
2013-11-22 19:57:52 +01:00
|
|
|
def urlopen(self, req):
|
|
|
|
""" Start an HTTP download """
|
2015-11-19 22:08:34 +01:00
|
|
|
if isinstance(req, compat_basestring):
|
2015-11-20 15:33:49 +01:00
|
|
|
req = sanitized_Request(req)
|
2014-03-10 19:01:29 +01:00
|
|
|
return self._opener.open(req, timeout=self._socket_timeout)
|
2013-11-22 19:57:52 +01:00
|
|
|
|
|
|
|
    def print_debug_header(self):
        """Emit the verbose debug header: encodings, version/variant, lazy-loader
        state, plugins, compat options, git HEAD, Python/platform, external
        executables, optional libraries and the proxy map. No-op unless the
        'verbose' option is set."""
        if not self.params.get('verbose'):
            return

        def get_encoding(stream):
            # Report the stream's declared encoding, plus whether terminal
            # escape sequences are usable on it.
            ret = getattr(stream, 'encoding', 'missing (%s)' % type(stream).__name__)
            if not supports_terminal_sequences(stream):
                from .compat import WINDOWS_VT_MODE
                ret += ' (No VT)' if WINDOWS_VT_MODE is False else ' (No ANSI)'
            return ret

        encoding_str = 'Encodings: locale %s, fs %s, out %s, err %s, pref %s' % (
            locale.getpreferredencoding(),
            sys.getfilesystemencoding(),
            get_encoding(self._screen_file), get_encoding(self._err_file),
            self.get_encoding())

        logger = self.params.get('logger')
        if logger:
            # Route all debug lines through the user-supplied logger.
            write_debug = lambda msg: logger.debug(f'[debug] {msg}')
            write_debug(encoding_str)
        else:
            # The encoding line itself must not be re-encoded (encoding=None).
            write_string(f'[debug] {encoding_str}\n', encoding=None)
            write_debug = lambda msg: self._write_string(f'[debug] {msg}\n')

        source = detect_variant()
        write_debug(join_nonempty(
            'yt-dlp version', __version__,
            f'[{RELEASE_GIT_HEAD}]' if RELEASE_GIT_HEAD else '',
            '' if source == 'unknown' else f'({source})',
            delim=' '))
        if not _LAZY_LOADER:
            if os.environ.get('YTDLP_NO_LAZY_EXTRACTORS'):
                write_debug('Lazy loading extractors is forcibly disabled')
            else:
                write_debug('Lazy loading extractors is disabled')
        if plugin_extractors or plugin_postprocessors:
            write_debug('Plugins: %s' % [
                '%s%s' % (klass.__name__, '' if klass.__name__ == name else f' as {name}')
                for name, klass in itertools.chain(plugin_extractors.items(), plugin_postprocessors.items())])
        if self.params.get('compat_opts'):
            write_debug('Compatibility options: %s' % ', '.join(self.params.get('compat_opts')))

        if source == 'source':
            # Running from a git checkout: best-effort report of the commit.
            try:
                sp = Popen(
                    ['git', 'rev-parse', '--short', 'HEAD'],
                    stdout=subprocess.PIPE, stderr=subprocess.PIPE,
                    cwd=os.path.dirname(os.path.abspath(__file__)))
                out, err = sp.communicate_or_kill()
                out = out.decode().strip()
                if re.match('[0-9a-f]+', out):
                    write_debug('Git HEAD: %s' % out)
            except Exception:
                # sys.exc_clear only exists on Python 2; on Python 3 this
                # inner try makes the call a harmless no-op.
                try:
                    sys.exc_clear()
                except Exception:
                    pass

        def python_implementation():
            # Append the PyPy version, which platform.python_version() omits.
            impl_name = platform.python_implementation()
            if impl_name == 'PyPy' and hasattr(sys, 'pypy_version_info'):
                return impl_name + ' version %d.%d.%d' % sys.pypy_version_info[:3]
            return impl_name

        write_debug('Python version %s (%s %s) - %s' % (
            platform.python_version(),
            python_implementation(),
            platform.architecture()[0],
            platform_name()))

        exe_versions, ffmpeg_features = FFmpegPostProcessor.get_versions_and_features(self)
        # Only report features that are actually available.
        ffmpeg_features = {key for key, val in ffmpeg_features.items() if val}
        if ffmpeg_features:
            exe_versions['ffmpeg'] += ' (%s)' % ','.join(ffmpeg_features)

        exe_versions['rtmpdump'] = rtmpdump_version()
        exe_versions['phantomjs'] = PhantomJSwrapper._version()
        exe_str = ', '.join(
            f'{exe} {v}' for exe, v in sorted(exe_versions.items()) if v
        ) or 'none'
        write_debug('exe versions: %s' % exe_str)

        # Imported here (not at module level) so missing optional deps
        # elsewhere do not break startup — TODO confirm that is the intent.
        from .downloader.websocket import has_websockets
        from .postprocessor.embedthumbnail import has_mutagen
        from .cookies import SQLITE_AVAILABLE, SECRETSTORAGE_AVAILABLE

        lib_str = join_nonempty(
            compat_pycrypto_AES and compat_pycrypto_AES.__name__.split('.')[0],
            SECRETSTORAGE_AVAILABLE and 'secretstorage',
            has_mutagen and 'mutagen',
            SQLITE_AVAILABLE and 'sqlite',
            has_websockets and 'websockets',
            delim=', ') or 'none'
        write_debug('Optional libraries: %s' % lib_str)

        # Collect the effective proxy configuration from the opener's handlers.
        proxy_map = {}
        for handler in self._opener.handlers:
            if hasattr(handler, 'proxies'):
                proxy_map.update(handler.proxies)
        write_debug(f'Proxy map: {proxy_map}')

        # Not implemented
        if False and self.params.get('call_home'):
            ipaddr = self.urlopen('https://yt-dl.org/ip').read().decode('utf-8')
            write_debug('Public IP address: %s' % ipaddr)
            latest_version = self.urlopen(
                'https://yt-dl.org/latest/version').read().decode('utf-8')
            if version_tuple(latest_version) > version_tuple(__version__):
                self.report_warning(
                    'You are using an outdated version (newest version: %s)! '
                    'See https://yt-dl.org/update if you need help updating.' %
                    latest_version)
|
2013-12-01 11:42:02 +01:00
|
|
|
    def _setup_opener(self):
        """Build the urllib opener used for all HTTP(S) traffic and store it
        on self._opener; also initializes the cookie jar and socket timeout."""
        timeout_val = self.params.get('socket_timeout')
        # Default socket timeout is 20 seconds.
        self._socket_timeout = 20 if timeout_val is None else float(timeout_val)

        opts_cookiesfrombrowser = self.params.get('cookiesfrombrowser')
        opts_cookiefile = self.params.get('cookiefile')
        opts_proxy = self.params.get('proxy')

        self.cookiejar = load_cookies(opts_cookiefile, opts_cookiesfrombrowser, self)

        cookie_processor = YoutubeDLCookieProcessor(self.cookiejar)
        if opts_proxy is not None:
            # An explicit --proxy overrides the environment; an empty string
            # disables proxying entirely.
            if opts_proxy == '':
                proxies = {}
            else:
                proxies = {'http': opts_proxy, 'https': opts_proxy}
        else:
            proxies = compat_urllib_request.getproxies()
        # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
        if 'http' in proxies and 'https' not in proxies:
            proxies['https'] = proxies['http']
        proxy_handler = PerRequestProxyHandler(proxies)

        debuglevel = 1 if self.params.get('debug_printtraffic') else 0
        https_handler = make_HTTPS_handler(self.params, debuglevel=debuglevel)
        ydlh = YoutubeDLHandler(self.params, debuglevel=debuglevel)
        redirect_handler = YoutubeDLRedirectHandler()
        data_handler = compat_urllib_request_DataHandler()

        # When passing our own FileHandler instance, build_opener won't add the
        # default FileHandler and allows us to disable the file protocol, which
        # can be used for malicious purposes (see
        # https://github.com/ytdl-org/youtube-dl/issues/8227)
        file_handler = compat_urllib_request.FileHandler()

        def file_open(*args, **kwargs):
            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
        file_handler.file_open = file_open

        opener = compat_urllib_request.build_opener(
            proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)

        # Delete the default user-agent header, which would otherwise apply in
        # cases where our custom HTTP handler doesn't come into play
        # (See https://github.com/ytdl-org/youtube-dl/issues/1309 for details)
        opener.addheaders = []
        self._opener = opener
2014-03-30 06:02:41 +02:00
|
|
|
|
|
|
|
def encode(self, s):
|
|
|
|
if isinstance(s, bytes):
|
|
|
|
return s # Already encoded
|
|
|
|
|
|
|
|
try:
|
|
|
|
return s.encode(self.get_encoding())
|
|
|
|
except UnicodeEncodeError as err:
|
|
|
|
err.reason = err.reason + '. Check your system encoding configuration or use the --encoding option.'
|
|
|
|
raise
|
|
|
|
|
|
|
|
def get_encoding(self):
|
|
|
|
encoding = self.params.get('encoding')
|
|
|
|
if encoding is None:
|
|
|
|
encoding = preferredencoding()
|
|
|
|
return encoding
|
2015-01-25 03:11:12 +01:00
|
|
|
|
2021-11-11 03:30:43 +01:00
|
|
|
    def _write_info_json(self, label, ie_result, infofn, overwrite=None):
        ''' Write infojson and returns True = written, False = skip, None = error '''
        # Per-call overwrite override falls back to the global 'overwrites' option.
        if overwrite is None:
            overwrite = self.params.get('overwrites', True)
        if not self.params.get('writeinfojson'):
            return False
        elif not infofn:
            self.write_debug(f'Skipping writing {label} infojson')
            return False
        elif not self._ensure_dir_exists(infofn):
            return None
        elif not overwrite and os.path.exists(infofn):
            # Existing file kept; still counts as success (returns True below).
            self.to_screen(f'[info] {label.title()} metadata is already present')
        else:
            self.to_screen(f'[info] Writing {label} metadata as JSON to: {infofn}')
            try:
                # Sanitize first so internal/bookkeeping keys never hit disk.
                write_json_file(self.sanitize_info(ie_result, self.params.get('clean_infojson', True)), infofn)
            except (OSError, IOError):
                self.report_error(f'Cannot write {label} metadata to JSON file {infofn}')
                return None
        return True
|
|
|
|
|
|
|
def _write_description(self, label, ie_result, descfn):
|
|
|
|
''' Write description and returns True = written, False = skip, None = error '''
|
|
|
|
if not self.params.get('writedescription'):
|
|
|
|
return False
|
|
|
|
elif not descfn:
|
|
|
|
self.write_debug(f'Skipping writing {label} description')
|
|
|
|
return False
|
|
|
|
elif not self._ensure_dir_exists(descfn):
|
|
|
|
return None
|
|
|
|
elif not self.params.get('overwrites', True) and os.path.exists(descfn):
|
|
|
|
self.to_screen(f'[info] {label.title()} description is already present')
|
|
|
|
elif ie_result.get('description') is None:
|
|
|
|
self.report_warning(f'There\'s no {label} description to write')
|
|
|
|
return False
|
|
|
|
else:
|
|
|
|
try:
|
|
|
|
self.to_screen(f'[info] Writing {label} description to: {descfn}')
|
|
|
|
with io.open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile:
|
|
|
|
descfile.write(ie_result['description'])
|
|
|
|
except (OSError, IOError):
|
|
|
|
self.report_error(f'Cannot write {label} description file {descfn}')
|
|
|
|
return None
|
|
|
|
return True
|
|
|
|
|
|
|
|
    def _write_subtitles(self, info_dict, filename):
        ''' Write subtitles to file and return list of (sub_filename, final_sub_filename); or None if error'''
        ret = []
        subtitles = info_dict.get('requested_subtitles')
        if not subtitles or not (self.params.get('writesubtitles') or self.params.get('writeautomaticsub')):
            # subtitles download errors are already managed as troubles in relevant IE
            # that way it will silently go on when used with unsupporting IE
            return ret

        sub_filename_base = self.prepare_filename(info_dict, 'subtitle')
        if not sub_filename_base:
            self.to_screen('[info] Skipping writing video subtitles')
            return ret
        for sub_lang, sub_info in subtitles.items():
            sub_format = sub_info['ext']
            # Temporary (download) name vs final name after post-processing.
            sub_filename = subtitles_filename(filename, sub_lang, sub_format, info_dict.get('ext'))
            sub_filename_final = subtitles_filename(sub_filename_base, sub_lang, sub_format, info_dict.get('ext'))
            existing_sub = self.existing_file((sub_filename_final, sub_filename))
            if existing_sub:
                # Reuse the file already on disk; no download needed.
                self.to_screen(f'[info] Video subtitle {sub_lang}.{sub_format} is already present')
                sub_info['filepath'] = existing_sub
                ret.append((existing_sub, sub_filename_final))
                continue

            self.to_screen(f'[info] Writing video subtitles to: {sub_filename}')
            if sub_info.get('data') is not None:
                # Inline subtitle data: write it out directly.
                try:
                    # Use newline='' to prevent conversion of newline characters
                    # See https://github.com/ytdl-org/youtube-dl/issues/10268
                    with io.open(sub_filename, 'w', encoding='utf-8', newline='') as subfile:
                        subfile.write(sub_info['data'])
                    sub_info['filepath'] = sub_filename
                    ret.append((sub_filename, sub_filename_final))
                    continue
                except (OSError, IOError):
                    # Disk errors abort the whole subtitle stage.
                    self.report_error(f'Cannot write video subtitles file {sub_filename}')
                    return None

            try:
                # Otherwise download the subtitle, inheriting the video's headers.
                sub_copy = sub_info.copy()
                sub_copy.setdefault('http_headers', info_dict.get('http_headers'))
                self.dl(sub_filename, sub_copy, subtitle=True)
                sub_info['filepath'] = sub_filename
                ret.append((sub_filename, sub_filename_final))
            except (DownloadError, ExtractorError, IOError, OSError, ValueError) + network_exceptions as err:
                if self.params.get('ignoreerrors') is not True:  # False or 'only_download'
                    raise DownloadError(f'Unable to download video subtitles for {sub_lang!r}: {err}', err)
                # With full ignoreerrors, skip this language and carry on.
                self.report_warning(f'Unable to download video subtitles for {sub_lang!r}: {err}')
        return ret
2021-09-29 22:44:42 +02:00
|
|
|
|
|
|
|
    def _write_thumbnails(self, label, info_dict, filename, thumb_filename_base=None):
        ''' Write thumbnails to file and return list of (thumb_filename, final_thumb_filename) '''
        write_all = self.params.get('write_all_thumbnails', False)
        thumbnails, ret = [], []
        if write_all or self.params.get('writethumbnail', False):
            thumbnails = info_dict.get('thumbnails') or []
        multiple = write_all and len(thumbnails) > 1

        if thumb_filename_base is None:
            thumb_filename_base = filename
        if thumbnails and not thumb_filename_base:
            self.write_debug(f'Skipping writing {label} thumbnail')
            return ret

        # Iterate highest-preference thumbnail first (list is sorted ascending);
        # reversed enumerate keeps indices valid for pop() on failure.
        for idx, t in list(enumerate(thumbnails))[::-1]:
            # Prefix the thumbnail id only when writing more than one.
            thumb_ext = (f'{t["id"]}.' if multiple else '') + determine_ext(t['url'], 'jpg')
            thumb_display_id = f'{label} thumbnail {t["id"]}'
            # Temporary (download) name vs final name after post-processing.
            thumb_filename = replace_extension(filename, thumb_ext, info_dict.get('ext'))
            thumb_filename_final = replace_extension(thumb_filename_base, thumb_ext, info_dict.get('ext'))

            existing_thumb = self.existing_file((thumb_filename_final, thumb_filename))
            if existing_thumb:
                self.to_screen('[info] %s is already present' % (
                    thumb_display_id if multiple else f'{label} thumbnail').capitalize())
                t['filepath'] = existing_thumb
                ret.append((existing_thumb, thumb_filename_final))
            else:
                self.to_screen(f'[info] Downloading {thumb_display_id} ...')
                try:
                    uf = self.urlopen(sanitized_Request(t['url'], headers=t.get('http_headers', {})))
                    self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}')
                    with open(encodeFilename(thumb_filename), 'wb') as thumbf:
                        shutil.copyfileobj(uf, thumbf)
                    ret.append((thumb_filename, thumb_filename_final))
                    t['filepath'] = thumb_filename
                except network_exceptions as err:
                    # Drop the failed thumbnail so callers don't see it as available.
                    thumbnails.pop(idx)
                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
            # Unless --write-all-thumbnails, stop after the first success.
            if ret and not write_all:
                break
        return ret