Mirror of https://github.com/yt-dlp/yt-dlp, synced 2024-12-26 21:59:08 +01:00

[cleanup] Misc

Closes #6288, Closes #7197, Closes #7265, Closes #7353, Closes #5773
Authored by: mikf, freezboltz, pukkandan

parent db3ad8a676
commit ad54c9130e

23 changed files with 138 additions and 102 deletions
.github/workflows/potential-duplicates.yml (vendored) | 2 changed lines

@@ -12,7 +12,7 @@ jobs:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
           label: potential-duplicate
           state: all
-          threshold: 0.7
+          threshold: 0.3
           comment: |
             This issue is potentially a duplicate of one of the following issues:
             {{#issues}}
README.md | 28 changed lines

@@ -152,7 +152,7 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
 * The upload dates extracted from YouTube are in UTC [when available](https://github.com/yt-dlp/yt-dlp/blob/89e4d86171c7b7c997c77d4714542e0383bf0db0/yt_dlp/extractor/youtube.py#L3898-L3900). Use `--compat-options no-youtube-prefer-utc-upload-date` to prefer the non-UTC upload date.
 * If `ffmpeg` is used as the downloader, the downloading and merging of formats happen in a single step when possible. Use `--compat-options no-direct-merge` to revert this
 * Thumbnail embedding in `mp4` is done with mutagen if possible. Use `--compat-options embed-thumbnail-atomicparsley` to force the use of AtomicParsley instead
-* Some private fields such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
+* Some internal metadata such as filenames are removed by default from the infojson. Use `--no-clean-infojson` or `--compat-options no-clean-infojson` to revert this
 * When `--embed-subs` and `--write-subs` are used together, the subtitles are written to disk and also embedded in the media file. You can use just `--embed-subs` to embed the subs and automatically delete the separate file. See [#630 (comment)](https://github.com/yt-dlp/yt-dlp/issues/630#issuecomment-893659460) for more info. `--compat-options no-keep-subs` can be used to revert this
 * `certifi` will be used for SSL root certificates, if installed. If you want to use system certificates (e.g. self-signed), use `--compat-options no-certifi`
 * yt-dlp's sanitization of invalid characters in filenames is different/smarter than in youtube-dl. You can use `--compat-options filename-sanitization` to revert to youtube-dl's behavior
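The `--compat-options` flags discussed above map onto the `compat_opts` parameter of the embedding API. A minimal sketch, not part of the diff; the video URL is just an example:

```python
import yt_dlp

# Revert two of the changed defaults from Python code
ydl_opts = {'compat_opts': ['no-youtube-prefer-utc-upload-date', 'no-clean-infojson']}
with yt_dlp.YoutubeDL(ydl_opts) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```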
@@ -251,7 +251,7 @@ gpg --verify SHA2-512SUMS.sig SHA2-512SUMS
 ```
 <!-- MANPAGE: END EXCLUDED SECTION -->

-**Note**: The manpages, shell completion files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)
+**Note**: The manpages, shell completion (autocomplete) files etc. are available inside the [source tarball](https://github.com/yt-dlp/yt-dlp/releases/latest/download/yt-dlp.tar.gz)

 ## DEPENDENCIES
 Python versions 3.7+ (CPython and PyPy) are supported. Other versions and implementations may or may not work correctly.
@@ -699,9 +699,8 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
                                        --write-description etc. (default)
     --no-write-playlist-metafiles      Do not write playlist metadata when using
                                        --write-info-json, --write-description etc.
-    --clean-info-json                  Remove some private fields such as filenames
-                                       from the infojson. Note that it could still
-                                       contain some personal information (default)
+    --clean-info-json                  Remove some internal metadata such as
+                                       filenames from the infojson (default)
     --no-clean-info-json               Write all fields to the infojson
     --write-comments                   Retrieve video comments to be placed in the
                                        infojson. The comments are fetched even
@@ -1041,13 +1040,10 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git
                                        that of --use-postprocessor (default:
                                        after_move). Same syntax as the output
                                        template can be used to pass any field as
-                                       arguments to the command. After download, an
-                                       additional field "filepath" that contains
-                                       the final path of the downloaded file is
-                                       also available, and if no fields are passed,
-                                       %(filepath,_filename|)q is appended to the
-                                       end of the command. This option can be used
-                                       multiple times
+                                       arguments to the command. If no fields are
+                                       passed, %(filepath,_filename|)q is appended
+                                       to the end of the command. This option can
+                                       be used multiple times
     --no-exec                          Remove any previously defined --exec
     --convert-subs FORMAT              Convert the subtitles to another format
                                        (currently supported: ass, lrc, srt, vtt)
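The default template appended by `--exec` combines three output-template features: `,` tries alternate fields in order, `|` supplies a default (empty here), and the `q` conversion shell-quotes the result. A minimal sketch of evaluating it through the Python API (assumption: POSIX-style quoting, illustrative path):

```python
import yt_dlp

ydl = yt_dlp.YoutubeDL()
# "filepath" is used if present, falling back to "_filename", else empty
print(ydl.evaluate_outtmpl('%(filepath,_filename|)q', {'filepath': '/tmp/file name.mp4'}))
# -> '/tmp/file name.mp4' (quoted so it can be safely spliced into a shell command)
```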
@@ -1225,8 +1221,7 @@ To activate authentication with the `.netrc` file you should pass `--netrc` to y

 The default location of the .netrc file is `~` (see below).

-As an alternative to using the `.netrc` file, which has the disadvantage of keeping your passwords in a plain text file, you can configure a custom shell command to provide the credentials for an extractor. This is done by providing the `--netrc-cmd` parameter, it shall output the credentials in the netrc format and return `0` on success, other values will be treated as an error. `{}` in the command will be replaced by the name of the extractor to make it possible to select the credentials for the right extractor.
-To use braces in the command, they need to be escaped by doubling them. (see example bellow)
+As an alternative to using the `.netrc` file, which has the disadvantage of keeping your passwords in a plain text file, you can configure a custom shell command to provide the credentials for an extractor. This is done by providing the `--netrc-cmd` parameter, it shall output the credentials in the netrc format and return `0` on success, other values will be treated as an error. `{}` in the command will be replaced by the name of the extractor to make it possible to select the credentials for the right extractor (To use literal braces, double them like `{{}}`).

 E.g. To use an encrypted `.netrc` file stored as `.authinfo.gpg`
 ```
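The credentials emitted by a `--netrc-cmd` command are parsed with the same netrc machinery the extractors use. A hedged sketch using `netrc_from_content`, the helper this commit starts importing in `extractor/common.py`; the machine name and credentials are made up:

```python
from yt_dlp.utils import netrc_from_content

# netrc-formatted text, e.g. as printed by a --netrc-cmd command
content = 'machine youtube login myuser password mypass'
info = netrc_from_content(content)
print(info.authenticators('youtube'))  # (login, account, password) for 'youtube'
```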
@@ -1389,7 +1384,10 @@ Available only when used in `--print`:
 - `subtitles_table` (table): The subtitle format table as printed by `--list-subs`
 - `automatic_captions_table` (table): The automatic subtitle format table as printed by `--list-subs`

+Available only after the video is downloaded (`post_process`/`after_move`):
+
+ - `filepath`: Actual path of downloaded video file
+
 Available only in `--sponsorblock-chapter-title`:

 - `start_time` (numeric): Start time of the chapter in seconds
@@ -1435,7 +1433,7 @@ $ yt-dlp -o "%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.y
 $ yt-dlp -o "%(upload_date>%Y)s/%(title)s.%(ext)s" "https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re"

 # Prefix playlist index with " - " separator, but only if it is available
-$ yt-dlp -o '%(playlist_index|)s%(playlist_index& - |)s%(title)s.%(ext)s' BaW_jenozKc "https://www.youtube.com/user/TheLinuxFoundation/playlists"
+$ yt-dlp -o "%(playlist_index&{} - |)s%(title)s.%(ext)s" BaW_jenozKc "https://www.youtube.com/user/TheLinuxFoundation/playlists"

 # Download all playlists of YouTube channel/user keeping each playlist in separate directory:
 $ yt-dlp -o "%(uploader)s/%(playlist)s/%(playlist_index)s - %(title)s.%(ext)s" "https://www.youtube.com/user/TheLinuxFoundation/playlists"
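The new form uses the `&` replacement with `{}` standing for the field's value, so the index and separator are emitted together only when `playlist_index` exists. A quick sketch of both cases (illustrative info dicts, not diff content):

```python
import yt_dlp

ydl = yt_dlp.YoutubeDL()
tmpl = '%(playlist_index&{} - |)s%(title)s.%(ext)s'
print(ydl.evaluate_outtmpl(tmpl, {'playlist_index': 3, 'title': 'Example', 'ext': 'mp4'}))
# -> '3 - Example.mp4'
print(ydl.evaluate_outtmpl(tmpl, {'title': 'Example', 'ext': 'mp4'}))
# -> 'Example.mp4' (empty default, since playlist_index is missing)
```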
@@ -8,5 +8,32 @@
         "action": "add",
         "when": "776d1c3f0c9b00399896dd2e40e78e9a43218109",
         "short": "[priority] **YouTube throttling fixes!**"
+    },
+    {
+        "action": "remove",
+        "when": "2e023649ea4e11151545a34dc1360c114981a236"
+    },
+    {
+        "action": "add",
+        "when": "01aba2519a0884ef17d5f85608dbd2a455577147",
+        "short": "[priority] YouTube: Improved throttling and signature fixes"
+    },
+    {
+        "action": "change",
+        "when": "c86e433c35fe5da6cb29f3539eef97497f84ed38",
+        "short": "[extractor/niconico:series] Fix extraction (#6898)",
+        "authors": ["sqrtNOT"]
+    },
+    {
+        "action": "change",
+        "when": "69a40e4a7f6caa5662527ebd2f3c4e8aa02857a2",
+        "short": "[extractor/youtube:music_search_url] Extract title (#7102)",
+        "authors": ["kangalio"]
+    },
+    {
+        "action": "change",
+        "when": "8417f26b8a819cd7ffcd4e000ca3e45033e670fb",
+        "short": "Add option `--color` (#6904)",
+        "authors": ["Grub4K"]
+    }
     }
 ]
@@ -19,11 +19,11 @@ def parse_patched_options(opts):
         'extract_flat': False,
         'concat_playlist': 'never',
     })
-    yt_dlp.options.__dict__['create_parser'] = lambda: patched_parser
+    yt_dlp.options.create_parser = lambda: patched_parser
     try:
         return yt_dlp.parse_options(opts)
    finally:
-        yt_dlp.options.__dict__['create_parser'] = create_parser
+        yt_dlp.options.create_parser = create_parser


 default_opts = parse_patched_options([]).ydl_opts
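Assigning the module attribute directly is equivalent to writing through `__dict__` here, just more idiomatic. A generic sketch of the same patch-and-restore pattern (standalone illustration, not project code):

```python
import json

original = json.dumps
json.dumps = lambda *args, **kwargs: original(*args, sort_keys=True, **kwargs)
try:
    print(json.dumps({'b': 1, 'a': 2}))  # patched: keys come out sorted
finally:
    json.dumps = original  # always restore the original
```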
@@ -44,7 +44,7 @@ class CommitGroup(enum.Enum):
         return {
             name: group
             for group, names in {
-                cls.PRIORITY: {''},
+                cls.PRIORITY: {'priority'},
                 cls.CORE: {
                     'aes',
                     'cache',
@@ -68,7 +68,7 @@ class CommitGroup(enum.Enum):
                     'misc',
                     'test',
                 },
-                cls.EXTRACTOR: {'extractor', 'extractors'},
+                cls.EXTRACTOR: {'extractor'},
                 cls.DOWNLOADER: {'downloader'},
                 cls.POSTPROCESSOR: {'postprocessor'},
             }.items()
@@ -323,7 +323,7 @@ class CommitRange:
                 logger.debug(f'Ignored {when!r}, not in commits {self._start!r}')
                 continue

-            override_hash = override.get('hash')
+            override_hash = override.get('hash') or when
             if override['action'] == 'add':
                 commit = Commit(override.get('hash'), override['short'], override.get('authors') or [])
                 logger.info(f'ADD {commit}')
@@ -337,7 +337,7 @@ class CommitRange:
             elif override['action'] == 'change':
                 if override_hash not in self._commits:
                     continue
-                commit = Commit(override_hash, override['short'], override['authors'])
+                commit = Commit(override_hash, override['short'], override.get('authors') or [])
                 logger.info(f'CHANGE {self._commits[commit.hash]} -> {commit}')
                 self._commits[commit.hash] = commit

@@ -348,7 +348,7 @@ class CommitRange:
         for commit in self:
             upstream_re = self.UPSTREAM_MERGE_RE.search(commit.short)
             if upstream_re:
-                commit.short = f'[upstream] Merged with youtube-dl {upstream_re.group(1)}'
+                commit.short = f'[core/upstream] Merged with youtube-dl {upstream_re.group(1)}'

             match = self.MESSAGE_RE.fullmatch(commit.short)
             if not match:
@@ -394,10 +394,10 @@ class CommitRange:
             return CommitGroup.CORE, None, ()

         prefix, _, details = prefix.partition('/')
-        prefix = prefix.strip().lower()
+        prefix = prefix.strip()
         details = details.strip()

-        group = CommitGroup.get(prefix)
+        group = CommitGroup.get(prefix.lower())
         if group is CommitGroup.PRIORITY:
             prefix, _, details = details.partition('/')

@@ -668,7 +668,7 @@ class TestYoutubeDL(unittest.TestCase):
         for (name, got), expect in zip((('outtmpl', out), ('filename', fname)), expected):
             if callable(expect):
                 self.assertTrue(expect(got), f'Wrong {name} from {tmpl}')
-            else:
+            elif expect is not None:
                 self.assertEqual(got, expect, f'Wrong {name} from {tmpl}')

         # Side-effects
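With this change, an expected value of `None` in the test tuples below means "don't check this output" rather than "expect None". A tiny standalone sketch of the pattern (illustrative values only):

```python
expected = ('out.mp4', None)  # check the outtmpl result, skip the filename check
for (name, got), expect in zip((('outtmpl', 'out.mp4'), ('filename', 'anything')), expected):
    if expect is not None:
        assert got == expect, f'Wrong {name}'
```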
@@ -759,15 +759,17 @@ class TestYoutubeDL(unittest.TestCase):
         test('a%(width|b)d', 'ab', outtmpl_na_placeholder='none')

         FORMATS = self.outtmpl_info['formats']
-        sanitize = lambda x: x.replace(':', '：').replace('"', '＂').replace('\n', ' ')

         # Custom type casting
         test('%(formats.:.id)l', 'id 1, id 2, id 3')
         test('%(formats.:.id)#l', ('id 1\nid 2\nid 3', 'id 1 id 2 id 3'))
         test('%(ext)l', 'mp4')
         test('%(formats.:.id) 18l', '  id 1, id 2, id 3')
-        test('%(formats)j', (json.dumps(FORMATS), sanitize(json.dumps(FORMATS))))
-        test('%(formats)#j', (json.dumps(FORMATS, indent=4), sanitize(json.dumps(FORMATS, indent=4))))
+        test('%(formats)j', (json.dumps(FORMATS), None))
+        test('%(formats)#j', (
+            json.dumps(FORMATS, indent=4),
+            json.dumps(FORMATS, indent=4).replace(':', '：').replace('"', '＂').replace('\n', ' ')
+        ))
         test('%(title5).3B', 'á')
         test('%(title5)U', 'áéí 𝐀')
         test('%(title5)#U', 'a\u0301e\u0301i\u0301 𝐀')
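The removed `sanitize` lambda mirrored yt-dlp's filename sanitization, which swaps characters that are invalid in filenames for fullwidth lookalikes. A hedged sketch of the real helper (assumption: default, non-restricted mode):

```python
from yt_dlp.utils import sanitize_filename

print(sanitize_filename('a:b"c'))  # 'a：b＂c' - fullwidth colon and quote
print(sanitize_filename('a:b"c', restricted=True))  # ASCII-only fallback
```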
@@ -792,8 +794,8 @@ class TestYoutubeDL(unittest.TestCase):
         test('%(title|%)s %(title|%%)s', '% %%')
         test('%(id+1-height+3)05d', '00158')
         test('%(width+100)05d', 'NA')
-        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], '% 15s' % sanitize(str(FORMATS[0]))))
-        test('%(formats.0)r', (repr(FORMATS[0]), sanitize(repr(FORMATS[0]))))
+        test('%(formats.0) 15s', ('% 15s' % FORMATS[0], None))
+        test('%(formats.0)r', (repr(FORMATS[0]), None))
         test('%(height.0)03d', '001')
         test('%(-height.0)04d', '-001')
         test('%(formats.-1.id)s', FORMATS[-1]['id'])
@@ -805,7 +807,7 @@ class TestYoutubeDL(unittest.TestCase):
         out = json.dumps([{'id': f['id'], 'height.:2': str(f['height'])[:2]}
                           if 'height' in f else {'id': f['id']}
                           for f in FORMATS])
-        test('%(formats.:.{id,height.:2})j', (out, sanitize(out)))
+        test('%(formats.:.{id,height.:2})j', (out, None))
         test('%(formats.:.{id,height}.id)l', ', '.join(f['id'] for f in FORMATS))
         test('%(.{id,title})j', ('{"id": "1234"}', '{＂id＂: ＂1234＂}'))

@@ -12,28 +12,38 @@ import math
 from yt_dlp.jsinterp import JS_Undefined, JSInterpreter


+class NaN:
+    pass
+
+
 class TestJSInterpreter(unittest.TestCase):
-    def _test(self, code, ret, func='f', args=()):
-        self.assertEqual(JSInterpreter(code).call_function(func, *args), ret)
+    def _test(self, jsi_or_code, expected, func='f', args=()):
+        if isinstance(jsi_or_code, str):
+            jsi_or_code = JSInterpreter(jsi_or_code)
+        got = jsi_or_code.call_function(func, *args)
+        if expected is NaN:
+            self.assertTrue(math.isnan(got), f'{got} is not NaN')
+        else:
+            self.assertEqual(got, expected)

     def test_basic(self):
         jsi = JSInterpreter('function f(){;}')
         self.assertEqual(repr(jsi.extract_function('f')), 'F<f>')
-        self.assertEqual(jsi.call_function('f'), None)
+        self._test(jsi, None)

         self._test('function f(){return 42;}', 42)
         self._test('function f(){42}', None)
         self._test('var f = function(){return 42;}', 42)

-    def test_calc(self):
-        self._test('function f(a){return 2*a+1;}', 7, args=[3])
-
     def test_div(self):
         jsi = JSInterpreter('function f(a, b){return a / b;}')
-        self.assertTrue(math.isnan(jsi.call_function('f', 0, 0)))
-        self.assertTrue(math.isnan(jsi.call_function('f', JS_Undefined, 1)))
-        self.assertTrue(math.isinf(jsi.call_function('f', 2, 0)))
-        self.assertEqual(jsi.call_function('f', 0, 3), 0)
+        self._test(jsi, NaN, args=(0, 0))
+        self._test(jsi, NaN, args=(JS_Undefined, 1))
+        self._test(jsi, float('inf'), args=(2, 0))
+        self._test(jsi, 0, args=(0, 3))

+    def test_calc(self):
+        self._test('function f(a){return 2*a+1;}', 7, args=[3])
+
     def test_empty_return(self):
         self._test('function f(){return; y()}', None)
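The `NaN` sentinel class exists because `float('nan')` never compares equal to anything, including itself, so `assertEqual` cannot express "expect NaN". A standalone illustration:

```python
import math

nan = float('nan')
print(nan == nan)       # False - NaN is unequal even to itself
print(math.isnan(nan))  # True - the reliable way to test for NaN
```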
@@ -102,16 +112,15 @@ class TestJSInterpreter(unittest.TestCase):
         ''', [20, 20, 30, 40, 50])

     def test_builtins(self):
-        jsi = JSInterpreter('function f() { return NaN }')
-        self.assertTrue(math.isnan(jsi.call_function('f')))
+        self._test('function f() { return NaN }', NaN)

     def test_date(self):
         self._test('function f() { return new Date("Wednesday 31 December 1969 18:01:26 MDT") - 0; }', 86000)

         jsi = JSInterpreter('function f(dt) { return new Date(dt) - 0; }')
-        self.assertEqual(jsi.call_function('f', 'Wednesday 31 December 1969 18:01:26 MDT'), 86000)
-        self.assertEqual(jsi.call_function('f', '12/31/1969 18:01:26 MDT'), 86000)  # m/d/y
-        self.assertEqual(jsi.call_function('f', '1 January 1970 00:00:00 UTC'), 0)
+        self._test(jsi, 86000, args=['Wednesday 31 December 1969 18:01:26 MDT'])
+        self._test(jsi, 86000, args=['12/31/1969 18:01:26 MDT'])  # m/d/y
+        self._test(jsi, 0, args=['1 January 1970 00:00:00 UTC'])

     def test_call(self):
         jsi = JSInterpreter('''
@@ -119,8 +128,8 @@ class TestJSInterpreter(unittest.TestCase):
             function y(a) { return x() + (a?a:0); }
             function z() { return y(3); }
         ''')
-        self.assertEqual(jsi.call_function('z'), 5)
-        self.assertEqual(jsi.call_function('y'), 2)
+        self._test(jsi, 5, func='z')
+        self._test(jsi, 2, func='y')

     def test_if(self):
         self._test('''
@@ -167,9 +176,9 @@ class TestJSInterpreter(unittest.TestCase):
             default:x=0;
             } return x }
         ''')
-        self.assertEqual(jsi.call_function('f', 1), 7)
-        self.assertEqual(jsi.call_function('f', 3), 6)
-        self.assertEqual(jsi.call_function('f', 5), 0)
+        self._test(jsi, 7, args=[1])
+        self._test(jsi, 6, args=[3])
+        self._test(jsi, 0, args=[5])

     def test_switch_default(self):
         jsi = JSInterpreter('''
@@ -182,9 +191,9 @@ class TestJSInterpreter(unittest.TestCase):
             case 1: x+=1;
             } return x }
         ''')
-        self.assertEqual(jsi.call_function('f', 1), 2)
-        self.assertEqual(jsi.call_function('f', 5), 11)
-        self.assertEqual(jsi.call_function('f', 9), 14)
+        self._test(jsi, 2, args=[1])
+        self._test(jsi, 11, args=[5])
+        self._test(jsi, 14, args=[9])

     def test_try(self):
         self._test('function f() { try{return 10} catch(e){return 5} }', 10)
@@ -312,12 +321,12 @@ class TestJSInterpreter(unittest.TestCase):

     def test_char_code_at(self):
         jsi = JSInterpreter('function f(i){return "test".charCodeAt(i)}')
-        self.assertEqual(jsi.call_function('f', 0), 116)
-        self.assertEqual(jsi.call_function('f', 1), 101)
-        self.assertEqual(jsi.call_function('f', 2), 115)
-        self.assertEqual(jsi.call_function('f', 3), 116)
-        self.assertEqual(jsi.call_function('f', 4), None)
-        self.assertEqual(jsi.call_function('f', 'not_a_number'), 116)
+        self._test(jsi, 116, args=[0])
+        self._test(jsi, 101, args=[1])
+        self._test(jsi, 115, args=[2])
+        self._test(jsi, 116, args=[3])
+        self._test(jsi, None, args=[4])
+        self._test(jsi, 116, args=['not_a_number'])

     def test_bitwise_operators_overflow(self):
         self._test('function f(){return -524999584 << 5}', 379882496)
@@ -67,7 +67,7 @@ _SIG_TESTS = [
         'https://www.youtube.com/s/player/6ed0d907/player_ias.vflset/en_US/base.js',
         '2aq0aqSyOoJXtK73m-uME_jv7-pT15gOFC02RFkGMqWpzEICs69VdbwQ0LDp1v7j8xx92efCJlYFYb1sUkkBSPOlPmXgIARw8JQ0qOAOAA',
         'AOq0QJ8wRAIgXmPlOPSBkkUs1bYFYlJCfe29xx8j7v1pDL2QwbdV96sCIEzpWqMGkFR20CFOg51Tp-7vj_EMu-m37KtXJoOySqa0',
-    )
+    ),
 ]

 _NSIG_TESTS = [
@@ -259,7 +259,7 @@ class YoutubeDL:
     consoletitle:      Display progress in console window's titlebar.
     writedescription:  Write the video description to a .description file
     writeinfojson:     Write the video description to a .info.json file
-    clean_infojson:    Remove private fields from the infojson
+    clean_infojson:    Remove internal metadata from the infojson
     getcomments:       Extract video comments. This will not be written to disk
                        unless writeinfojson is also given
     writeannotations:  Write the video annotations to a .annotations.xml file
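These docstring entries are the embedding-API counterparts of the CLI flags. A minimal sketch (the URL is just an example video):

```python
import yt_dlp

# Write the infojson but keep all fields, i.e. the --no-clean-info-json behavior
with yt_dlp.YoutubeDL({'writeinfojson': True, 'clean_infojson': False}) as ydl:
    ydl.download(['https://www.youtube.com/watch?v=BaW_jenozKc'])
```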
@@ -1902,7 +1902,7 @@ class YoutubeDL:
                 continue

             entry['__x_forwarded_for_ip'] = ie_result.get('__x_forwarded_for_ip')
-            if not lazy and 'playlist-index' in self.params.get('compat_opts', []):
+            if not lazy and 'playlist-index' in self.params['compat_opts']:
                 playlist_index = ie_result['requested_entries'][i]

             entry_copy = collections.ChainMap(entry, {
@@ -2959,8 +2959,7 @@ class YoutubeDL:
         print_field('url', 'urls')
         print_field('thumbnail', optional=True)
         print_field('description', optional=True)
-        if filename:
-            print_field('filename')
+        print_field('filename')
         if self.params.get('forceduration') and info_copy.get('duration') is not None:
             self.to_stdout(formatSeconds(info_copy['duration']))
         print_field('format')
@@ -3185,7 +3184,6 @@ class YoutubeDL:
             return

         if info_dict.get('requested_formats') is not None:
-            requested_formats = info_dict['requested_formats']
             old_ext = info_dict['ext']
             if self.params.get('merge_output_format') is None:
                 if (info_dict['ext'] == 'webm'
@@ -3212,6 +3210,7 @@ class YoutubeDL:
             full_filename = correct_ext(full_filename)
             temp_filename = correct_ext(temp_filename)
             dl_filename = existing_video_file(full_filename, temp_filename)

             info_dict['__real_download'] = False
+
             merger = FFmpegMergerPP(self)
@@ -3219,12 +3218,12 @@ class YoutubeDL:
             if dl_filename is not None:
                 self.report_file_already_downloaded(dl_filename)
             elif fd:
-                for f in requested_formats if fd != FFmpegFD else []:
+                for f in info_dict['requested_formats'] if fd != FFmpegFD else []:
                     f['filepath'] = fname = prepend_extension(
                         correct_ext(temp_filename, info_dict['ext']),
                         'f%s' % f['format_id'], info_dict['ext'])
                     downloaded.append(fname)
-                info_dict['url'] = '\n'.join(f['url'] for f in requested_formats)
+                info_dict['url'] = '\n'.join(f['url'] for f in info_dict['requested_formats'])
                 success, real_download = self.dl(temp_filename, info_dict)
                 info_dict['__real_download'] = real_download
             else:
@@ -3248,7 +3247,7 @@ class YoutubeDL:
                         f'You have requested downloading multiple formats to stdout {reason}. '
                         'The formats will be streamed one after the other')
                     fname = temp_filename
-                for f in requested_formats:
+                for f in info_dict['requested_formats']:
                     new_info = dict(info_dict)
                     del new_info['requested_formats']
                     new_info.update(f)
@@ -4109,8 +4108,11 @@ class YoutubeDL:
                     ret.append((thumb_filename, thumb_filename_final))
                     t['filepath'] = thumb_filename
                 except network_exceptions as err:
+                    if isinstance(err, urllib.error.HTTPError) and err.code == 404:
+                        self.to_screen(f'[info] {thumb_display_id.title()} does not exist')
+                    else:
+                        self.report_warning(f'Unable to download {thumb_display_id}: {err}')
                     thumbnails.pop(idx)
-                    self.report_warning(f'Unable to download {thumb_display_id}: {err}')
             if ret and not write_all:
                 break
         return ret
@@ -1326,3 +1326,7 @@ class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
         cookie_req = urllib.request.Request(escape_url(sanitize_url(url)))
         self.add_cookie_header(cookie_req)
         return cookie_req.get_header('Cookie')
+
+    def clear(self, *args, **kwargs):
+        with contextlib.suppress(KeyError):
+            return super().clear(*args, **kwargs)
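The override matters because the stdlib jar raises `KeyError` when asked to clear cookies for a domain that has none; suppressing it lets callers (like the Odnoklassniki extractor below) clear unconditionally instead of guarding each call. A standalone illustration:

```python
import contextlib
import http.cookiejar

jar = http.cookiejar.CookieJar()
# clear() with a domain argument raises KeyError when no cookie matches
with contextlib.suppress(KeyError):
    jar.clear('.example.com')
```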
@@ -49,7 +49,6 @@ class FileDownloader:
     verbose:            Print additional info to stdout.
     quiet:              Do not print messages to stdout.
     ratelimit:          Download speed limit, in bytes/sec.
-    continuedl:         Attempt to continue downloads if possible
     throttledratelimit: Assume the download is being throttled below this speed (bytes/sec)
     retries:            Number of times to retry for expected network errors.
                         Default is 0 for API, but 10 for CLI
@@ -7,9 +7,9 @@ from .common import FileDownloader
 from .external import FFmpegFD
 from ..utils import (
     DownloadError,
-    str_or_none,
-    sanitized_Request,
     WebSocketsWrapper,
+    sanitized_Request,
+    str_or_none,
     try_get,
 )

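The reorder follows plain ASCII sorting, where uppercase names sort before lowercase ones. A one-liner shows why `WebSocketsWrapper` lands between `DownloadError` and `sanitized_Request`:

```python
print(sorted(['str_or_none', 'sanitized_Request', 'WebSocketsWrapper', 'DownloadError']))
# ['DownloadError', 'WebSocketsWrapper', 'sanitized_Request', 'str_or_none']
```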
@@ -49,7 +49,7 @@ class CiscoWebexIE(InfoExtractor):
             'https://%s.webex.com/webappng/api/v1/recordings/%s/stream' % (subdomain, video_id),
             video_id, headers=headers, query={'siteurl': siteurl}, expected_status=(403, 429))

-        if urlh.status == 403:
+        if urlh.getcode() == 403:
             if stream['code'] == 53004:
                 self.raise_login_required()
             if stream['code'] == 53005:
@@ -59,7 +59,7 @@ class CiscoWebexIE(InfoExtractor):
                     'This video is protected by a password, use the --video-password option', expected=True)
             raise ExtractorError(f'{self.IE_NAME} said: {stream["code"]} - {stream["message"]}', expected=True)

-        if urlh.status == 429:
+        if urlh.getcode() == 429:
             self.raise_login_required(
                 f'{self.IE_NAME} asks you to solve a CAPTCHA. Solve CAPTCHA in browser and',
                 method='cookies')
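Both spellings exist on stdlib response objects: `.status` is the modern `http.client` attribute, while `.getcode()` is the older urllib accessor; this commit standardizes the extractors on the form that works across the response wrappers in use at the time. A stdlib illustration (the URL is just an example):

```python
import urllib.request

with urllib.request.urlopen('https://example.com') as resp:
    print(resp.status)     # e.g. 200
    print(resp.getcode())  # same value via the legacy accessor
```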
@@ -17,6 +17,7 @@ import subprocess
 import sys
 import time
 import types
+import urllib.error
 import urllib.parse
 import urllib.request
 import xml.etree.ElementTree
@@ -58,6 +59,7 @@ from ..utils import (
     join_nonempty,
     js_to_json,
     mimetype2ext,
+    netrc_from_content,
     network_exceptions,
     orderedSet,
     parse_bitrate,
@@ -72,7 +74,6 @@ from ..utils import (
     smuggle_url,
     str_or_none,
     str_to_int,
-    netrc_from_content,
     strip_or_none,
     traverse_obj,
     truncate_string,
yt_dlp/extractor/dumpert.py | 0 changed lines (Executable file → Normal file)
yt_dlp/extractor/globalplayer.py | 0 changed lines (Executable file → Normal file)
@@ -238,10 +238,8 @@ class OdnoklassnikiIE(InfoExtractor):
     def _clear_cookies(self, cdn_url):
         # Direct http downloads will fail if CDN cookies are set
         # so we need to reset them after each format extraction
-        if self._get_cookies('https://notarealsubdomain.mycdn.me/'):
-            self.cookiejar.clear(domain='.mycdn.me')
-        if self._get_cookies(cdn_url):
-            self.cookiejar.clear(domain=urllib.parse.urlparse(cdn_url).hostname)
+        self.cookiejar.clear(domain='.mycdn.me')
+        self.cookiejar.clear(domain=urllib.parse.urlparse(cdn_url).hostname)

     @classmethod
     def _extract_embed_urls(cls, url, webpage):
@@ -488,9 +488,9 @@ class TVPVODBaseIE(InfoExtractor):
             f'{self._API_BASE_URL}/{resource}', video_id,
             query={'lang': 'pl', 'platform': 'BROWSER', **query},
             expected_status=lambda x: is_valid(x) or 400 <= x < 500, **kwargs)
-        if is_valid(urlh.status):
+        if is_valid(urlh.getcode()):
             return document
-        raise ExtractorError(f'Woronicza said: {document.get("code")} (HTTP {urlh.status})')
+        raise ExtractorError(f'Woronicza said: {document.get("code")} (HTTP {urlh.getcode()})')

     def _parse_video(self, video, with_url=True):
         info_dict = traverse_obj(video, {
@@ -39,7 +39,7 @@ class VidioBaseIE(InfoExtractor):
         login_post, login_post_urlh = self._download_webpage_handle(
             self._LOGIN_URL, None, 'Logging in', data=urlencode_postdata(login_form), expected_status=[302, 401])

-        if login_post_urlh.status == 401:
+        if login_post_urlh.getcode() == 401:
             if get_element_by_class('onboarding-content-register-popup__title', login_post):
                 raise ExtractorError(
                     'Unable to log in: The provided email has not registered yet.', expected=True)
@@ -811,7 +811,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'BADGE_STYLE_TYPE_PREMIUM': BadgeType.AVAILABILITY_PREMIUM,
             'BADGE_STYLE_TYPE_LIVE_NOW': BadgeType.LIVE_NOW,
             'BADGE_STYLE_TYPE_VERIFIED': BadgeType.VERIFIED,
-            'BADGE_STYLE_TYPE_VERIFIED_ARTIST': BadgeType.VERIFIED
+            'BADGE_STYLE_TYPE_VERIFIED_ARTIST': BadgeType.VERIFIED,
         }

         label_map = {
@@ -821,7 +821,7 @@ class YoutubeBaseInfoExtractor(InfoExtractor):
             'live': BadgeType.LIVE_NOW,
             'premium': BadgeType.AVAILABILITY_PREMIUM,
             'verified': BadgeType.VERIFIED,
-            'official artist channel': BadgeType.VERIFIED
+            'official artist channel': BadgeType.VERIFIED,
         }

         badges = []
@@ -3935,7 +3935,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 f['quality'] = q(itag_qualities.get(try_get(f, lambda f: f['format_id'].split('-')[0]), -1))
             if f['quality'] == -1 and f.get('height'):
                 f['quality'] = q(res_qualities[min(res_qualities, key=lambda x: abs(x - f['height']))])
-            if self.get_param('verbose'):
+            if self.get_param('verbose') or all_formats:
                 f['format_note'] = join_nonempty(f.get('format_note'), client_name, delim=', ')
             if f.get('fps') and f['fps'] <= 1:
                 del f['fps']
@@ -4531,7 +4531,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
                 and 'no-youtube-prefer-utc-upload-date' not in self.get_param('compat_opts', [])
         ):
             upload_date = strftime_or_none(
-                self._parse_time_text(self._get_text(vpir, 'dateText')), '%Y%m%d') or upload_date
+                self._parse_time_text(self._get_text(vpir, 'dateText'))) or upload_date
             info['upload_date'] = upload_date

         for s_k, d_k in [('artist', 'creator'), ('track', 'alt_title')]:
@@ -5071,7 +5071,7 @@ class YoutubeTabBaseInfoExtractor(YoutubeBaseInfoExtractor):
         last_updated_unix = self._parse_time_text(
             self._get_text(playlist_stats, 2)  # deprecated, remove when old layout discontinued
             or self._get_text(playlist_header_renderer, ('byline', 1, 'playlistBylineRenderer', 'text')))
-        info['modified_date'] = strftime_or_none(last_updated_unix, '%Y%m%d')
+        info['modified_date'] = strftime_or_none(last_updated_unix)

         info['view_count'] = self._get_count(playlist_stats, 1)
         if info['view_count'] is None:  # 0 is allowed
@@ -1414,8 +1414,7 @@ def create_parser():
         '--clean-info-json', '--clean-infojson',
         action='store_true', dest='clean_infojson', default=None,
         help=(
-            'Remove some private fields such as filenames from the infojson. '
-            'Note that it could still contain some personal information (default)'))
+            'Remove some internal metadata such as filenames from the infojson (default)'))
     filesystem.add_option(
         '--no-clean-info-json', '--no-clean-infojson',
         action='store_false', dest='clean_infojson',
@@ -1678,8 +1677,7 @@ def create_parser():
             'Execute a command, optionally prefixed with when to execute it, separated by a ":". '
             'Supported values of "WHEN" are the same as that of --use-postprocessor (default: after_move). '
             'Same syntax as the output template can be used to pass any field as arguments to the command. '
-            'After download, an additional field "filepath" that contains the final path of the downloaded file '
-            'is also available, and if no fields are passed, %(filepath,_filename|)q is appended to the end of the command. '
+            'If no fields are passed, %(filepath,_filename|)q is appended to the end of the command. '
             'This option can be used multiple times'))
     postproc.add_option(
         '--no-exec',
@@ -6,7 +6,7 @@ import sys
 import urllib.parse
 import zlib

-from ._utils import decode_base_n, preferredencoding
+from ._utils import Popen, decode_base_n, preferredencoding
 from .traversal import traverse_obj
 from ..dependencies import certifi, websockets

@@ -174,3 +174,7 @@ def handle_youtubedl_headers(headers):
         del filtered_headers['Youtubedl-no-compression']

     return filtered_headers
+
+
+def process_communicate_or_kill(p, *args, **kwargs):
+    return Popen.communicate_or_kill(p, *args, **kwargs)
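The shim now lives in the legacy module and simply forwards to the method it deprecates (the warning-emitting version is removed from `_utils` below). A sketch of the non-deprecated spelling (assumption: POSIX `echo` is available):

```python
import subprocess
from yt_dlp.utils import Popen

proc = Popen(['echo', 'hello'], stdout=subprocess.PIPE)
stdout, stderr = proc.communicate_or_kill()  # like communicate(), but kills on error
print(stdout)
```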
@@ -872,12 +872,6 @@ class netrc_from_content(netrc.netrc):
         self._parse('-', stream, False)


-def process_communicate_or_kill(p, *args, **kwargs):
-    deprecation_warning(f'"{__name__}.process_communicate_or_kill" is deprecated and may be removed '
-                        f'in a future version. Use "{__name__}.Popen.communicate_or_kill" instead')
-    return Popen.communicate_or_kill(p, *args, **kwargs)
-
-
 class Popen(subprocess.Popen):
     if sys.platform == 'win32':
         _startupinfo = subprocess.STARTUPINFO()
||||||
|
@ -1662,7 +1656,7 @@ def unified_strdate(date_str, day_first=True):
|
||||||
|
|
||||||
|
|
||||||
def unified_timestamp(date_str, day_first=True):
|
def unified_timestamp(date_str, day_first=True):
|
||||||
if date_str is None:
|
if not isinstance(date_str, str):
|
||||||
return None
|
return None
|
||||||
|
|
||||||
date_str = re.sub(r'\s+', ' ', re.sub(
|
date_str = re.sub(r'\s+', ' ', re.sub(
|
||||||
@@ -2454,7 +2448,7 @@ def request_to_url(req):
     return req


-def strftime_or_none(timestamp, date_format, default=None):
+def strftime_or_none(timestamp, date_format='%Y%m%d', default=None):
     datetime_object = None
     try:
         if isinstance(timestamp, (int, float)):  # unix timestamp
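With the new default, the callers changed above can drop their explicit `'%Y%m%d'` argument. A quick sketch of the resulting behavior (assumption: unix timestamps are rendered in UTC):

```python
from yt_dlp.utils import strftime_or_none

print(strftime_or_none(0))     # '19700101' - the default date_format
print(strftime_or_none(None))  # None - falls back to the default argument
```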