diff --git a/CONTRIBUTORS b/CONTRIBUTORS index a9a0557426..9b8207b28b 100644 --- a/CONTRIBUTORS +++ b/CONTRIBUTORS @@ -695,3 +695,15 @@ KBelmin kesor MellowKyler Wesley107772 +a13ssandr0 +ChocoLZS +doe1080 +hugovdev +jshumphrey +julionc +manavchaudhary1 +powergold1 +Sakura286 +SamDecrock +stratus-ss +subrat-lima diff --git a/Changelog.md b/Changelog.md index 2648b9fe22..4dc0323683 100644 --- a/Changelog.md +++ b/Changelog.md @@ -4,6 +4,64 @@ # To create a release, dispatch the https://github.com/yt-dlp/yt-dlp/actions/workflows/release.yml workflow on master --> +### 2024.11.18 + +#### Important changes +- **Login with OAuth is no longer supported for YouTube** +Due to a change made by the site, yt-dlp is no longer able to support OAuth login for YouTube. [Read more](https://github.com/yt-dlp/yt-dlp/issues/11462#issuecomment-2471703090) + +#### Core changes +- [Catch broken Cryptodome installations](https://github.com/yt-dlp/yt-dlp/commit/b83ca24eb72e1e558b0185bd73975586c0bc0546) ([#11486](https://github.com/yt-dlp/yt-dlp/issues/11486)) by [seproDev](https://github.com/seproDev) +- **utils** + - [Fix `join_nonempty`, add `**kwargs` to `unpack`](https://github.com/yt-dlp/yt-dlp/commit/39d79c9b9cf23411d935910685c40aa1a2fdb409) ([#11559](https://github.com/yt-dlp/yt-dlp/issues/11559)) by [Grub4K](https://github.com/Grub4K) + - `subs_list_to_dict`: [Add `lang` default parameter](https://github.com/yt-dlp/yt-dlp/commit/c014fbcddcb4c8f79d914ac5bb526758b540ea33) ([#11508](https://github.com/yt-dlp/yt-dlp/issues/11508)) by [Grub4K](https://github.com/Grub4K) + +#### Extractor changes +- [Allow `ext` override for thumbnails](https://github.com/yt-dlp/yt-dlp/commit/eb64ae7d5def6df2aba74fb703e7f168fb299865) ([#11545](https://github.com/yt-dlp/yt-dlp/issues/11545)) by [bashonly](https://github.com/bashonly) +- **adobepass**: [Fix provider requests](https://github.com/yt-dlp/yt-dlp/commit/85fdc66b6e01d19a94b4f39b58e3c0cf23600902) ([#11472](https://github.com/yt-dlp/yt-dlp/issues/11472)) by [bashonly](https://github.com/bashonly) +- **archive.org**: [Fix comments extraction](https://github.com/yt-dlp/yt-dlp/commit/f2a4983df7a64c4e93b56f79dbd16a781bd90206) ([#11527](https://github.com/yt-dlp/yt-dlp/issues/11527)) by [jshumphrey](https://github.com/jshumphrey) +- **bandlab**: [Add extractors](https://github.com/yt-dlp/yt-dlp/commit/6365e92589e4bc17b8fffb0125a716d144ad2137) ([#11535](https://github.com/yt-dlp/yt-dlp/issues/11535)) by [seproDev](https://github.com/seproDev) +- **chaturbate** + - [Extract from API and support impersonation](https://github.com/yt-dlp/yt-dlp/commit/720b3dc453c342bc2e8df7dbc0acaab4479de46c) ([#11555](https://github.com/yt-dlp/yt-dlp/issues/11555)) by [powergold1](https://github.com/powergold1) (With fixes in [7cecd29](https://github.com/yt-dlp/yt-dlp/commit/7cecd299e4a5ef1f0f044b2fedc26f17e41f15e3) by [seproDev](https://github.com/seproDev)) + - [Support alternate domains](https://github.com/yt-dlp/yt-dlp/commit/a9f85670d03ab993dc589f21a9ffffcad61392d5) ([#10595](https://github.com/yt-dlp/yt-dlp/issues/10595)) by [manavchaudhary1](https://github.com/manavchaudhary1) +- **cloudflarestream**: [Avoid extraction via videodelivery.net](https://github.com/yt-dlp/yt-dlp/commit/2db8c2e7d57a1784b06057c48e3e91023720d195) ([#11478](https://github.com/yt-dlp/yt-dlp/issues/11478)) by [hugovdev](https://github.com/hugovdev) +- **ctvnews** + - [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/f351440f1dc5b3dfbfc5737b037a869d946056fe) 
([#11534](https://github.com/yt-dlp/yt-dlp/issues/11534)) by [bashonly](https://github.com/bashonly), [jshumphrey](https://github.com/jshumphrey) + - [Fix playlist ID extraction](https://github.com/yt-dlp/yt-dlp/commit/f9d98509a898737c12977b2e2117277bada2c196) ([#8892](https://github.com/yt-dlp/yt-dlp/issues/8892)) by [qbnu](https://github.com/qbnu) +- **digitalconcerthall**: [Support login with access/refresh tokens](https://github.com/yt-dlp/yt-dlp/commit/f7257588bdff5f0b0452635a66b253a783c97357) ([#11571](https://github.com/yt-dlp/yt-dlp/issues/11571)) by [bashonly](https://github.com/bashonly) +- **facebook**: [Fix formats extraction](https://github.com/yt-dlp/yt-dlp/commit/bacc31b05a04181b63100c481565256b14813a5e) ([#11513](https://github.com/yt-dlp/yt-dlp/issues/11513)) by [bashonly](https://github.com/bashonly) +- **gamedevtv**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/be3579aaf0c3b71a0a3195e1955415d5e4d6b3d8) ([#11368](https://github.com/yt-dlp/yt-dlp/issues/11368)) by [bashonly](https://github.com/bashonly), [stratus-ss](https://github.com/stratus-ss) +- **goplay**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/6b43a8d84b881d769b480ba6e20ec691e9d1b92d) ([#11466](https://github.com/yt-dlp/yt-dlp/issues/11466)) by [bashonly](https://github.com/bashonly), [SamDecrock](https://github.com/SamDecrock) +- **kenh14**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/eb15fd5a32d8b35ef515f7a3d1158c03025648ff) ([#3996](https://github.com/yt-dlp/yt-dlp/issues/3996)) by [krichbanana](https://github.com/krichbanana), [pzhlkj6612](https://github.com/pzhlkj6612) +- **litv**: [Fix extractor](https://github.com/yt-dlp/yt-dlp/commit/e079ffbda66de150c0a9ebef05e89f61bb4d5f76) ([#11071](https://github.com/yt-dlp/yt-dlp/issues/11071)) by [jiru](https://github.com/jiru) +- **mixchmovie**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/0ec9bfed4d4a52bfb4f8733da1acf0aeeae21e6b) ([#10897](https://github.com/yt-dlp/yt-dlp/issues/10897)) by [Sakura286](https://github.com/Sakura286) +- **patreon**: [Fix comments extraction](https://github.com/yt-dlp/yt-dlp/commit/1d253b0a27110d174c40faf8fb1c999d099e0cde) ([#11530](https://github.com/yt-dlp/yt-dlp/issues/11530)) by [bashonly](https://github.com/bashonly), [jshumphrey](https://github.com/jshumphrey) +- **pialive**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/d867f99622ef7fba690b08da56c39d739b822bb7) ([#10811](https://github.com/yt-dlp/yt-dlp/issues/10811)) by [ChocoLZS](https://github.com/ChocoLZS) +- **radioradicale**: [Add extractor](https://github.com/yt-dlp/yt-dlp/commit/70c55cb08f780eab687e881ef42bb5c6007d290b) ([#5607](https://github.com/yt-dlp/yt-dlp/issues/5607)) by [a13ssandr0](https://github.com/a13ssandr0), [pzhlkj6612](https://github.com/pzhlkj6612) +- **reddit**: [Improve error handling](https://github.com/yt-dlp/yt-dlp/commit/7ea2787920cccc6b8ea30791993d114fbd564434) ([#11573](https://github.com/yt-dlp/yt-dlp/issues/11573)) by [bashonly](https://github.com/bashonly) +- **redgifsuser**: [Fix extraction](https://github.com/yt-dlp/yt-dlp/commit/d215fba7edb69d4fa665f43663756fd260b1489f) ([#11531](https://github.com/yt-dlp/yt-dlp/issues/11531)) by [jshumphrey](https://github.com/jshumphrey) +- **rutube**: [Rework extractors](https://github.com/yt-dlp/yt-dlp/commit/e398217aae19bb25f91797bfbe8a3243698d7f45) ([#11480](https://github.com/yt-dlp/yt-dlp/issues/11480)) by [seproDev](https://github.com/seproDev) +- **sonylivseries**: [Add `sort_order` 
extractor-arg](https://github.com/yt-dlp/yt-dlp/commit/2009cb27e17014787bf63eaa2ada51293d54f22a) ([#11569](https://github.com/yt-dlp/yt-dlp/issues/11569)) by [bashonly](https://github.com/bashonly) +- **soop**: [Fix thumbnail extraction](https://github.com/yt-dlp/yt-dlp/commit/c699bafc5038b59c9afe8c2e69175fb66424c832) ([#11545](https://github.com/yt-dlp/yt-dlp/issues/11545)) by [bashonly](https://github.com/bashonly) +- **spankbang**: [Support browser impersonation](https://github.com/yt-dlp/yt-dlp/commit/8388ec256f7753b02488788e3cfa771f6e1db247) ([#11542](https://github.com/yt-dlp/yt-dlp/issues/11542)) by [jshumphrey](https://github.com/jshumphrey) +- **spreaker** + - [Support episode pages and access keys](https://github.com/yt-dlp/yt-dlp/commit/c39016f66df76d14284c705736ca73db8055d8de) ([#11489](https://github.com/yt-dlp/yt-dlp/issues/11489)) by [julionc](https://github.com/julionc) + - [Support podcast and feed pages](https://github.com/yt-dlp/yt-dlp/commit/c6737310619022248f5d0fd13872073cac168453) ([#10968](https://github.com/yt-dlp/yt-dlp/issues/10968)) by [subrat-lima](https://github.com/subrat-lima) +- **youtube** + - [Player client maintenance](https://github.com/yt-dlp/yt-dlp/commit/637d62a3a9fc723d68632c1af25c30acdadeeb85) ([#11528](https://github.com/yt-dlp/yt-dlp/issues/11528)) by [bashonly](https://github.com/bashonly), [seproDev](https://github.com/seproDev) + - [Remove broken OAuth support](https://github.com/yt-dlp/yt-dlp/commit/52c0ffe40ad6e8404d93296f575007b05b04c686) ([#11558](https://github.com/yt-dlp/yt-dlp/issues/11558)) by [bashonly](https://github.com/bashonly) + - tab: [Fix podcasts tab extraction](https://github.com/yt-dlp/yt-dlp/commit/37cd7660eaff397c551ee18d80507702342b0c2b) ([#11567](https://github.com/yt-dlp/yt-dlp/issues/11567)) by [seproDev](https://github.com/seproDev) + +#### Misc. 
changes +- **build** + - [Bump PyInstaller version pin to `>=6.11.1`](https://github.com/yt-dlp/yt-dlp/commit/f9c8deb4e5887ff5150e911ac0452e645f988044) ([#11507](https://github.com/yt-dlp/yt-dlp/issues/11507)) by [bashonly](https://github.com/bashonly) + - [Enable attestations for trusted publishing](https://github.com/yt-dlp/yt-dlp/commit/f13df591d4d7ca8e2f31b35c9c91e69ba9e9b013) ([#11420](https://github.com/yt-dlp/yt-dlp/issues/11420)) by [bashonly](https://github.com/bashonly) + - [Pin `websockets` version to >=13.0,<14](https://github.com/yt-dlp/yt-dlp/commit/240a7d43c8a67ffb86d44dc276805aa43c358dcc) ([#11488](https://github.com/yt-dlp/yt-dlp/issues/11488)) by [bashonly](https://github.com/bashonly) +- **cleanup** + - [Deprecate more compat functions](https://github.com/yt-dlp/yt-dlp/commit/f95a92b3d0169a784ee15a138fbe09d82b2754a1) ([#11439](https://github.com/yt-dlp/yt-dlp/issues/11439)) by [seproDev](https://github.com/seproDev) + - [Remove dead extractors](https://github.com/yt-dlp/yt-dlp/commit/10fc719bc7f1eef469389c5219102266ef411f29) ([#11566](https://github.com/yt-dlp/yt-dlp/issues/11566)) by [doe1080](https://github.com/doe1080) + - Miscellaneous: [da252d9](https://github.com/yt-dlp/yt-dlp/commit/da252d9d322af3e2178ac5eae324809502a0a862) by [bashonly](https://github.com/bashonly), [Grub4K](https://github.com/Grub4K), [seproDev](https://github.com/seproDev) + ### 2024.11.04 #### Important changes diff --git a/README.md b/README.md index 2df72b7498..0a62d8e74c 100644 --- a/README.md +++ b/README.md @@ -342,8 +342,9 @@ If you fork the project on GitHub, you can run your fork's [build workflow](.git extractor plugins; postprocessor plugins can only be loaded from the default plugin directories - --flat-playlist Do not extract the videos of a playlist, - only list them + --flat-playlist Do not extract a playlist's URL result + entries; some entry metadata may be missing + and downloading may be bypassed --no-flat-playlist Fully extract the videos of a playlist (default) --live-from-start Download livestreams from the start. @@ -1293,6 +1294,7 @@ The available fields are: - `playlist_uploader_id` (string): Nickname or id of the playlist uploader - `playlist_channel` (string): Display name of the channel that uploaded the playlist - `playlist_channel_id` (string): Identifier of the channel that uploaded the playlist + - `playlist_webpage_url` (string): URL of the playlist webpage - `webpage_url` (string): A URL to the video webpage which, if given to yt-dlp, should yield the same result again - `webpage_url_basename` (string): The basename of the webpage URL - `webpage_url_domain` (string): The domain of the webpage URL @@ -1768,7 +1770,7 @@ The following extractors use this feature: #### youtube * `lang`: Prefer translated metadata (`title`, `description` etc) of this language code (case-sensitive). By default, the video primary language metadata is preferred, with a fallback to `en` translated. See [youtube.py](https://github.com/yt-dlp/yt-dlp/blob/c26f9b991a0681fd3ea548d535919cec1fbbd430/yt_dlp/extractor/youtube.py#L381-L390) for list of supported content language codes * `skip`: One or more of `hls`, `dash` or `translated_subs` to skip extraction of the m3u8 manifests, dash manifests and [auto-translated subtitles](https://github.com/yt-dlp/yt-dlp/issues/4090#issuecomment-1158102032) respectively -* `player_client`: Clients to extract video data from. The main clients are `web`, `ios` and `android`, with variants `_music` and `_creator` (e.g. 
`ios_creator`); and `mweb`, `mediaconnect`, `android_testsuite`, `android_vr`, `web_safari`, `web_embedded`, `tv` and `tv_embedded` with no variants. By default, `ios,mweb` is used, and `web_creator,mediaconnect` is added as needed for age-gated videos when account age verification is required. Similarly, the `_music` variants are added for `music.youtube.com` URLs. Some clients, such as `web` and `android`, require a `po_token` for their formats to be downloadable. Some clients, such as the `_creator` variants, will only work with authentication. You can use `all` to use all the clients, and `default` for the default clients. You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=all,-web` +* `player_client`: Clients to extract video data from. The main clients are `web`, `ios` and `android`, with variants `_music` and `_creator` (e.g. `ios_creator`); and `mweb`, `mediaconnect`, `android_vr`, `web_safari`, `web_embedded`, `tv` and `tv_embedded` with no variants. By default, `ios,mweb` is used, and `web_creator` is added as needed for age-gated videos when account age verification is required. Similarly, the `_music` variants are added for `music.youtube.com` URLs. Some clients, such as `web` and `android`, require a `po_token` for their formats to be downloadable. Some clients, such as the `_creator` variants, will only work with authentication. You can use `all` to use all the clients, and `default` for the default clients. You can prefix a client with `-` to exclude it, e.g. `youtube:player_client=all,-web` * `player_skip`: Skip some network requests that are generally needed for robust extraction. One or more of `configs` (skip client configs), `webpage` (skip initial webpage), `js` (skip js player). While these options can help reduce the number of requests needed or avoid some rate-limiting, they could cause some issues. See [#860](https://github.com/yt-dlp/yt-dlp/pull/860) for more details * `player_params`: YouTube player parameters to use for player requests. Will overwrite any default ones set by yt-dlp. * `comment_sort`: `top` or `new` (default) - choose comment sorting mode (on YouTube's side) @@ -1866,8 +1868,8 @@ The following extractors use this feature: #### bilibili * `prefer_multi_flv`: Prefer extracting flv formats over mp4 for older videos that still provide legacy formats -#### digitalconcerthall -* `prefer_combined_hls`: Prefer extracting combined/pre-merged video and audio HLS formats. This will exclude 4K/HEVC video and lossless/FLAC audio formats, which are only available as split video/audio HLS formats +#### sonylivseries +* `sort_order`: Episode sort order for series extraction - one of `asc` (ascending, oldest first) or `desc` (descending, newest first). Default is `asc` **Note**: These options may be changed/removed in the future without concern for backward compatibility diff --git a/devscripts/changelog_override.json b/devscripts/changelog_override.json index 08ea9666ed..079e2f7296 100644 --- a/devscripts/changelog_override.json +++ b/devscripts/changelog_override.json @@ -234,5 +234,10 @@ "when": "57212a5f97ce367590aaa5c3e9a135eead8f81f7", "short": "[ie/vimeo] Fix API retries (#11351)", "authors": ["bashonly"] + }, + { + "action": "add", + "when": "52c0ffe40ad6e8404d93296f575007b05b04c686", + "short": "[priority] **Login with OAuth is no longer supported for YouTube**\nDue to a change made by the site, yt-dlp is no longer able to support OAuth login for YouTube. 
[Read more](https://github.com/yt-dlp/yt-dlp/issues/11462#issuecomment-2471703090)" } ] diff --git a/devscripts/generate_aes_testdata.py b/devscripts/generate_aes_testdata.py index 7f3c88bcfb..73cf803b8f 100644 --- a/devscripts/generate_aes_testdata.py +++ b/devscripts/generate_aes_testdata.py @@ -11,13 +11,12 @@ import codecs import subprocess from yt_dlp.aes import aes_encrypt, key_expansion -from yt_dlp.utils import intlist_to_bytes secret_msg = b'Secret message goes here' def hex_str(int_list): - return codecs.encode(intlist_to_bytes(int_list), 'hex') + return codecs.encode(bytes(int_list), 'hex') def openssl_encode(algo, key, iv): diff --git a/pyproject.toml b/pyproject.toml index ef921fed58..97ea4375fc 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -52,7 +52,7 @@ default = [ "pycryptodomex", "requests>=2.32.2,<3", "urllib3>=1.26.17,<3", - "websockets>=13.0,<14", + "websockets>=13.0", ] curl-cffi = [ "curl-cffi==0.5.10; os_name=='nt' and implementation_name=='cpython'", @@ -313,6 +313,16 @@ banned-from = [ "yt_dlp.compat.compat_urllib_parse_urlparse".msg = "Use `urllib.parse.urlparse` instead." "yt_dlp.compat.compat_shlex_quote".msg = "Use `yt_dlp.utils.shell_quote` instead." "yt_dlp.utils.error_to_compat_str".msg = "Use `str` instead." +"yt_dlp.utils.bytes_to_intlist".msg = "Use `list` instead." +"yt_dlp.utils.intlist_to_bytes".msg = "Use `bytes` instead." +"yt_dlp.utils.decodeArgument".msg = "Do not use" +"yt_dlp.utils.decodeFilename".msg = "Do not use" +"yt_dlp.utils.encodeFilename".msg = "Do not use" +"yt_dlp.compat.compat_os_name".msg = "Use `os.name` instead." +"yt_dlp.compat.compat_realpath".msg = "Use `os.path.realpath` instead." +"yt_dlp.compat.functools".msg = "Use `functools` instead." +"yt_dlp.utils.decodeOption".msg = "Do not use" +"yt_dlp.utils.compiled_regex_type".msg = "Use `re.Pattern` instead." 
[tool.autopep8] max_line_length = 120 diff --git a/supportedsites.md b/supportedsites.md index fc79e4ae61..916735e08b 100644 --- a/supportedsites.md +++ b/supportedsites.md @@ -129,6 +129,8 @@ - **Bandcamp:album** - **Bandcamp:user** - **Bandcamp:weekly** + - **Bandlab** + - **BandlabPlaylist** - **BannedVideo** - **bbc**: [*bbc*](## "netrc machine") BBC - **bbc.co.uk**: [*bbc*](## "netrc machine") BBC iPlayer @@ -484,6 +486,7 @@ - **Gab** - **GabTV** - **Gaia**: [*gaia*](## "netrc machine") + - **GameDevTVDashboard**: [*gamedevtv*](## "netrc machine") - **GameJolt** - **GameJoltCommunity** - **GameJoltGame** @@ -651,6 +654,8 @@ - **Karaoketv** - **Katsomo**: (**Currently broken**) - **KelbyOne**: (**Currently broken**) + - **Kenh14Playlist** + - **Kenh14Video** - **Ketnet** - **khanacademy** - **khanacademy:unit** @@ -784,10 +789,6 @@ - **MicrosoftLearnSession** - **MicrosoftMedius** - **microsoftstream**: Microsoft Stream - - **mildom**: Record ongoing live by specific user in Mildom - - **mildom:clip**: Clip in Mildom - - **mildom:​user:vod**: Download all VODs from specific user in Mildom - - **mildom:vod**: VOD in Mildom - **minds** - **minds:channel** - **minds:group** @@ -798,6 +799,7 @@ - **MiTele**: mitele.es - **mixch** - **mixch:archive** + - **mixch:movie** - **mixcloud** - **mixcloud:playlist** - **mixcloud:user** @@ -1060,8 +1062,8 @@ - **PhilharmonieDeParis**: Philharmonie de Paris - **phoenix.de** - **Photobucket** + - **PiaLive** - **Piapro**: [*piapro*](## "netrc machine") - - **PIAULIZAPortal**: ulizaportal.jp - PIA LIVE STREAM - **Picarto** - **PicartoVod** - **Piksel** @@ -1088,8 +1090,6 @@ - **PodbayFMChannel** - **Podchaser** - **podomatic**: (**Currently broken**) - - **Pokemon** - - **PokemonWatch** - **PokerGo**: [*pokergo*](## "netrc machine") - **PokerGoCollection**: [*pokergo*](## "netrc machine") - **PolsatGo** @@ -1160,6 +1160,7 @@ - **RadioJavan**: (**Currently broken**) - **radiokapital** - **radiokapital:show** + - **RadioRadicale** - **RadioZetPodcast** - **radlive** - **radlive:channel** @@ -1367,9 +1368,7 @@ - **spotify**: Spotify episodes (**Currently broken**) - **spotify:show**: Spotify shows (**Currently broken**) - **Spreaker** - - **SpreakerPage** - **SpreakerShow** - - **SpreakerShowPage** - **SpringboardPlatform** - **Sprout** - **SproutVideo** @@ -1570,6 +1569,8 @@ - **UFCTV**: [*ufctv*](## "netrc machine") - **ukcolumn**: (**Currently broken**) - **UKTVPlay** + - **UlizaPlayer** + - **UlizaPortal**: ulizaportal.jp - **umg:de**: Universal Music Deutschland (**Currently broken**) - **Unistra** - **Unity**: (**Currently broken**) @@ -1587,8 +1588,6 @@ - **Varzesh3**: (**Currently broken**) - **Vbox7** - **Veo** - - **Veoh** - - **veoh:user** - **Vesti**: Вести.Ru (**Currently broken**) - **Vevo** - **VevoPlaylist** diff --git a/test/helper.py b/test/helper.py index 3b550d1927..c776e70b73 100644 --- a/test/helper.py +++ b/test/helper.py @@ -9,7 +9,6 @@ import types import yt_dlp.extractor from yt_dlp import YoutubeDL -from yt_dlp.compat import compat_os_name from yt_dlp.utils import preferredencoding, try_call, write_string, find_available_port if 'pytest' in sys.modules: @@ -49,7 +48,7 @@ def report_warning(message, *args, **kwargs): Print the message to stderr, it will be prefixed with 'WARNING:' If stderr is a tty file the 'WARNING:' will be colored """ - if sys.stderr.isatty() and compat_os_name != 'nt': + if sys.stderr.isatty() and os.name != 'nt': _msg_header = '\033[0;33mWARNING:\033[0m' else: _msg_header = 'WARNING:' diff --git 
a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py index a99e624080..966d27a498 100644 --- a/test/test_YoutubeDL.py +++ b/test/test_YoutubeDL.py @@ -15,7 +15,6 @@ import json from test.helper import FakeYDL, assertRegexpMatches, try_rm from yt_dlp import YoutubeDL -from yt_dlp.compat import compat_os_name from yt_dlp.extractor import YoutubeIE from yt_dlp.extractor.common import InfoExtractor from yt_dlp.postprocessor.common import PostProcessor @@ -839,8 +838,8 @@ class TestYoutubeDL(unittest.TestCase): test('%(filesize)#D', '1Ki') test('%(height)5.2D', ' 1.08k') test('%(title4)#S', 'foo_bar_test') - test('%(title4).10S', ('foo "bar" ', 'foo "bar"' + ('#' if compat_os_name == 'nt' else ' '))) - if compat_os_name == 'nt': + test('%(title4).10S', ('foo "bar" ', 'foo "bar"' + ('#' if os.name == 'nt' else ' '))) + if os.name == 'nt': test('%(title4)q', ('"foo ""bar"" test"', None)) test('%(formats.:.id)#q', ('"id 1" "id 2" "id 3"', None)) test('%(formats.0.id)#q', ('"id 1"', None)) @@ -903,9 +902,9 @@ class TestYoutubeDL(unittest.TestCase): # Environment variable expansion for prepare_filename os.environ['__yt_dlp_var'] = 'expanded' - envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var' + envvar = '%__yt_dlp_var%' if os.name == 'nt' else '$__yt_dlp_var' test(envvar, (envvar, 'expanded')) - if compat_os_name == 'nt': + if os.name == 'nt': test('%s%', ('%s%', '%s%')) os.environ['s'] = 'expanded' test('%s%', ('%s%', 'expanded')) # %s% should be expanded before escaping %s diff --git a/test/test_aes.py b/test/test_aes.py index 6fe6059a17..9cd9189bcc 100644 --- a/test/test_aes.py +++ b/test/test_aes.py @@ -27,7 +27,6 @@ from yt_dlp.aes import ( pad_block, ) from yt_dlp.dependencies import Cryptodome -from yt_dlp.utils import bytes_to_intlist, intlist_to_bytes # the encrypted data can be generate with 'devscripts/generate_aes_testdata.py' @@ -40,33 +39,33 @@ class TestAES(unittest.TestCase): def test_encrypt(self): msg = b'message' key = list(range(16)) - encrypted = aes_encrypt(bytes_to_intlist(msg), key) - decrypted = intlist_to_bytes(aes_decrypt(encrypted, key)) + encrypted = aes_encrypt(list(msg), key) + decrypted = bytes(aes_decrypt(encrypted, key)) self.assertEqual(decrypted, msg) def test_cbc_decrypt(self): data = b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\x27\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd' - decrypted = intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist(data), self.key, self.iv)) + decrypted = bytes(aes_cbc_decrypt(list(data), self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) if Cryptodome.AES: - decrypted = aes_cbc_decrypt_bytes(data, intlist_to_bytes(self.key), intlist_to_bytes(self.iv)) + decrypted = aes_cbc_decrypt_bytes(data, bytes(self.key), bytes(self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_cbc_encrypt(self): - data = bytes_to_intlist(self.secret_msg) - encrypted = intlist_to_bytes(aes_cbc_encrypt(data, self.key, self.iv)) + data = list(self.secret_msg) + encrypted = bytes(aes_cbc_encrypt(data, self.key, self.iv)) self.assertEqual( encrypted, b'\x97\x92+\xe5\x0b\xc3\x18\x91ky9m&\xb3\xb5@\xe6\'\xc2\x96.\xc8u\x88\xab9-[\x9e|\xf1\xcd') def test_ctr_decrypt(self): - data = bytes_to_intlist(b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08') - decrypted = intlist_to_bytes(aes_ctr_decrypt(data, self.key, self.iv)) + data = list(b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08') + decrypted = bytes(aes_ctr_decrypt(data, self.key, self.iv)) 
self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_ctr_encrypt(self): - data = bytes_to_intlist(self.secret_msg) - encrypted = intlist_to_bytes(aes_ctr_encrypt(data, self.key, self.iv)) + data = list(self.secret_msg) + encrypted = bytes(aes_ctr_encrypt(data, self.key, self.iv)) self.assertEqual( encrypted, b'\x03\xc7\xdd\xd4\x8e\xb3\xbc\x1a*O\xdc1\x12+8Aio\xd1z\xb5#\xaf\x08') @@ -75,19 +74,19 @@ class TestAES(unittest.TestCase): data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f.\x08\xb4T\xe4/\x17\xbd' authentication_tag = b'\xe8&I\x80rI\x07\x9d}YWuU@:e' - decrypted = intlist_to_bytes(aes_gcm_decrypt_and_verify( - bytes_to_intlist(data), self.key, bytes_to_intlist(authentication_tag), self.iv[:12])) + decrypted = bytes(aes_gcm_decrypt_and_verify( + list(data), self.key, list(authentication_tag), self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) if Cryptodome.AES: decrypted = aes_gcm_decrypt_and_verify_bytes( - data, intlist_to_bytes(self.key), authentication_tag, intlist_to_bytes(self.iv[:12])) + data, bytes(self.key), authentication_tag, bytes(self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_gcm_aligned_decrypt(self): data = b'\x159Y\xcf5eud\x90\x9c\x85&]\x14\x1d\x0f' authentication_tag = b'\x08\xb1\x9d!&\x98\xd0\xeaRq\x90\xe6;\xb5]\xd8' - decrypted = intlist_to_bytes(aes_gcm_decrypt_and_verify( + decrypted = bytes(aes_gcm_decrypt_and_verify( list(data), self.key, list(authentication_tag), self.iv[:12])) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16]) if Cryptodome.AES: @@ -96,38 +95,38 @@ class TestAES(unittest.TestCase): self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg[:16]) def test_decrypt_text(self): - password = intlist_to_bytes(self.key).decode() + password = bytes(self.key).decode() encrypted = base64.b64encode( - intlist_to_bytes(self.iv[:8]) + bytes(self.iv[:8]) + b'\x17\x15\x93\xab\x8d\x80V\xcdV\xe0\t\xcdo\xc2\xa5\xd8ksM\r\xe27N\xae', ).decode() decrypted = (aes_decrypt_text(encrypted, password, 16)) self.assertEqual(decrypted, self.secret_msg) - password = intlist_to_bytes(self.key).decode() + password = bytes(self.key).decode() encrypted = base64.b64encode( - intlist_to_bytes(self.iv[:8]) + bytes(self.iv[:8]) + b'\x0b\xe6\xa4\xd9z\x0e\xb8\xb9\xd0\xd4i_\x85\x1d\x99\x98_\xe5\x80\xe7.\xbf\xa5\x83', ).decode() decrypted = (aes_decrypt_text(encrypted, password, 32)) self.assertEqual(decrypted, self.secret_msg) def test_ecb_encrypt(self): - data = bytes_to_intlist(self.secret_msg) - encrypted = intlist_to_bytes(aes_ecb_encrypt(data, self.key)) + data = list(self.secret_msg) + encrypted = bytes(aes_ecb_encrypt(data, self.key)) self.assertEqual( encrypted, b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:') def test_ecb_decrypt(self): - data = bytes_to_intlist(b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:') - decrypted = intlist_to_bytes(aes_ecb_decrypt(data, self.key, self.iv)) + data = list(b'\xaa\x86]\x81\x97>\x02\x92\x9d\x1bR[[L/u\xd3&\xd1(h\xde{\x81\x94\xba\x02\xae\xbd\xa6\xd0:') + decrypted = bytes(aes_ecb_decrypt(data, self.key, self.iv)) self.assertEqual(decrypted.rstrip(b'\x08'), self.secret_msg) def test_key_expansion(self): key = '4f6bdaa39e2f8cb07f5e722d9edef314' - self.assertEqual(key_expansion(bytes_to_intlist(bytearray.fromhex(key))), [ + self.assertEqual(key_expansion(list(bytearray.fromhex(key))), [ 0x4F, 0x6B, 0xDA, 0xA3, 0x9E, 0x2F, 0x8C, 0xB0, 0x7F, 0x5E, 0x72, 0x2D, 
0x9E, 0xDE, 0xF3, 0x14, 0x53, 0x66, 0x20, 0xA8, 0xCD, 0x49, 0xAC, 0x18, 0xB2, 0x17, 0xDE, 0x35, 0x2C, 0xC9, 0x2D, 0x21, 0x8C, 0xBE, 0xDD, 0xD9, 0x41, 0xF7, 0x71, 0xC1, 0xF3, 0xE0, 0xAF, 0xF4, 0xDF, 0x29, 0x82, 0xD5, diff --git a/test/test_compat.py b/test/test_compat.py index e7d97e3e93..b1cc2a8187 100644 --- a/test/test_compat.py +++ b/test/test_compat.py @@ -12,12 +12,7 @@ import struct from yt_dlp import compat from yt_dlp.compat import urllib # isort: split -from yt_dlp.compat import ( - compat_etree_fromstring, - compat_expanduser, - compat_urllib_parse_unquote, # noqa: TID251 - compat_urllib_parse_urlencode, # noqa: TID251 -) +from yt_dlp.compat import compat_etree_fromstring, compat_expanduser from yt_dlp.compat.urllib.request import getproxies @@ -43,39 +38,6 @@ class TestCompat(unittest.TestCase): finally: os.environ['HOME'] = old_home or '' - def test_compat_urllib_parse_unquote(self): - self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def') - self.assertEqual(compat_urllib_parse_unquote('%7e/abc+def'), '~/abc+def') - self.assertEqual(compat_urllib_parse_unquote(''), '') - self.assertEqual(compat_urllib_parse_unquote('%'), '%') - self.assertEqual(compat_urllib_parse_unquote('%%'), '%%') - self.assertEqual(compat_urllib_parse_unquote('%%%'), '%%%') - self.assertEqual(compat_urllib_parse_unquote('%2F'), '/') - self.assertEqual(compat_urllib_parse_unquote('%2f'), '/') - self.assertEqual(compat_urllib_parse_unquote('%E6%B4%A5%E6%B3%A2'), '津波') - self.assertEqual( - compat_urllib_parse_unquote(''' -%%a'''), - ''' -%%a''') - self.assertEqual( - compat_urllib_parse_unquote('''%28%5E%E2%97%A3_%E2%97%A2%5E%29%E3%81%A3%EF%B8%BB%E3%83%87%E2%95%90%E4%B8%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%87%80 %E2%86%B6%I%Break%25Things%'''), - '''(^◣_◢^)っ︻デ═一 ⇀ ⇀ ⇀ ⇀ ⇀ ↶%I%Break%Things%''') - - def test_compat_urllib_parse_unquote_plus(self): - self.assertEqual(urllib.parse.unquote_plus('abc%20def'), 'abc def') - self.assertEqual(urllib.parse.unquote_plus('%7e/abc+def'), '~/abc def') - - def test_compat_urllib_parse_urlencode(self): - self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode({'abc': b'def'}), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode({b'abc': 'def'}), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode({b'abc': b'def'}), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode([('abc', 'def')]), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode([('abc', b'def')]), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode([(b'abc', 'def')]), 'abc=def') - self.assertEqual(compat_urllib_parse_urlencode([(b'abc', b'def')]), 'abc=def') - def test_compat_etree_fromstring(self): xml = ''' diff --git a/test/test_downloader_http.py b/test/test_downloader_http.py index faba0bc9c8..cf2e3fac16 100644 --- a/test/test_downloader_http.py +++ b/test/test_downloader_http.py @@ -15,7 +15,6 @@ import threading from test.helper import http_server_port, try_rm from yt_dlp import YoutubeDL from yt_dlp.downloader.http import HttpFD -from yt_dlp.utils import encodeFilename from yt_dlp.utils._utils import _YDLLogger as FakeLogger TEST_DIR = os.path.dirname(os.path.abspath(__file__)) @@ -82,12 +81,12 @@ class TestHttpFD(unittest.TestCase): ydl = YoutubeDL(params) downloader = HttpFD(ydl, params) filename = 'testfile.mp4' - try_rm(encodeFilename(filename)) + try_rm(filename) self.assertTrue(downloader.real_download(filename, { 'url': f'http://127.0.0.1:{self.port}/{ep}', }), ep) - 
self.assertEqual(os.path.getsize(encodeFilename(filename)), TEST_SIZE, ep) - try_rm(encodeFilename(filename)) + self.assertEqual(os.path.getsize(filename), TEST_SIZE, ep) + try_rm(filename) def download_all(self, params): for ep in ('regular', 'no-content-length', 'no-range', 'no-range-no-content-length'): diff --git a/test/test_socks.py b/test/test_socks.py index 68af19d0ca..f601fc8a5e 100644 --- a/test/test_socks.py +++ b/test/test_socks.py @@ -216,7 +216,9 @@ class SocksWebSocketTestRequestHandler(SocksTestRequestHandler): protocol = websockets.ServerProtocol() connection = websockets.sync.server.ServerConnection(socket=self.request, protocol=protocol, close_timeout=0) connection.handshake() - connection.send(json.dumps(self.socks_info)) + for message in connection: + if message == 'socks_info': + connection.send(json.dumps(self.socks_info)) connection.close() diff --git a/test/test_utils.py b/test/test_utils.py index 835774a912..b3de14198e 100644 --- a/test/test_utils.py +++ b/test/test_utils.py @@ -21,7 +21,6 @@ import xml.etree.ElementTree from yt_dlp.compat import ( compat_etree_fromstring, compat_HTMLParseError, - compat_os_name, ) from yt_dlp.utils import ( Config, @@ -49,7 +48,6 @@ from yt_dlp.utils import ( dfxp2srt, encode_base_n, encode_compat_str, - encodeFilename, expand_path, extract_attributes, extract_basic_auth, @@ -69,7 +67,6 @@ from yt_dlp.utils import ( get_elements_html_by_class, get_elements_text_and_html_by_attribute, int_or_none, - intlist_to_bytes, iri_to_uri, is_html, js_to_json, @@ -566,10 +563,10 @@ class TestUtil(unittest.TestCase): self.assertEqual(res_data, {'a': 'b', 'c': 'd'}) def test_shell_quote(self): - args = ['ffmpeg', '-i', encodeFilename('ñ€ß\'.mp4')] + args = ['ffmpeg', '-i', 'ñ€ß\'.mp4'] self.assertEqual( shell_quote(args), - """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if compat_os_name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''') + """ffmpeg -i 'ñ€ß'"'"'.mp4'""" if os.name != 'nt' else '''ffmpeg -i "ñ€ß'.mp4"''') def test_float_or_none(self): self.assertEqual(float_or_none('42.42'), 42.42) @@ -1309,15 +1306,10 @@ class TestUtil(unittest.TestCase): self.assertEqual(clean_html('a:\n "b"'), 'a: "b"') self.assertEqual(clean_html('a
\xa0b'), 'a\nb') - def test_intlist_to_bytes(self): - self.assertEqual( - intlist_to_bytes([0, 1, 127, 128, 255]), - b'\x00\x01\x7f\x80\xff') - def test_args_to_str(self): self.assertEqual( args_to_str(['foo', 'ba/r', '-baz', '2 be', '']), - 'foo ba/r -baz \'2 be\' \'\'' if compat_os_name != 'nt' else 'foo ba/r -baz "2 be" ""', + 'foo ba/r -baz \'2 be\' \'\'' if os.name != 'nt' else 'foo ba/r -baz "2 be" ""', ) def test_parse_filesize(self): @@ -2117,7 +2109,7 @@ Line 1 assert extract_basic_auth('http://user:@foo.bar') == ('http://foo.bar', 'Basic dXNlcjo=') assert extract_basic_auth('http://user:pass@foo.bar') == ('http://foo.bar', 'Basic dXNlcjpwYXNz') - @unittest.skipUnless(compat_os_name == 'nt', 'Only relevant on Windows') + @unittest.skipUnless(os.name == 'nt', 'Only relevant on Windows') def test_windows_escaping(self): tests = [ 'test"&', diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py index 3130deda31..a9a8e4133e 100644 --- a/yt_dlp/YoutubeDL.py +++ b/yt_dlp/YoutubeDL.py @@ -26,7 +26,7 @@ import unicodedata from .cache import Cache from .compat import urllib # isort: split -from .compat import compat_os_name, urllib_req_to_req +from .compat import urllib_req_to_req from .cookies import CookieLoadError, LenientSimpleCookie, load_cookies from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name from .downloader.rtmp import rtmpdump_version @@ -109,7 +109,6 @@ from .utils import ( determine_ext, determine_protocol, encode_compat_str, - encodeFilename, escapeHTML, expand_path, extract_basic_auth, @@ -167,7 +166,7 @@ from .utils.networking import ( ) from .version import CHANNEL, ORIGIN, RELEASE_GIT_HEAD, VARIANT, __version__ -if compat_os_name == 'nt': +if os.name == 'nt': import ctypes @@ -643,7 +642,7 @@ class YoutubeDL: out=stdout, error=sys.stderr, screen=sys.stderr if self.params.get('quiet') else stdout, - console=None if compat_os_name == 'nt' else next( + console=None if os.name == 'nt' else next( filter(supports_terminal_sequences, (sys.stderr, sys.stdout)), None), ) @@ -952,7 +951,7 @@ class YoutubeDL: self._write_string(f'{self._bidi_workaround(message)}\n', self._out_files.error, only_once=only_once) def _send_console_code(self, code): - if compat_os_name == 'nt' or not self._out_files.console: + if os.name == 'nt' or not self._out_files.console: return self._write_string(code, self._out_files.console) @@ -960,7 +959,7 @@ class YoutubeDL: if not self.params.get('consoletitle', False): return message = remove_terminal_sequences(message) - if compat_os_name == 'nt': + if os.name == 'nt': if ctypes.windll.kernel32.GetConsoleWindow(): # c_wchar_p() might not be necessary if `message` is # already of type unicode() @@ -1948,6 +1947,7 @@ class YoutubeDL: 'playlist_uploader_id': ie_result.get('uploader_id'), 'playlist_channel': ie_result.get('channel'), 'playlist_channel_id': ie_result.get('channel_id'), + 'playlist_webpage_url': ie_result.get('webpage_url'), **kwargs, } if strict: @@ -3255,9 +3255,9 @@ class YoutubeDL: if full_filename is None: return - if not self._ensure_dir_exists(encodeFilename(full_filename)): + if not self._ensure_dir_exists(full_filename): return - if not self._ensure_dir_exists(encodeFilename(temp_filename)): + if not self._ensure_dir_exists(temp_filename): return if self._write_description('video', info_dict, @@ -3289,16 +3289,16 @@ class YoutubeDL: if self.params.get('writeannotations', False): annofn = self.prepare_filename(info_dict, 'annotation') if annofn: - if not self._ensure_dir_exists(encodeFilename(annofn)): + if 
not self._ensure_dir_exists(annofn): return - if not self.params.get('overwrites', True) and os.path.exists(encodeFilename(annofn)): + if not self.params.get('overwrites', True) and os.path.exists(annofn): self.to_screen('[info] Video annotations are already present') elif not info_dict.get('annotations'): self.report_warning('There are no annotations to write.') else: try: self.to_screen('[info] Writing video annotations to: ' + annofn) - with open(encodeFilename(annofn), 'w', encoding='utf-8') as annofile: + with open(annofn, 'w', encoding='utf-8') as annofile: annofile.write(info_dict['annotations']) except (KeyError, TypeError): self.report_warning('There are no annotations to write.') @@ -3314,14 +3314,14 @@ class YoutubeDL: f'Cannot write internet shortcut file because the actual URL of "{info_dict["webpage_url"]}" is unknown') return True linkfn = replace_extension(self.prepare_filename(info_dict, 'link'), link_type, info_dict.get('ext')) - if not self._ensure_dir_exists(encodeFilename(linkfn)): + if not self._ensure_dir_exists(linkfn): return False - if self.params.get('overwrites', True) and os.path.exists(encodeFilename(linkfn)): + if self.params.get('overwrites', True) and os.path.exists(linkfn): self.to_screen(f'[info] Internet shortcut (.{link_type}) is already present') return True try: self.to_screen(f'[info] Writing internet shortcut (.{link_type}) to: {linkfn}') - with open(encodeFilename(to_high_limit_path(linkfn)), 'w', encoding='utf-8', + with open(to_high_limit_path(linkfn), 'w', encoding='utf-8', newline='\r\n' if link_type == 'url' else '\n') as linkfile: template_vars = {'url': url} if link_type == 'desktop': @@ -3352,7 +3352,7 @@ class YoutubeDL: if self.params.get('skip_download'): info_dict['filepath'] = temp_filename - info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename))) + info_dict['__finaldir'] = os.path.dirname(os.path.abspath(full_filename)) info_dict['__files_to_move'] = files_to_move replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict)) info_dict['__write_download_archive'] = self.params.get('force_write_download_archive') @@ -3482,7 +3482,7 @@ class YoutubeDL: self.report_file_already_downloaded(dl_filename) dl_filename = dl_filename or temp_filename - info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename))) + info_dict['__finaldir'] = os.path.dirname(os.path.abspath(full_filename)) except network_exceptions as err: self.report_error(f'unable to download video data: {err}') @@ -4297,7 +4297,7 @@ class YoutubeDL: else: try: self.to_screen(f'[info] Writing {label} description to: {descfn}') - with open(encodeFilename(descfn), 'w', encoding='utf-8') as descfile: + with open(descfn, 'w', encoding='utf-8') as descfile: descfile.write(ie_result['description']) except OSError: self.report_error(f'Cannot write {label} description file {descfn}') @@ -4399,7 +4399,7 @@ class YoutubeDL: try: uf = self.urlopen(Request(t['url'], headers=t.get('http_headers', {}))) self.to_screen(f'[info] Writing {thumb_display_id} to: {thumb_filename}') - with open(encodeFilename(thumb_filename), 'wb') as thumbf: + with open(thumb_filename, 'wb') as thumbf: shutil.copyfileobj(uf, thumbf) ret.append((thumb_filename, thumb_filename_final)) t['filepath'] = thumb_filename diff --git a/yt_dlp/__init__.py b/yt_dlp/__init__.py index a7665159bd..a1880bf7dc 100644 --- a/yt_dlp/__init__.py +++ b/yt_dlp/__init__.py @@ -14,7 +14,6 @@ import os import re import traceback -from .compat import 
compat_os_name from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS, CookieLoadError from .downloader.external import get_external_downloader from .extractor import list_extractor_classes @@ -44,7 +43,6 @@ from .utils import ( GeoUtils, PlaylistEntries, SameFileError, - decodeOption, download_range_func, expand_path, float_or_none, @@ -883,8 +881,8 @@ def parse_options(argv=None): 'listsubtitles': opts.listsubtitles, 'subtitlesformat': opts.subtitlesformat, 'subtitleslangs': opts.subtitleslangs, - 'matchtitle': decodeOption(opts.matchtitle), - 'rejecttitle': decodeOption(opts.rejecttitle), + 'matchtitle': opts.matchtitle, + 'rejecttitle': opts.rejecttitle, 'max_downloads': opts.max_downloads, 'prefer_free_formats': opts.prefer_free_formats, 'trim_file_name': opts.trim_file_name, @@ -1053,7 +1051,7 @@ def _real_main(argv=None): ydl.warn_if_short_id(args) # Show a useful error message and wait for keypress if not launched from shell on Windows - if not args and compat_os_name == 'nt' and getattr(sys, 'frozen', False): + if not args and os.name == 'nt' and getattr(sys, 'frozen', False): import ctypes.wintypes import msvcrt diff --git a/yt_dlp/aes.py b/yt_dlp/aes.py index be67b40fe2..0930d36df9 100644 --- a/yt_dlp/aes.py +++ b/yt_dlp/aes.py @@ -3,7 +3,6 @@ from math import ceil from .compat import compat_ord from .dependencies import Cryptodome -from .utils import bytes_to_intlist, intlist_to_bytes if Cryptodome.AES: def aes_cbc_decrypt_bytes(data, key, iv): @@ -17,15 +16,15 @@ if Cryptodome.AES: else: def aes_cbc_decrypt_bytes(data, key, iv): """ Decrypt bytes with AES-CBC using native implementation since pycryptodome is unavailable """ - return intlist_to_bytes(aes_cbc_decrypt(*map(bytes_to_intlist, (data, key, iv)))) + return bytes(aes_cbc_decrypt(*map(list, (data, key, iv)))) def aes_gcm_decrypt_and_verify_bytes(data, key, tag, nonce): """ Decrypt bytes with AES-GCM using native implementation since pycryptodome is unavailable """ - return intlist_to_bytes(aes_gcm_decrypt_and_verify(*map(bytes_to_intlist, (data, key, tag, nonce)))) + return bytes(aes_gcm_decrypt_and_verify(*map(list, (data, key, tag, nonce)))) def aes_cbc_encrypt_bytes(data, key, iv, **kwargs): - return intlist_to_bytes(aes_cbc_encrypt(*map(bytes_to_intlist, (data, key, iv)), **kwargs)) + return bytes(aes_cbc_encrypt(*map(list, (data, key, iv)), **kwargs)) BLOCK_SIZE_BYTES = 16 @@ -221,7 +220,7 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce): j0 = [*nonce, 0, 0, 0, 1] else: fill = (BLOCK_SIZE_BYTES - (len(nonce) % BLOCK_SIZE_BYTES)) % BLOCK_SIZE_BYTES + 8 - ghash_in = nonce + [0] * fill + bytes_to_intlist((8 * len(nonce)).to_bytes(8, 'big')) + ghash_in = nonce + [0] * fill + list((8 * len(nonce)).to_bytes(8, 'big')) j0 = ghash(hash_subkey, ghash_in) # TODO: add nonce support to aes_ctr_decrypt @@ -234,9 +233,9 @@ def aes_gcm_decrypt_and_verify(data, key, tag, nonce): s_tag = ghash( hash_subkey, data - + [0] * pad_len # pad - + bytes_to_intlist((0 * 8).to_bytes(8, 'big') # length of associated data - + ((len(data) * 8).to_bytes(8, 'big'))), # length of data + + [0] * pad_len # pad + + list((0 * 8).to_bytes(8, 'big') # length of associated data + + ((len(data) * 8).to_bytes(8, 'big'))), # length of data ) if tag != aes_ctr_encrypt(s_tag, key, j0): @@ -300,8 +299,8 @@ def aes_decrypt_text(data, password, key_size_bytes): """ NONCE_LENGTH_BYTES = 8 - data = bytes_to_intlist(base64.b64decode(data)) - password = bytes_to_intlist(password.encode()) + data = list(base64.b64decode(data)) + password = 
list(password.encode()) key = password[:key_size_bytes] + [0] * (key_size_bytes - len(password)) key = aes_encrypt(key[:BLOCK_SIZE_BYTES], key_expansion(key)) * (key_size_bytes // BLOCK_SIZE_BYTES) @@ -310,7 +309,7 @@ def aes_decrypt_text(data, password, key_size_bytes): cipher = data[NONCE_LENGTH_BYTES:] decrypted_data = aes_ctr_decrypt(cipher, key, nonce + [0] * (BLOCK_SIZE_BYTES - NONCE_LENGTH_BYTES)) - return intlist_to_bytes(decrypted_data) + return bytes(decrypted_data) RCON = (0x8d, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40, 0x80, 0x1b, 0x36) diff --git a/yt_dlp/compat/__init__.py b/yt_dlp/compat/__init__.py index d820adaf1e..d779620688 100644 --- a/yt_dlp/compat/__init__.py +++ b/yt_dlp/compat/__init__.py @@ -1,5 +1,4 @@ import os -import sys import xml.etree.ElementTree as etree from .compat_utils import passthrough_module @@ -24,33 +23,14 @@ def compat_etree_fromstring(text): return etree.XML(text, parser=etree.XMLParser(target=_TreeBuilder())) -compat_os_name = os._name if os.name == 'java' else os.name - - -def compat_shlex_quote(s): - from ..utils import shell_quote - return shell_quote(s) - - def compat_ord(c): return c if isinstance(c, int) else ord(c) -if compat_os_name == 'nt' and sys.version_info < (3, 8): - # os.path.realpath on Windows does not follow symbolic links - # prior to Python 3.8 (see https://bugs.python.org/issue9949) - def compat_realpath(path): - while os.path.islink(path): - path = os.path.abspath(os.readlink(path)) - return os.path.realpath(path) -else: - compat_realpath = os.path.realpath - - # Python 3.8+ does not honor %HOME% on windows, but this breaks compatibility with youtube-dl # See https://github.com/yt-dlp/yt-dlp/issues/792 # https://docs.python.org/3/library/os.path.html#os.path.expanduser -if compat_os_name in ('nt', 'ce'): +if os.name in ('nt', 'ce'): def compat_expanduser(path): HOME = os.environ.get('HOME') if not HOME: diff --git a/yt_dlp/compat/_deprecated.py b/yt_dlp/compat/_deprecated.py index 607bae9999..445acc1a06 100644 --- a/yt_dlp/compat/_deprecated.py +++ b/yt_dlp/compat/_deprecated.py @@ -8,16 +8,14 @@ passthrough_module(__name__, '.._legacy', callback=lambda attr: warnings.warn( DeprecationWarning(f'{__name__}.{attr} is deprecated'), stacklevel=6)) del passthrough_module -import base64 -import urllib.error -import urllib.parse +import functools # noqa: F401 +import os -compat_str = str -compat_b64decode = base64.b64decode +compat_os_name = os.name +compat_realpath = os.path.realpath -compat_urlparse = urllib.parse -compat_parse_qs = urllib.parse.parse_qs -compat_urllib_parse_unquote = urllib.parse.unquote -compat_urllib_parse_urlencode = urllib.parse.urlencode -compat_urllib_parse_urlparse = urllib.parse.urlparse + +def compat_shlex_quote(s): + from ..utils import shell_quote + return shell_quote(s) diff --git a/yt_dlp/compat/_legacy.py b/yt_dlp/compat/_legacy.py index dfc792eae4..dae2c14592 100644 --- a/yt_dlp/compat/_legacy.py +++ b/yt_dlp/compat/_legacy.py @@ -30,7 +30,7 @@ from asyncio import run as compat_asyncio_run # noqa: F401 from re import Pattern as compat_Pattern # noqa: F401 from re import match as compat_Match # noqa: F401 -from . import compat_expanduser, compat_HTMLParseError, compat_realpath +from . 
import compat_expanduser, compat_HTMLParseError from .compat_utils import passthrough_module from ..dependencies import brotli as compat_brotli # noqa: F401 from ..dependencies import websockets as compat_websockets # noqa: F401 @@ -78,7 +78,7 @@ compat_kwargs = lambda kwargs: kwargs compat_map = map compat_numeric_types = (int, float, complex) compat_os_path_expanduser = compat_expanduser -compat_os_path_realpath = compat_realpath +compat_os_path_realpath = os.path.realpath compat_print = print compat_shlex_split = shlex.split compat_socket_create_connection = socket.create_connection @@ -104,5 +104,12 @@ compat_xml_parse_error = compat_xml_etree_ElementTree_ParseError = etree.ParseEr compat_xpath = lambda xpath: xpath compat_zip = zip workaround_optparse_bug9161 = lambda: None +compat_str = str +compat_b64decode = base64.b64decode +compat_urlparse = urllib.parse +compat_parse_qs = urllib.parse.parse_qs +compat_urllib_parse_unquote = urllib.parse.unquote +compat_urllib_parse_urlencode = urllib.parse.urlencode +compat_urllib_parse_urlparse = urllib.parse.urlparse legacy = [] diff --git a/yt_dlp/compat/functools.py b/yt_dlp/compat/functools.py deleted file mode 100644 index c2e9e90279..0000000000 --- a/yt_dlp/compat/functools.py +++ /dev/null @@ -1,7 +0,0 @@ -# flake8: noqa: F405 -from functools import * # noqa: F403 - -from .compat_utils import passthrough_module - -passthrough_module(__name__, 'functools') -del passthrough_module diff --git a/yt_dlp/compat/urllib/request.py b/yt_dlp/compat/urllib/request.py index ad9fa83c87..dfc7f4a2dc 100644 --- a/yt_dlp/compat/urllib/request.py +++ b/yt_dlp/compat/urllib/request.py @@ -7,9 +7,9 @@ passthrough_module(__name__, 'urllib.request') del passthrough_module -from .. import compat_os_name +import os -if compat_os_name == 'nt': +if os.name == 'nt': # On older Python versions, proxies are extracted from Windows registry erroneously. [1] # If the https proxy in the registry does not have a scheme, urllib will incorrectly add https:// to it. [2] # It is unlikely that the user has actually set it to be https, so we should be fine to safely downgrade @@ -37,4 +37,4 @@ if compat_os_name == 'nt': def getproxies(): return getproxies_environment() or getproxies_registry_patched() -del compat_os_name +del os diff --git a/yt_dlp/cookies.py b/yt_dlp/cookies.py index e673498244..d5b0d3991b 100644 --- a/yt_dlp/cookies.py +++ b/yt_dlp/cookies.py @@ -25,7 +25,6 @@ from .aes import ( aes_gcm_decrypt_and_verify_bytes, unpad_pkcs7, ) -from .compat import compat_os_name from .dependencies import ( _SECRETSTORAGE_UNAVAILABLE_REASON, secretstorage, @@ -343,7 +342,7 @@ def _extract_chrome_cookies(browser_name, profile, keyring, logger): logger.debug(f'cookie version breakdown: {counts}') return jar except PermissionError as error: - if compat_os_name == 'nt' and error.errno == 13: + if os.name == 'nt' and error.errno == 13: message = 'Could not copy Chrome cookie database. 
See https://github.com/yt-dlp/yt-dlp/issues/7271 for more info' logger.error(message) raise DownloadError(message) # force exit diff --git a/yt_dlp/downloader/common.py b/yt_dlp/downloader/common.py index 2e3ea2fc4e..e8dcb37cc3 100644 --- a/yt_dlp/downloader/common.py +++ b/yt_dlp/downloader/common.py @@ -20,9 +20,7 @@ from ..utils import ( Namespace, RetryManager, classproperty, - decodeArgument, deprecation_warning, - encodeFilename, format_bytes, join_nonempty, parse_bytes, @@ -219,7 +217,7 @@ class FileDownloader: def temp_name(self, filename): """Returns a temporary filename for the given filename.""" if self.params.get('nopart', False) or filename == '-' or \ - (os.path.exists(encodeFilename(filename)) and not os.path.isfile(encodeFilename(filename))): + (os.path.exists(filename) and not os.path.isfile(filename)): return filename return filename + '.part' @@ -273,7 +271,7 @@ class FileDownloader: """Try to set the last-modified time of the given file.""" if last_modified_hdr is None: return - if not os.path.isfile(encodeFilename(filename)): + if not os.path.isfile(filename): return timestr = last_modified_hdr if timestr is None: @@ -432,13 +430,13 @@ class FileDownloader: """ nooverwrites_and_exists = ( not self.params.get('overwrites', True) - and os.path.exists(encodeFilename(filename)) + and os.path.exists(filename) ) if not hasattr(filename, 'write'): continuedl_and_exists = ( self.params.get('continuedl', True) - and os.path.isfile(encodeFilename(filename)) + and os.path.isfile(filename) and not self.params.get('nopart', False) ) @@ -448,7 +446,7 @@ class FileDownloader: self._hook_progress({ 'filename': filename, 'status': 'finished', - 'total_bytes': os.path.getsize(encodeFilename(filename)), + 'total_bytes': os.path.getsize(filename), }, info_dict) self._finish_multiline_status() return True, False @@ -489,9 +487,7 @@ class FileDownloader: if not self.params.get('verbose', False): return - str_args = [decodeArgument(a) for a in args] - if exe is None: - exe = os.path.basename(str_args[0]) + exe = os.path.basename(args[0]) - self.write_debug(f'{exe} command line: {shell_quote(str_args)}') + self.write_debug(f'{exe} command line: {shell_quote(args)}') diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py index 6c1ec403c8..7f6b5b45cc 100644 --- a/yt_dlp/downloader/external.py +++ b/yt_dlp/downloader/external.py @@ -23,7 +23,6 @@ from ..utils import ( cli_valueless_option, determine_ext, encodeArgument, - encodeFilename, find_available_port, remove_end, traverse_obj, @@ -67,7 +66,7 @@ class ExternalFD(FragmentFD): 'elapsed': time.time() - started, } if filename != '-': - fsize = os.path.getsize(encodeFilename(tmpfilename)) + fsize = os.path.getsize(tmpfilename) self.try_rename(tmpfilename, filename) status.update({ 'downloaded_bytes': fsize, @@ -184,9 +183,9 @@ class ExternalFD(FragmentFD): dest.write(decrypt_fragment(fragment, src.read())) src.close() if not self.params.get('keep_fragments', False): - self.try_remove(encodeFilename(fragment_filename)) + self.try_remove(fragment_filename) dest.close() - self.try_remove(encodeFilename(f'{tmpfilename}.frag.urls')) + self.try_remove(f'{tmpfilename}.frag.urls') return 0 def _call_process(self, cmd, info_dict): @@ -620,7 +619,7 @@ class FFmpegFD(ExternalFD): args += self._configuration_args(('_o1', '_o', '')) args = [encodeArgument(opt) for opt in args] - args.append(encodeFilename(ffpp._ffmpeg_filename_argument(tmpfilename), True)) + args.append(ffpp._ffmpeg_filename_argument(tmpfilename)) self._debug_cmd(args) 
piped = any(fmt['url'] in ('-', 'pipe:') for fmt in selected_formats) diff --git a/yt_dlp/downloader/fragment.py b/yt_dlp/downloader/fragment.py index 0d00196e2e..98784e7039 100644 --- a/yt_dlp/downloader/fragment.py +++ b/yt_dlp/downloader/fragment.py @@ -9,10 +9,9 @@ import time from .common import FileDownloader from .http import HttpFD from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7 -from ..compat import compat_os_name from ..networking import Request from ..networking.exceptions import HTTPError, IncompleteRead -from ..utils import DownloadError, RetryManager, encodeFilename, traverse_obj +from ..utils import DownloadError, RetryManager, traverse_obj from ..utils.networking import HTTPHeaderDict from ..utils.progress import ProgressCalculator @@ -152,7 +151,7 @@ class FragmentFD(FileDownloader): if self.__do_ytdl_file(ctx): self._write_ytdl_file(ctx) if not self.params.get('keep_fragments', False): - self.try_remove(encodeFilename(ctx['fragment_filename_sanitized'])) + self.try_remove(ctx['fragment_filename_sanitized']) del ctx['fragment_filename_sanitized'] def _prepare_frag_download(self, ctx): @@ -188,7 +187,7 @@ class FragmentFD(FileDownloader): }) if self.__do_ytdl_file(ctx): - ytdl_file_exists = os.path.isfile(encodeFilename(self.ytdl_filename(ctx['filename']))) + ytdl_file_exists = os.path.isfile(self.ytdl_filename(ctx['filename'])) continuedl = self.params.get('continuedl', True) if continuedl and ytdl_file_exists: self._read_ytdl_file(ctx) @@ -390,7 +389,7 @@ class FragmentFD(FileDownloader): def __exit__(self, exc_type, exc_val, exc_tb): pass - if compat_os_name == 'nt': + if os.name == 'nt': def future_result(future): while True: try: diff --git a/yt_dlp/downloader/http.py b/yt_dlp/downloader/http.py index c0165790d1..9c6dd8b799 100644 --- a/yt_dlp/downloader/http.py +++ b/yt_dlp/downloader/http.py @@ -15,7 +15,6 @@ from ..utils import ( ThrottledDownload, XAttrMetadataError, XAttrUnavailableError, - encodeFilename, int_or_none, parse_http_range, try_call, @@ -58,9 +57,8 @@ class HttpFD(FileDownloader): if self.params.get('continuedl', True): # Establish possible resume length - if os.path.isfile(encodeFilename(ctx.tmpfilename)): - ctx.resume_len = os.path.getsize( - encodeFilename(ctx.tmpfilename)) + if os.path.isfile(ctx.tmpfilename): + ctx.resume_len = os.path.getsize(ctx.tmpfilename) ctx.is_resume = ctx.resume_len > 0 @@ -241,7 +239,7 @@ class HttpFD(FileDownloader): ctx.resume_len = byte_counter else: try: - ctx.resume_len = os.path.getsize(encodeFilename(ctx.tmpfilename)) + ctx.resume_len = os.path.getsize(ctx.tmpfilename) except FileNotFoundError: ctx.resume_len = 0 raise RetryDownload(e) diff --git a/yt_dlp/downloader/rtmp.py b/yt_dlp/downloader/rtmp.py index d7ffb3b34d..1b831e5f30 100644 --- a/yt_dlp/downloader/rtmp.py +++ b/yt_dlp/downloader/rtmp.py @@ -8,7 +8,6 @@ from ..utils import ( Popen, check_executable, encodeArgument, - encodeFilename, get_exe_version, ) @@ -179,7 +178,7 @@ class RtmpFD(FileDownloader): return False while retval in (RD_INCOMPLETE, RD_FAILED) and not test and not live: - prevsize = os.path.getsize(encodeFilename(tmpfilename)) + prevsize = os.path.getsize(tmpfilename) self.to_screen(f'[rtmpdump] Downloaded {prevsize} bytes') time.sleep(5.0) # This seems to be needed args = [*basic_args, '--resume'] @@ -187,7 +186,7 @@ class RtmpFD(FileDownloader): args += ['--skip', '1'] args = [encodeArgument(a) for a in args] retval = run_rtmpdump(args) - cursize = os.path.getsize(encodeFilename(tmpfilename)) + cursize = os.path.getsize(tmpfilename) 
if prevsize == cursize and retval == RD_FAILED: break # Some rtmp streams seem abort after ~ 99.8%. Don't complain for those @@ -196,7 +195,7 @@ class RtmpFD(FileDownloader): retval = RD_SUCCESS break if retval == RD_SUCCESS or (test and retval == RD_INCOMPLETE): - fsize = os.path.getsize(encodeFilename(tmpfilename)) + fsize = os.path.getsize(tmpfilename) self.to_screen(f'[rtmpdump] Downloaded {fsize} bytes') self.try_rename(tmpfilename, filename) self._hook_progress({ diff --git a/yt_dlp/downloader/rtsp.py b/yt_dlp/downloader/rtsp.py index e89269fed9..b4b0be7e6e 100644 --- a/yt_dlp/downloader/rtsp.py +++ b/yt_dlp/downloader/rtsp.py @@ -2,7 +2,7 @@ import os import subprocess from .common import FileDownloader -from ..utils import check_executable, encodeFilename +from ..utils import check_executable class RtspFD(FileDownloader): @@ -26,7 +26,7 @@ class RtspFD(FileDownloader): retval = subprocess.call(args) if retval == 0: - fsize = os.path.getsize(encodeFilename(tmpfilename)) + fsize = os.path.getsize(tmpfilename) self.to_screen(f'\r[{args[0]}] {fsize} bytes') self.try_rename(tmpfilename, filename) self._hook_progress({ diff --git a/yt_dlp/extractor/_extractors.py b/yt_dlp/extractor/_extractors.py index 51caefd4d7..967010826e 100644 --- a/yt_dlp/extractor/_extractors.py +++ b/yt_dlp/extractor/_extractors.py @@ -208,6 +208,10 @@ from .bandcamp import ( BandcampUserIE, BandcampWeeklyIE, ) +from .bandlab import ( + BandlabIE, + BandlabPlaylistIE, +) from .bannedvideo import BannedVideoIE from .bbc import ( BBCIE, @@ -942,6 +946,10 @@ from .kaltura import KalturaIE from .kankanews import KankaNewsIE from .karaoketv import KaraoketvIE from .kelbyone import KelbyOneIE +from .kenh14 import ( + Kenh14PlaylistIE, + Kenh14VideoIE, +) from .khanacademy import ( KhanAcademyIE, KhanAcademyUnitIE, @@ -1131,12 +1139,6 @@ from .microsoftembed import ( MicrosoftMediusIE, ) from .microsoftstream import MicrosoftStreamIE -from .mildom import ( - MildomClipIE, - MildomIE, - MildomUserVodIE, - MildomVodIE, -) from .minds import ( MindsChannelIE, MindsGroupIE, @@ -1518,8 +1520,8 @@ from .pgatour import PGATourIE from .philharmoniedeparis import PhilharmonieDeParisIE from .phoenix import PhoenixIE from .photobucket import PhotobucketIE +from .pialive import PiaLiveIE from .piapro import PiaproIE -from .piaulizaportal import PIAULIZAPortalIE from .picarto import ( PicartoIE, PicartoVodIE, @@ -1555,10 +1557,6 @@ from .podbayfm import ( ) from .podchaser import PodchaserIE from .podomatic import PodomaticIE -from .pokemon import ( - PokemonIE, - PokemonWatchIE, -) from .pokergo import ( PokerGoCollectionIE, PokerGoIE, @@ -1649,6 +1647,7 @@ from .radiokapital import ( RadioKapitalIE, RadioKapitalShowIE, ) +from .radioradicale import RadioRadicaleIE from .radiozet import RadioZetPodcastIE from .radlive import ( RadLiveChannelIE, @@ -2251,6 +2250,10 @@ from .ufctv import ( ) from .ukcolumn import UkColumnIE from .uktvplay import UKTVPlayIE +from .uliza import ( + UlizaPlayerIE, + UlizaPortalIE, +) from .umg import UMGDeIE from .unistra import UnistraIE from .unity import UnityIE @@ -2279,10 +2282,6 @@ from .utreon import UtreonIE from .varzesh3 import Varzesh3IE from .vbox7 import Vbox7IE from .veo import VeoIE -from .veoh import ( - VeohIE, - VeohUserIE, -) from .vesti import VestiIE from .vevo import ( VevoIE, diff --git a/yt_dlp/extractor/abematv.py b/yt_dlp/extractor/abematv.py index 66ab083fe0..b1343eed39 100644 --- a/yt_dlp/extractor/abematv.py +++ b/yt_dlp/extractor/abematv.py @@ -6,7 +6,6 @@ import hmac 
import io import json import re -import struct import time import urllib.parse import uuid @@ -18,10 +17,8 @@ from ..networking.exceptions import TransportError from ..utils import ( ExtractorError, OnDemandPagedList, - bytes_to_intlist, decode_base_n, int_or_none, - intlist_to_bytes, time_seconds, traverse_obj, update_url_query, @@ -72,15 +69,15 @@ class AbemaLicenseRH(RequestHandler): }) res = decode_base_n(license_response['k'], table=self._STRTABLE) - encvideokey = bytes_to_intlist(struct.pack('>QQ', res >> 64, res & 0xffffffffffffffff)) + encvideokey = list(res.to_bytes(16, 'big')) h = hmac.new( binascii.unhexlify(self._HKEY), (license_response['cid'] + self.ie._DEVICE_ID).encode(), digestmod=hashlib.sha256) - enckey = bytes_to_intlist(h.digest()) + enckey = list(h.digest()) - return intlist_to_bytes(aes_ecb_decrypt(encvideokey, enckey)) + return bytes(aes_ecb_decrypt(encvideokey, enckey)) class AbemaTVBaseIE(InfoExtractor): diff --git a/yt_dlp/extractor/adn.py b/yt_dlp/extractor/adn.py index c8a2613754..919e1d6af5 100644 --- a/yt_dlp/extractor/adn.py +++ b/yt_dlp/extractor/adn.py @@ -11,11 +11,9 @@ from ..networking.exceptions import HTTPError from ..utils import ( ExtractorError, ass_subtitles_timecode, - bytes_to_intlist, bytes_to_long, float_or_none, int_or_none, - intlist_to_bytes, join_nonempty, long_to_bytes, parse_iso8601, @@ -198,16 +196,16 @@ Format: Marked,Start,End,Style,Name,MarginL,MarginR,MarginV,Effect,Text''' links_url = try_get(options, lambda x: x['video']['url']) or (video_base_url + 'link') self._K = ''.join(random.choices('0123456789abcdef', k=16)) - message = bytes_to_intlist(json.dumps({ + message = list(json.dumps({ 'k': self._K, 't': token, - })) + }).encode()) # Sometimes authentication fails for no good reason, retry with # a different random padding links_data = None for _ in range(3): - padded_message = intlist_to_bytes(pkcs1pad(message, 128)) + padded_message = bytes(pkcs1pad(message, 128)) n, e = self._RSA_KEY encrypted_message = long_to_bytes(pow(bytes_to_long(padded_message), e, n)) authorization = base64.b64encode(encrypted_message).decode() diff --git a/yt_dlp/extractor/anvato.py b/yt_dlp/extractor/anvato.py index ba1d7df372..bd3b19b133 100644 --- a/yt_dlp/extractor/anvato.py +++ b/yt_dlp/extractor/anvato.py @@ -8,10 +8,8 @@ import time from .common import InfoExtractor from ..aes import aes_encrypt from ..utils import ( - bytes_to_intlist, determine_ext, int_or_none, - intlist_to_bytes, join_nonempty, smuggle_url, strip_jsonp, @@ -234,8 +232,8 @@ class AnvatoIE(InfoExtractor): server_time = self._server_time(access_key, video_id) input_data = f'{server_time}~{md5_text(video_data_url)}~{md5_text(server_time)}' - auth_secret = intlist_to_bytes(aes_encrypt( - bytes_to_intlist(input_data[:64]), bytes_to_intlist(self._AUTH_KEY))) + auth_secret = bytes(aes_encrypt( + list(input_data[:64].encode()), list(self._AUTH_KEY))) query = { 'X-Anvato-Adst-Auth': base64.b64encode(auth_secret).decode('ascii'), 'rtyp': 'fp', diff --git a/yt_dlp/extractor/bandlab.py b/yt_dlp/extractor/bandlab.py new file mode 100644 index 0000000000..64aa2ba70d --- /dev/null +++ b/yt_dlp/extractor/bandlab.py @@ -0,0 +1,437 @@ +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + float_or_none, + format_field, + int_or_none, + parse_iso8601, + parse_qs, + truncate_string, + url_or_none, +) +from ..utils.traversal import traverse_obj, value + + +class BandlabBaseIE(InfoExtractor): + def _call_api(self, endpoint, asset_id, **kwargs): + headers = 
kwargs.pop('headers', None) or {} + return self._download_json( + f'https://www.bandlab.com/api/v1.3/{endpoint}/{asset_id}', + asset_id, headers={ + 'accept': 'application/json', + 'referer': 'https://www.bandlab.com/', + 'x-client-id': 'BandLab-Web', + 'x-client-version': '10.1.124', + **headers, + }, **kwargs) + + def _parse_revision(self, revision_data, url=None): + return { + 'vcodec': 'none', + 'media_type': 'revision', + 'extractor_key': BandlabIE.ie_key(), + 'extractor': BandlabIE.IE_NAME, + **traverse_obj(revision_data, { + 'webpage_url': ( + 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/revision/%s')}), filter, any), + 'id': (('revisionId', 'id'), {str}, any), + 'title': ('song', 'name', {str}), + 'track': ('song', 'name', {str}), + 'url': ('mixdown', 'file', {url_or_none}), + 'thumbnail': ('song', 'picture', 'url', {url_or_none}), + 'description': ('description', {str}), + 'uploader': ('creator', 'name', {str}), + 'uploader_id': ('creator', 'username', {str}), + 'timestamp': ('createdOn', {parse_iso8601}), + 'duration': ('mixdown', 'duration', {float_or_none}), + 'view_count': ('counters', 'plays', {int_or_none}), + 'like_count': ('counters', 'likes', {int_or_none}), + 'comment_count': ('counters', 'comments', {int_or_none}), + 'genres': ('genres', ..., 'name', {str}), + }), + } + + def _parse_track(self, track_data, url=None): + return { + 'vcodec': 'none', + 'media_type': 'track', + 'extractor_key': BandlabIE.ie_key(), + 'extractor': BandlabIE.IE_NAME, + **traverse_obj(track_data, { + 'webpage_url': ( + 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any), + 'id': (('revisionId', 'id'), {str}, any), + 'url': ('track', 'sample', 'audioUrl', {url_or_none}), + 'title': ('track', 'name', {str}), + 'track': ('track', 'name', {str}), + 'description': ('caption', {str}), + 'thumbnail': ('track', 'picture', ('original', 'url'), {url_or_none}, any), + 'view_count': ('counters', 'plays', {int_or_none}), + 'like_count': ('counters', 'likes', {int_or_none}), + 'comment_count': ('counters', 'comments', {int_or_none}), + 'duration': ('track', 'sample', 'duration', {float_or_none}), + 'uploader': ('creator', 'name', {str}), + 'uploader_id': ('creator', 'username', {str}), + 'timestamp': ('createdOn', {parse_iso8601}), + }), + } + + def _parse_video(self, video_data, url=None): + return { + 'media_type': 'video', + 'extractor_key': BandlabIE.ie_key(), + 'extractor': BandlabIE.IE_NAME, + **traverse_obj(video_data, { + 'id': ('id', {str}), + 'webpage_url': ( + 'id', ({value(url)}, {format_field(template='https://www.bandlab.com/post/%s')}), filter, any), + 'url': ('video', 'url', {url_or_none}), + 'title': ('caption', {lambda x: x.replace('\n', ' ')}, {truncate_string(left=50)}), + 'description': ('caption', {str}), + 'thumbnail': ('video', 'picture', 'url', {url_or_none}), + 'view_count': ('video', 'counters', 'plays', {int_or_none}), + 'like_count': ('video', 'counters', 'likes', {int_or_none}), + 'comment_count': ('counters', 'comments', {int_or_none}), + 'duration': ('video', 'duration', {float_or_none}), + 'uploader': ('creator', 'name', {str}), + 'uploader_id': ('creator', 'username', {str}), + }), + } + + +class BandlabIE(BandlabBaseIE): + _VALID_URL = [ + r'https?://(?:www\.)?bandlab.com/(?P<url_type>track|post|revision)/(?P<id>[\da-f_-]+)', + r'https?://(?:www\.)?bandlab.com/(?P<url_type>embed)/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)', + ] + _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]'] + _TESTS = [{ + 'url':
'https://www.bandlab.com/track/04b37e88dba24967b9dac8eb8567ff39_07d7f906fc96ee11b75e000d3a428fff', + 'md5': '46f7b43367dd268bbcf0bbe466753b2c', + 'info_dict': { + 'id': '02d7f906-fc96-ee11-b75e-000d3a428fff', + 'ext': 'm4a', + 'uploader_id': 'ender_milze', + 'track': 'sweet black', + 'description': 'composed by juanjn3737', + 'timestamp': 1702171963, + 'view_count': int, + 'like_count': int, + 'duration': 54.629999999999995, + 'title': 'sweet black', + 'upload_date': '20231210', + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/', + 'genres': ['Lofi'], + 'uploader': 'ender milze', + 'comment_count': int, + 'media_type': 'revision', + }, + }, { + # Same track as above but post URL + 'url': 'https://www.bandlab.com/post/07d7f906-fc96-ee11-b75e-000d3a428fff', + 'md5': '46f7b43367dd268bbcf0bbe466753b2c', + 'info_dict': { + 'id': '02d7f906-fc96-ee11-b75e-000d3a428fff', + 'ext': 'm4a', + 'uploader_id': 'ender_milze', + 'track': 'sweet black', + 'description': 'composed by juanjn3737', + 'timestamp': 1702171973, + 'view_count': int, + 'like_count': int, + 'duration': 54.629999999999995, + 'title': 'sweet black', + 'upload_date': '20231210', + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/fa082beb-b856-4730-9170-a57e4e32cc2c/', + 'genres': ['Lofi'], + 'uploader': 'ender milze', + 'comment_count': int, + 'media_type': 'revision', + }, + }, { + # SharedKey Example + 'url': 'https://www.bandlab.com/track/048916c2-c6da-ee11-85f9-6045bd2e11f9?sharedKey=0NNWX8qYAEmI38lWAzCNDA', + 'md5': '15174b57c44440e2a2008be9cae00250', + 'info_dict': { + 'id': '038916c2-c6da-ee11-85f9-6045bd2e11f9', + 'ext': 'm4a', + 'comment_count': int, + 'genres': ['Other'], + 'uploader_id': 'user8353034818103753', + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/51b18363-da23-4b9b-a29c-2933a3e561ca/', + 'timestamp': 1709625771, + 'track': 'PodcastMaerchen4b', + 'duration': 468.14, + 'view_count': int, + 'description': 'Podcast: Neues aus der Märchenwelt', + 'like_count': int, + 'upload_date': '20240305', + 'uploader': 'Erna Wageneder', + 'title': 'PodcastMaerchen4b', + 'media_type': 'revision', + }, + }, { + # Different Revision selected + 'url': 'https://www.bandlab.com/track/130343fc-148b-ea11-96d2-0003ffd1fc09?revId=110343fc-148b-ea11-96d2-0003ffd1fc09', + 'md5': '74e055ef9325d63f37088772fbfe4454', + 'info_dict': { + 'id': '110343fc-148b-ea11-96d2-0003ffd1fc09', + 'ext': 'm4a', + 'timestamp': 1588273294, + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/b612e533-e4f7-4542-9f50-3fcfd8dd822c/', + 'description': 'Final Revision.', + 'title': 'Replay ( Instrumental)', + 'uploader': 'David R Sparks', + 'uploader_id': 'davesnothome69', + 'view_count': int, + 'comment_count': int, + 'track': 'Replay ( Instrumental)', + 'genres': ['Rock'], + 'upload_date': '20200430', + 'like_count': int, + 'duration': 279.43, + 'media_type': 'revision', + }, + }, { + # Video + 'url': 'https://www.bandlab.com/post/5cdf9036-3857-ef11-991a-6045bd36e0d9', + 'md5': '8caa2ef28e86c1dacf167293cfdbeba9', + 'info_dict': { + 'id': '5cdf9036-3857-ef11-991a-6045bd36e0d9', + 'ext': 'mp4', + 'duration': 44.705, + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/videos/67c6cef1-cef6-40d3-831e-a55bc1dcb972/', + 'comment_count': int, + 'title': 'backing vocals', + 'uploader_id': 'marliashya', + 'uploader': 'auraa', + 'like_count': int, + 'description': 'backing vocals', + 'media_type': 'video', + }, + }, { + # Embed Example + 'url': 
'https://www.bandlab.com/embed/?blur=false&id=014de0a4-7d82-ea11-a94c-0003ffd19c0f', + 'md5': 'a4ad05cb68c54faaed9b0a8453a8cf4a', + 'info_dict': { + 'id': '014de0a4-7d82-ea11-a94c-0003ffd19c0f', + 'ext': 'm4a', + 'comment_count': int, + 'genres': ['Electronic'], + 'uploader': 'Charlie Henson', + 'timestamp': 1587328674, + 'upload_date': '20200419', + 'view_count': int, + 'track': 'Positronic Meltdown', + 'duration': 318.55, + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/87165bc3-5439-496e-b1f7-a9f13b541ff2/', + 'description': 'Checkout my tracks at AOMX http://aomxsounds.com/', + 'uploader_id': 'microfreaks', + 'title': 'Positronic Meltdown', + 'like_count': int, + 'media_type': 'revision', + }, + }, { + # Track without revisions available + 'url': 'https://www.bandlab.com/track/55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5', + 'md5': 'f05d68a3769952c2d9257c473e14c15f', + 'info_dict': { + 'id': '55767ac51789ea11a94c0003ffd1fc09_2f007b0a37b94ec7a69bc25ae15108a5', + 'ext': 'm4a', + 'track': 'insame', + 'like_count': int, + 'duration': 84.03, + 'title': 'insame', + 'view_count': int, + 'comment_count': int, + 'uploader': 'Sorakime', + 'uploader_id': 'sorakime', + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/users/572a351a-0f3a-4c6a-ac39-1a5defdeeb1c/', + 'timestamp': 1691162128, + 'upload_date': '20230804', + 'media_type': 'track', + }, + }, { + 'url': 'https://www.bandlab.com/revision/014de0a4-7d82-ea11-a94c-0003ffd19c0f', + 'only_matching': True, + }] + _WEBPAGE_TESTS = [{ + 'url': 'https://phantomluigi.github.io/', + 'info_dict': { + 'id': 'e14223c3-7871-ef11-bdfd-000d3a980db3', + 'ext': 'm4a', + 'view_count': int, + 'upload_date': '20240913', + 'uploader_id': 'phantommusicofficial', + 'timestamp': 1726194897, + 'uploader': 'Phantom', + 'comment_count': int, + 'genres': ['Progresive Rock'], + 'description': 'md5:a38cd668f7a2843295ef284114f18429', + 'duration': 225.23, + 'like_count': int, + 'title': 'Vermilion Pt. 2 (Cover)', + 'track': 'Vermilion Pt. 
2 (Cover)', + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/songs/62b10750-7aef-4f42-ad08-1af52f577e97/', + 'media_type': 'revision', + }, + }] + + def _real_extract(self, url): + display_id, url_type = self._match_valid_url(url).group('id', 'url_type') + + qs = parse_qs(url) + revision_id = traverse_obj(qs, (('revId', 'id'), 0, any)) + if url_type == 'revision': + revision_id = display_id + + revision_data = None + if not revision_id: + post_data = self._call_api( + 'posts', display_id, note='Downloading post data', + query=traverse_obj(qs, {'sharedKey': ('sharedKey', 0)})) + + revision_id = traverse_obj(post_data, (('revisionId', ('revision', 'id')), {str}, any)) + revision_data = traverse_obj(post_data, ('revision', {dict})) + + if not revision_data and not revision_id: + post_type = post_data.get('type') + if post_type == 'Video': + return self._parse_video(post_data, url=url) + if post_type == 'Track': + return self._parse_track(post_data, url=url) + raise ExtractorError(f'Could not extract data for post type {post_type!r}') + + if not revision_data: + revision_data = self._call_api( + 'revisions', revision_id, note='Downloading revision data', query={'edit': 'false'}) + + return self._parse_revision(revision_data, url=url) + + +class BandlabPlaylistIE(BandlabBaseIE): + _VALID_URL = [ + r'https?://(?:www\.)?bandlab.com/(?:[\w]+/)?(?P<type>albums|collections)/(?P<id>[\da-f-]+)', + r'https?://(?:www\.)?bandlab.com/(?P<type>embed)/collection/\?(?:[^#]*&)?id=(?P<id>[\da-f-]+)', + ] + _EMBED_REGEX = [rf'<iframe[^>]+src=[\'"](?P<url>{_VALID_URL[1]})[\'"]'] + _TESTS = [{ + 'url': 'https://www.bandlab.com/davesnothome69/albums/89b79ea6-de42-ed11-b495-00224845aac7', + 'info_dict': { + 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/69507ff3-579a-45be-afca-9e87eddec944/', + 'release_date': '20221003', + 'title': 'Remnants', + 'album': 'Remnants', + 'like_count': int, + 'album_type': 'LP', + 'description': 'A collection of some feel good, rock hits.', + 'comment_count': int, + 'view_count': int, + 'id': '89b79ea6-de42-ed11-b495-00224845aac7', + 'uploader': 'David R Sparks', + 'uploader_id': 'davesnothome69', + }, + 'playlist_count': 10, + }, { + 'url': 'https://www.bandlab.com/slytheband/collections/955102d4-1040-ef11-86c3-000d3a42581b', + 'info_dict': { + 'id': '955102d4-1040-ef11-86c3-000d3a42581b', + 'timestamp': 1720762659, + 'view_count': int, + 'title': 'My Shit 🖤', + 'uploader_id': 'slytheband', + 'uploader': '𝓢𝓛𝓨', + 'upload_date': '20240712', + 'like_count': int, + 'thumbnail': 'https://bandlabimages.azureedge.net/v1.0/collections/2c64ca12-b180-4b76-8587-7a8da76bddc8/', + }, + 'playlist_count': 15, + }, { + # Embeds can contain both albums and collections with the same URL pattern.
This is an album + 'url': 'https://www.bandlab.com/embed/collection/?id=12cc6f7f-951b-ee11-907c-00224844f303', + 'info_dict': { + 'id': '12cc6f7f-951b-ee11-907c-00224844f303', + 'release_date': '20230706', + 'description': 'This is a collection of songs I created when I had an Amiga computer.', + 'view_count': int, + 'title': 'Mark Salud The Amiga Collection', + 'uploader_id': 'mssirmooth1962', + 'comment_count': int, + 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/d618bd7b-0537-40d5-bdd8-61b066e77d59/', + 'like_count': int, + 'uploader': 'Mark Salud', + 'album': 'Mark Salud The Amiga Collection', + 'album_type': 'LP', + }, + 'playlist_count': 24, + }, { + # Tracks without revision id + 'url': 'https://www.bandlab.com/embed/collection/?id=e98aafb5-d932-ee11-b8f0-00224844c719', + 'info_dict': { + 'like_count': int, + 'uploader_id': 'sorakime', + 'comment_count': int, + 'uploader': 'Sorakime', + 'view_count': int, + 'description': 'md5:4ec31c568a5f5a5a2b17572ea64c3825', + 'release_date': '20230812', + 'title': 'Art', + 'album': 'Art', + 'album_type': 'Album', + 'id': 'e98aafb5-d932-ee11-b8f0-00224844c719', + 'thumbnail': 'https://bl-prod-images.azureedge.net/v1.3/albums/20c890de-e94a-4422-828a-2da6377a13c8/', + }, + 'playlist_count': 13, + }, { + 'url': 'https://www.bandlab.com/albums/89b79ea6-de42-ed11-b495-00224845aac7', + 'only_matching': True, + }] + + def _entries(self, album_data): + for post in traverse_obj(album_data, ('posts', lambda _, v: v['type'])): + post_type = post['type'] + if post_type == 'Revision': + yield self._parse_revision(post.get('revision')) + elif post_type == 'Track': + yield self._parse_track(post) + elif post_type == 'Video': + yield self._parse_video(post) + else: + self.report_warning(f'Skipping unknown post type: "{post_type}"') + + def _real_extract(self, url): + playlist_id, playlist_type = self._match_valid_url(url).group('id', 'type') + + endpoints = { + 'albums': ['albums'], + 'collections': ['collections'], + 'embed': ['collections', 'albums'], + }.get(playlist_type) + for endpoint in endpoints: + playlist_data = self._call_api( + endpoint, playlist_id, note=f'Downloading {endpoint[:-1]} data', + fatal=False, expected_status=404) + if not playlist_data.get('errorCode'): + playlist_type = endpoint + break + if error_code := playlist_data.get('errorCode'): + raise ExtractorError(f'Could not find playlist data. 
Error code: "{error_code}"') + + return self.playlist_result( + self._entries(playlist_data), playlist_id, + **traverse_obj(playlist_data, { + 'title': ('name', {str}), + 'description': ('description', {str}), + 'uploader': ('creator', 'name', {str}), + 'uploader_id': ('creator', 'username', {str}), + 'timestamp': ('createdOn', {parse_iso8601}), + 'release_date': ('releaseDate', {lambda x: x.replace('-', '')}, filter), + 'thumbnail': ('picture', ('original', 'url'), {url_or_none}, any), + 'like_count': ('counters', 'likes', {int_or_none}), + 'comment_count': ('counters', 'comments', {int_or_none}), + 'view_count': ('counters', 'plays', {int_or_none}), + }), + **(traverse_obj(playlist_data, { + 'album': ('name', {str}), + 'album_type': ('type', {str}), + }) if playlist_type == 'albums' else {})) diff --git a/yt_dlp/extractor/chaturbate.py b/yt_dlp/extractor/chaturbate.py index 864d61f9c2..a40b7d39c7 100644 --- a/yt_dlp/extractor/chaturbate.py +++ b/yt_dlp/extractor/chaturbate.py @@ -5,6 +5,7 @@ from ..utils import ( ExtractorError, lowercase_escape, url_or_none, + urlencode_postdata, ) @@ -40,14 +41,48 @@ class ChaturbateIE(InfoExtractor): 'only_matching': True, }] - _ROOM_OFFLINE = 'Room is currently offline' + _ERROR_MAP = { + 'offline': 'Room is currently offline', + 'private': 'Room is currently in a private show', + 'away': 'Performer is currently away', + 'password protected': 'Room is password protected', + 'hidden': 'Hidden session in progress', + } - def _real_extract(self, url): - video_id, tld = self._match_valid_url(url).group('id', 'tld') + def _extract_from_api(self, video_id, tld): + response = self._download_json( + f'https://chaturbate.{tld}/get_edge_hls_url_ajax/', video_id, + data=urlencode_postdata({'room_slug': video_id}), + headers={ + **self.geo_verification_headers(), + 'X-Requested-With': 'XMLHttpRequest', + 'Accept': 'application/json', + }, fatal=False, impersonate=True) or {} + status = response.get('room_status') + if status != 'public': + if error := self._ERROR_MAP.get(status): + raise ExtractorError(error, expected=True) + self.report_warning('Falling back to webpage extraction') + return None + + m3u8_url = response.get('url') + if not m3u8_url: + self.raise_geo_restricted() + + return { + 'id': video_id, + 'title': video_id, + 'thumbnail': f'https://roomimg.stream.highwebmedia.com/ri/{video_id}.jpg', + 'is_live': True, + 'age_limit': 18, + 'formats': self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4', live=True), + } + + def _extract_from_html(self, video_id, tld): webpage = self._download_webpage( f'https://chaturbate.{tld}/{video_id}/', video_id, - headers=self.geo_verification_headers()) + headers=self.geo_verification_headers(), impersonate=True) found_m3u8_urls = [] @@ -85,8 +120,8 @@ class ChaturbateIE(InfoExtractor): webpage, 'error', group='error', default=None) if not error: if any(p in webpage for p in ( - self._ROOM_OFFLINE, 'offline_tipping', 'tip_offline')): - error = self._ROOM_OFFLINE + self._ERROR_MAP['offline'], 'offline_tipping', 'tip_offline')): + error = self._ERROR_MAP['offline'] if error: raise ExtractorError(error, expected=True) raise ExtractorError('Unable to find stream URL') @@ -113,3 +148,7 @@ class ChaturbateIE(InfoExtractor): 'is_live': True, 'formats': formats, } + + def _real_extract(self, url): + video_id, tld = self._match_valid_url(url).group('id', 'tld') + return self._extract_from_api(video_id, tld) or self._extract_from_html(video_id, tld) diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py index 
23f6fc6c46..28a3adf936 100644 --- a/yt_dlp/extractor/common.py +++ b/yt_dlp/extractor/common.py @@ -25,7 +25,6 @@ import xml.etree.ElementTree from ..compat import ( compat_etree_fromstring, compat_expanduser, - compat_os_name, urllib_req_to_req, ) from ..cookies import LenientSimpleCookie @@ -1029,7 +1028,7 @@ class InfoExtractor: filename = sanitize_filename(f'{basen}.dump', restricted=True) # Working around MAX_PATH limitation on Windows (see # http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx) - if compat_os_name == 'nt': + if os.name == 'nt': absfilepath = os.path.abspath(filename) if len(absfilepath) > 259: filename = fR'\\?\{absfilepath}' @@ -3768,7 +3767,7 @@ class InfoExtractor: """ Merge subtitle dictionaries, language by language. """ if target is None: target = {} - for d in dicts: + for d in filter(None, dicts): for lang, subs in d.items(): target[lang] = cls._merge_subtitle_items(target.get(lang, []), subs) return target diff --git a/yt_dlp/extractor/ctvnews.py b/yt_dlp/extractor/ctvnews.py index ebed9eb2d3..6d33f85e4a 100644 --- a/yt_dlp/extractor/ctvnews.py +++ b/yt_dlp/extractor/ctvnews.py @@ -1,14 +1,27 @@ +import json import re +import urllib.parse from .common import InfoExtractor -from ..utils import orderedSet +from .ninecninemedia import NineCNineMediaIE +from ..utils import extract_attributes, orderedSet +from ..utils.traversal import find_element, traverse_obj class CTVNewsIE(InfoExtractor): - _VALID_URL = r'https?://(?:.+?\.)?ctvnews\.ca/(?:video\?(?:clip|playlist|bin)Id=|.*?)(?P<id>[0-9.]+)' + _BASE_REGEX = r'https?://(?:[^.]+\.)?ctvnews\.ca/' + _VIDEO_ID_RE = r'(?P<id>\d{5,})' + _PLAYLIST_ID_RE = r'(?P<id>\d\.\d{5,})' + _VALID_URL = [ + rf'{_BASE_REGEX}video/c{_VIDEO_ID_RE}', + rf'{_BASE_REGEX}video(?:-gallery)?/?\?clipId={_VIDEO_ID_RE}', + rf'{_BASE_REGEX}video/?\?(?:playlist|bin)Id={_PLAYLIST_ID_RE}', + rf'{_BASE_REGEX}(?!video/)[^?#]*?{_PLAYLIST_ID_RE}/?(?:$|[?#])', + rf'{_BASE_REGEX}(?!video/)[^?#]+\?binId={_PLAYLIST_ID_RE}', + ] _TESTS = [{ 'url': 'http://www.ctvnews.ca/video?clipId=901995', - 'md5': '9b8624ba66351a23e0b6e1391971f9af', + 'md5': 'b608f466c7fa24b9666c6439d766ab7e', 'info_dict': { 'id': '901995', 'ext': 'flv', @@ -16,6 +29,33 @@ class CTVNewsIE(InfoExtractor): 'description': 'md5:958dd3b4f5bbbf0ed4d045c790d89285', 'timestamp': 1467286284, 'upload_date': '20160630', + 'categories': [], + 'season_number': 0, + 'season': 'Season 0', + 'tags': [], + 'series': 'CTV News National | Archive | Stories 2', + 'season_id': '57981', + 'thumbnail': r're:https?://.*\.jpg$', + 'duration': 764.631, + }, + }, { + 'url': 'https://barrie.ctvnews.ca/video/c3030933-here_s-what_s-making-news-for-nov--15?binId=1272429', + 'md5': '8b8c2b33c5c1803e3c26bc74ff8694d5', + 'info_dict': { + 'id': '3030933', + 'ext': 'flv', + 'title': 'Here’s what’s making news for Nov. 15', + 'description': 'Here are the top stories we’re working on for CTV News at 11 for Nov.
15', + 'thumbnail': 'http://images2.9c9media.com/image_asset/2021_2_22_a602e68e-1514-410e-a67a-e1f7cccbacab_png_2000x1125.jpg', + 'season_id': '58104', + 'season_number': 0, + 'tags': [], + 'season': 'Season 0', + 'categories': [], + 'series': 'CTV News Barrie', + 'upload_date': '20241116', + 'duration': 42.943, + 'timestamp': 1731722452, }, }, { 'url': 'http://www.ctvnews.ca/video?playlistId=1.2966224', @@ -31,6 +71,72 @@ class CTVNewsIE(InfoExtractor): 'id': '1.2876780', }, 'playlist_mincount': 100, + }, { + 'url': 'https://www.ctvnews.ca/it-s-been-23-years-since-toronto-called-in-the-army-after-a-major-snowstorm-1.5736957', + 'info_dict': + { + 'id': '1.5736957', + }, + 'playlist_mincount': 6, + }, { + 'url': 'https://www.ctvnews.ca/business/respondents-to-bank-of-canada-questionnaire-largely-oppose-creating-a-digital-loonie-1.6665797', + 'md5': '24bc4b88cdc17d8c3fc01dfc228ab72c', + 'info_dict': { + 'id': '2695026', + 'ext': 'flv', + 'season_id': '89852', + 'series': 'From CTV News Channel', + 'description': 'md5:796a985a23cacc7e1e2fafefd94afd0a', + 'season': '2023', + 'title': 'Bank of Canada asks public about digital currency', + 'categories': [], + 'tags': [], + 'upload_date': '20230526', + 'season_number': 2023, + 'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg', + 'timestamp': 1685105157, + 'duration': 253.553, + }, + }, { + 'url': 'https://stox.ctvnews.ca/video-gallery?clipId=582589', + 'md5': '135cc592df607d29dddc931f1b756ae2', + 'info_dict': { + 'id': '582589', + 'ext': 'flv', + 'categories': [], + 'timestamp': 1427906183, + 'season_number': 0, + 'duration': 125.559, + 'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg', + 'series': 'CTV News Stox', + 'description': 'CTV original footage of the rise and fall of the Berlin Wall.', + 'title': 'Berlin Wall', + 'season_id': '63817', + 'season': 'Season 0', + 'tags': [], + 'upload_date': '20150401', + }, + }, { + 'url': 'https://ottawa.ctvnews.ca/features/regional-contact/regional-contact-archive?binId=1.1164587#3023759', + 'md5': 'a14c0603557decc6531260791c23cc5e', + 'info_dict': { + 'id': '3023759', + 'ext': 'flv', + 'season_number': 2024, + 'timestamp': 1731798000, + 'season': '2024', + 'episode': 'Episode 125', + 'description': 'CTV News Ottawa at Six', + 'duration': 2712.076, + 'episode_number': 125, + 'upload_date': '20241116', + 'title': 'CTV News Ottawa at Six for Saturday, November 16, 2024', + 'thumbnail': 'http://images2.9c9media.com/image_asset/2019_3_28_35f5afc3-10f6-4d92-b194-8b9a86f55c6a_png_1920x1080.jpg', + 'categories': [], + 'tags': [], + 'series': 'CTV News Ottawa at Six', + 'season_id': '92667', + }, }, { 'url': 'http://www.ctvnews.ca/1.810401', 'only_matching': True, @@ -42,29 +148,35 @@ class CTVNewsIE(InfoExtractor): 'only_matching': True, }] + def _ninecninemedia_url_result(self, clip_id): + return self.url_result(f'9c9media:ctvnews_web:{clip_id}', NineCNineMediaIE, clip_id) + def _real_extract(self, url): page_id = self._match_id(url) - def ninecninemedia_url_result(clip_id): - return { - '_type': 'url_transparent', - 'id': clip_id, - 'url': f'9c9media:ctvnews_web:{clip_id}', - 'ie_key': 'NineCNineMedia', - } + if mobj := re.fullmatch(self._VIDEO_ID_RE, urllib.parse.urlparse(url).fragment): + page_id = mobj.group('id') - if page_id.isdigit(): - return ninecninemedia_url_result(page_id) - else: - webpage = self._download_webpage(f'http://www.ctvnews.ca/{page_id}', page_id, 
query={ - 'ot': 'example.AjaxPageLayout.ot', - 'maxItemsPerPage': 1000000, - }) - entries = [ninecninemedia_url_result(clip_id) for clip_id in orderedSet( - re.findall(r'clip\.id\s*=\s*(\d+);', webpage))] - if not entries: - webpage = self._download_webpage(url, page_id) - if 'getAuthStates("' in webpage: - entries = [ninecninemedia_url_result(clip_id) for clip_id in - self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')] - return self.playlist_result(entries, page_id) + if re.fullmatch(self._VIDEO_ID_RE, page_id): + return self._ninecninemedia_url_result(page_id) + + webpage = self._download_webpage(f'https://www.ctvnews.ca/{page_id}', page_id, query={ + 'ot': 'example.AjaxPageLayout.ot', + 'maxItemsPerPage': 1000000, + }) + entries = [self._ninecninemedia_url_result(clip_id) + for clip_id in orderedSet(re.findall(r'clip\.id\s*=\s*(\d+);', webpage))] + if not entries: + webpage = self._download_webpage(url, page_id) + if 'getAuthStates("' in webpage: + entries = [self._ninecninemedia_url_result(clip_id) for clip_id in + self._search_regex(r'getAuthStates\("([\d+,]+)"', webpage, 'clip ids').split(',')] + else: + entries = [ + self._ninecninemedia_url_result(clip_id) for clip_id in + traverse_obj(webpage, ( + {find_element(tag='jasper-player-container', html=True)}, + {extract_attributes}, 'axis-ids', {json.loads}, ..., 'axisId', {str})) + ] + + return self.playlist_result(entries, page_id) diff --git a/yt_dlp/extractor/dailymotion.py b/yt_dlp/extractor/dailymotion.py index cb1453d3f5..423c11c573 100644 --- a/yt_dlp/extractor/dailymotion.py +++ b/yt_dlp/extractor/dailymotion.py @@ -261,6 +261,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'tags': [], 'view_count': int, 'like_count': int, + 'thumbnail': r're:https://\w+.dmcdn.net/v/WnEY61cmvMxt2Fi6d/x1080', }, }, { # https://geo.dailymotion.com/player/xf7zn.html?playlist=x7wdsj @@ -288,6 +289,25 @@ class DailymotionIE(DailymotionBaseInfoExtractor): 'description': 'À bord du « véloto », l’alternative à la voiture pour la campagne', 'tags': ['biclou', 'vélo', 'véloto', 'campagne', 'voiture', 'environnement', 'véhicules intermédiaires'], }, + }, { + # https://geo.dailymotion.com/player/xry80.html?video=x8vu47w + 'url': 'https://www.metatube.com/en/videos/546765/This-frogs-decorates-Christmas-tree/', + 'info_dict': { + 'id': 'x8vu47w', + 'ext': 'mp4', + 'like_count': int, + 'uploader': 'Metatube', + 'thumbnail': r're:https://\w+.dmcdn.net/v/W1G_S1coGSFTfkTeR/x1080', + 'upload_date': '20240326', + 'view_count': int, + 'timestamp': 1711496732, + 'age_limit': 0, + 'uploader_id': 'x2xpy74', + 'title': 'Está lindas ranitas ponen su arbolito', + 'duration': 28, + 'description': 'Que lindura', + 'tags': [], + }, }] _GEO_BYPASS = False _COMMON_MEDIA_FIELDS = '''description @@ -302,7 +322,7 @@ class DailymotionIE(DailymotionBaseInfoExtractor): yield from super()._extract_embed_urls(url, webpage) for mobj in re.finditer( r'(?s)DM\.player\([^,]+,\s*{.*?video[\'"]?\s*:\s*["\']?(?P<id>[0-9a-zA-Z]+).+?}\s*\);', webpage): - yield from 'https://www.dailymotion.com/embed/video/' + mobj.group('id') + yield 'https://www.dailymotion.com/embed/video/' + mobj.group('id') for mobj in re.finditer( r'(?s)