diff --git a/devscripts/check-porn.py b/devscripts/check-porn.py
index 08f663e4ba..89644a43dc 100644
--- a/devscripts/check-porn.py
+++ b/devscripts/check-porn.py
@@ -13,9 +13,11 @@
 import sys
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import gettestcases
-from yt_dlp.utils import compat_urllib_parse_urlparse, compat_urllib_request
+import urllib.request
+
+from test.helper import gettestcases
+from yt_dlp.utils import compat_urllib_parse_urlparse
 
 if len(sys.argv) > 1:
     METHOD = 'LIST'
@@ -26,7 +28,7 @@ else:
 for test in gettestcases():
     if METHOD == 'EURISTIC':
         try:
-            webpage = compat_urllib_request.urlopen(test['url'], timeout=10).read()
+            webpage = urllib.request.urlopen(test['url'], timeout=10).read()
         except Exception:
             print('\nFail: {}'.format(test['name']))
             continue
diff --git a/devscripts/update-formulae.py b/devscripts/update-formulae.py
index a89872c7b9..02b869304b 100644
--- a/devscripts/update-formulae.py
+++ b/devscripts/update-formulae.py
@@ -1,12 +1,15 @@
 #!/usr/bin/env python3
-import json
+
+# Allow direct execution
 import os
-import re
 import sys
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from yt_dlp.compat import compat_urllib_request
+
+import json
+import re
+import urllib.request
 
 # usage: python3 ./devscripts/update-formulae.py <path-to-formulae-rb> <version>
 # version can be either 0-aligned (yt-dlp version) or normalized (PyPI version)
@@ -15,7 +18,7 @@
 filename, version = sys.argv[1:]
 
 normalized_version = '.'.join(str(int(x)) for x in version.split('.'))
 
-pypi_release = json.loads(compat_urllib_request.urlopen(
+pypi_release = json.loads(urllib.request.urlopen(
     'https://pypi.org/pypi/yt-dlp/%s/json' % normalized_version
 ).read().decode())
diff --git a/test/test_InfoExtractor.py b/test/test_InfoExtractor.py
index 928246668b..f0571c41a9 100644
--- a/test/test_InfoExtractor.py
+++ b/test/test_InfoExtractor.py
@@ -6,10 +6,12 @@
 import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-import threading
-from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
-from yt_dlp.compat import compat_etree_fromstring, compat_http_server
+import http.server
+import threading
+
+from test.helper import FakeYDL, expect_dict, expect_value, http_server_port
+from yt_dlp.compat import compat_etree_fromstring
 from yt_dlp.extractor import YoutubeIE, get_info_extractor
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.utils import (
@@ -23,7 +25,7 @@
 TEAPOT_RESPONSE_STATUS = 418
 TEAPOT_RESPONSE_BODY = "<h1>418 I'm a teapot</h1>"
 
 
-class InfoExtractorTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class InfoExtractorTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -1655,7 +1657,7 @@ jwplayer("mediaplayer").setup({"abouttext":"Visit Indie DB","aboutlink":"http:\/
             # or the underlying `_download_webpage_handle` returning no content
             # when a response matches `expected_status`.
 
-            httpd = compat_http_server.HTTPServer(
+            httpd = http.server.HTTPServer(
                 ('127.0.0.1', 0), InfoExtractorTestRequestHandler)
             port = http_server_port(httpd)
             server_thread = threading.Thread(target=httpd.serve_forever)
diff --git a/test/test_YoutubeDL.py b/test/test_YoutubeDL.py
index 15ad15115f..1153881e94 100644
--- a/test/test_YoutubeDL.py
+++ b/test/test_YoutubeDL.py
@@ -8,15 +8,11 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import copy
 import json
-from test.helper import FakeYDL, assertRegexpMatches
+import urllib.error
 
+from test.helper import FakeYDL, assertRegexpMatches
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import (
-    compat_os_name,
-    compat_setenv,
-    compat_str,
-    compat_urllib_error,
-)
+from yt_dlp.compat import compat_os_name, compat_str
 from yt_dlp.extractor import YoutubeIE
 from yt_dlp.extractor.common import InfoExtractor
 from yt_dlp.postprocessor.common import PostProcessor
@@ -841,14 +837,14 @@ class TestYoutubeDL(unittest.TestCase):
         # test('%(foo|)s', ('', '_'))  # fixme
 
         # Environment variable expansion for prepare_filename
-        compat_setenv('__yt_dlp_var', 'expanded')
+        os.environ['__yt_dlp_var'] = 'expanded'
         envvar = '%__yt_dlp_var%' if compat_os_name == 'nt' else '$__yt_dlp_var'
         test(envvar, (envvar, 'expanded'))
         if compat_os_name == 'nt':
             test('%s%', ('%s%', '%s%'))
-            compat_setenv('s', 'expanded')
+            os.environ['s'] = 'expanded'
             test('%s%', ('%s%', 'expanded'))  # %s% should be expanded before escaping %s
-            compat_setenv('(test)s', 'expanded')
+            os.environ['(test)s'] = 'expanded'
             test('%(test)s%', ('NA%', 'expanded'))  # Environment should take priority over template
 
         # Path expansion and escaping
@@ -1101,7 +1097,7 @@ class TestYoutubeDL(unittest.TestCase):
     def test_urlopen_no_file_protocol(self):
         # see https://github.com/ytdl-org/youtube-dl/issues/8227
         ydl = YDL()
-        self.assertRaises(compat_urllib_error.URLError, ydl.urlopen, 'file:///etc/passwd')
+        self.assertRaises(urllib.error.URLError, ydl.urlopen, 'file:///etc/passwd')
 
     def test_do_not_override_ie_key_in_url_transparent(self):
         ydl = YDL()
diff --git a/test/test_compat.py b/test/test_compat.py
index ce95a6afaf..62bf5a3062 100644
--- a/test/test_compat.py
+++ b/test/test_compat.py
@@ -7,16 +7,15 @@
 import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+import struct
+import urllib.parse
+
 from yt_dlp import compat
 from yt_dlp.compat import (
     compat_etree_fromstring,
     compat_expanduser,
-    compat_getenv,
-    compat_setenv,
     compat_str,
-    compat_struct_unpack,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
 )
@@ -31,26 +30,14 @@ class TestCompat(unittest.TestCase):
         compat.asyncio.events  # Must not raise error
 
-    def test_compat_getenv(self):
-        test_str = 'тест'
-        compat_setenv('yt_dlp_COMPAT_GETENV', test_str)
-        self.assertEqual(compat_getenv('yt_dlp_COMPAT_GETENV'), test_str)
-
-    def test_compat_setenv(self):
-        test_var = 'yt_dlp_COMPAT_SETENV'
-        test_str = 'тест'
-        compat_setenv(test_var, test_str)
-        compat_getenv(test_var)
-        self.assertEqual(compat_getenv(test_var), test_str)
-
     def test_compat_expanduser(self):
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            compat_setenv('HOME', test_str)
+            os.environ['HOME'] = test_str
             self.assertEqual(compat_expanduser('~'), test_str)
         finally:
-            compat_setenv('HOME', old_home or '')
+            os.environ['HOME'] = old_home or ''
 
     def test_compat_urllib_parse_unquote(self):
         self.assertEqual(compat_urllib_parse_unquote('abc%20def'), 'abc def')
@@ -72,8 +59,8 @@ class TestCompat(unittest.TestCase):
             '''(^◣_◢^)っ︻デ═一    ⇀    ⇀    ⇀    ⇀    ⇀    ↶%I%Break%Things%''')
 
     def test_compat_urllib_parse_unquote_plus(self):
-        self.assertEqual(compat_urllib_parse_unquote_plus('abc%20def'), 'abc def')
-        self.assertEqual(compat_urllib_parse_unquote_plus('%7e/abc+def'), '~/abc def')
+        self.assertEqual(urllib.parse.unquote_plus('abc%20def'), 'abc def')
+        self.assertEqual(urllib.parse.unquote_plus('%7e/abc+def'), '~/abc def')
 
     def test_compat_urllib_parse_urlencode(self):
         self.assertEqual(compat_urllib_parse_urlencode({'abc': 'def'}), 'abc=def')
@@ -107,7 +94,7 @@ class TestCompat(unittest.TestCase):
         compat_etree_fromstring(xml)
 
     def test_struct_unpack(self):
-        self.assertEqual(compat_struct_unpack('!B', b'\x00'), (0,))
+        self.assertEqual(struct.unpack('!B', b'\x00'), (0,))
 
 
 if __name__ == '__main__':
diff --git a/test/test_download.py b/test/test_download.py
index c9825c0742..b82f174bb0 100755
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -1,14 +1,18 @@
 #!/usr/bin/env python3
 # Allow direct execution
-import hashlib
-import json
 import os
-import socket
 import sys
 import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+
+import hashlib
+import json
+import socket
+import urllib.error
+import http.client
+
 from test.helper import (
     assertGreaterEqual,
     expect_info_dict,
@@ -19,13 +23,8 @@ from test.helper import (
     report_warning,
     try_rm,
 )
-
-import yt_dlp.YoutubeDL
-from yt_dlp.compat import (
-    compat_http_client,
-    compat_HTTPError,
-    compat_urllib_error,
-)
+import yt_dlp.YoutubeDL  # isort: split
+from yt_dlp.compat import compat_HTTPError
 from yt_dlp.extractor import get_info_extractor
 from yt_dlp.utils import (
     DownloadError,
@@ -167,7 +166,7 @@ def generator(test_case, tname):
                     force_generic_extractor=params.get('force_generic_extractor', False))
             except (DownloadError, ExtractorError) as err:
                 # Check if the exception is not a network related one
-                if not err.exc_info[0] in (compat_urllib_error.URLError, socket.timeout, UnavailableVideoError, compat_http_client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
+                if not err.exc_info[0] in (urllib.error.URLError, socket.timeout, UnavailableVideoError, http.client.BadStatusLine) or (err.exc_info[0] == compat_HTTPError and err.exc_info[1].code == 503):
                     raise
 
                 if try_num == RETRIES:
diff --git a/test/test_downloader_http.py b/test/test_downloader_http.py
index c33308064f..dac7707588 100644
--- a/test/test_downloader_http.py
+++ b/test/test_downloader_http.py
@@ -1,17 +1,18 @@
 #!/usr/bin/env python3
 # Allow direct execution
 import os
-import re
 import sys
 import unittest
+import http.server
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+
+import re
 import threading
 
 from test.helper import http_server_port, try_rm
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_http_server
 from yt_dlp.downloader.http import HttpFD
 from yt_dlp.utils import encodeFilename
 
@@ -21,7 +22,7 @@ TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 
 TEST_SIZE = 10 * 1024
 
 
-class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -78,7 +79,7 @@ class FakeLogger:
 
 class TestHttpFD(unittest.TestCase):
     def setUp(self):
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)
diff --git a/test/test_http.py b/test/test_http.py
index 146df7500c..828797ec71 100644
--- a/test/test_http.py
+++ b/test/test_http.py
@@ -3,20 +3,22 @@
 import os
 import sys
 import unittest
+import http.server
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
+
 import ssl
 import threading
+import urllib.request
 
 from test.helper import http_server_port
 from yt_dlp import YoutubeDL
-from yt_dlp.compat import compat_http_server, compat_urllib_request
 
 
 TEST_DIR = os.path.dirname(os.path.abspath(__file__))
 
 
-class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
     def log_message(self, format, *args):
         pass
@@ -53,7 +55,7 @@ class FakeLogger:
 
 class TestHTTP(unittest.TestCase):
     def setUp(self):
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         self.port = http_server_port(self.httpd)
         self.server_thread = threading.Thread(target=self.httpd.serve_forever)
@@ -64,7 +66,7 @@ class TestHTTP(unittest.TestCase):
 class TestHTTPS(unittest.TestCase):
     def setUp(self):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
-        self.httpd = compat_http_server.HTTPServer(
+        self.httpd = http.server.HTTPServer(
             ('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.load_cert_chain(certfn, None)
@@ -90,7 +92,7 @@ class TestClientCert(unittest.TestCase):
         certfn = os.path.join(TEST_DIR, 'testcert.pem')
         self.certdir = os.path.join(TEST_DIR, 'testdata', 'certificate')
         cacertfn = os.path.join(self.certdir, 'ca.crt')
-        self.httpd = compat_http_server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
+        self.httpd = http.server.HTTPServer(('127.0.0.1', 0), HTTPTestRequestHandler)
         sslctx = ssl.SSLContext(ssl.PROTOCOL_TLS_SERVER)
         sslctx.verify_mode = ssl.CERT_REQUIRED
         sslctx.load_verify_locations(cafile=cacertfn)
@@ -130,7 +132,7 @@ class TestClientCert(unittest.TestCase):
 
 
 def _build_proxy_handler(name):
-    class HTTPTestRequestHandler(compat_http_server.BaseHTTPRequestHandler):
+    class HTTPTestRequestHandler(http.server.BaseHTTPRequestHandler):
         proxy_name = name
 
         def log_message(self, format, *args):
@@ -146,14 +148,14 @@ def _build_proxy_handler(name):
 
 class TestProxy(unittest.TestCase):
     def setUp(self):
-        self.proxy = compat_http_server.HTTPServer(
+        self.proxy = http.server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('normal'))
         self.port = http_server_port(self.proxy)
         self.proxy_thread = threading.Thread(target=self.proxy.serve_forever)
         self.proxy_thread.daemon = True
         self.proxy_thread.start()
 
-        self.geo_proxy = compat_http_server.HTTPServer(
+        self.geo_proxy = http.server.HTTPServer(
             ('127.0.0.1', 0), _build_proxy_handler('geo'))
         self.geo_port = http_server_port(self.geo_proxy)
         self.geo_proxy_thread = threading.Thread(target=self.geo_proxy.serve_forever)
@@ -170,7 +172,7 @@ class TestProxy(unittest.TestCase):
         response = ydl.urlopen(url).read().decode()
         self.assertEqual(response, f'normal: {url}')
 
-        req = compat_urllib_request.Request(url)
+        req = urllib.request.Request(url)
         req.add_header('Ytdl-request-proxy', geo_proxy)
         response = ydl.urlopen(req).read().decode()
         self.assertEqual(response, f'geo: {url}')
diff --git a/test/test_socks.py b/test/test_socks.py
index a8b068cddb..0b8e03a9f7 100644
--- a/test/test_socks.py
+++ b/test/test_socks.py
@@ -8,9 +8,10 @@ sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
 import random
 import subprocess
-from test.helper import FakeYDL, get_params, is_download_test
+import urllib.request
 
-from yt_dlp.compat import compat_str, compat_urllib_request
+from test.helper import FakeYDL, get_params, is_download_test
+from yt_dlp.compat import compat_str
 
 
 @is_download_test
@@ -51,7 +52,7 @@ class TestMultipleSocks(unittest.TestCase):
         if params is None:
             return
         ydl = FakeYDL()
-        req = compat_urllib_request.Request('http://yt-dl.org/ip')
+        req = urllib.request.Request('http://yt-dl.org/ip')
        req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),
@@ -62,7 +63,7 @@ class TestMultipleSocks(unittest.TestCase):
         if params is None:
             return
         ydl = FakeYDL()
-        req = compat_urllib_request.Request('https://yt-dl.org/ip')
+        req = urllib.request.Request('https://yt-dl.org/ip')
         req.add_header('Ytdl-request-proxy', params['secondary_proxy'])
         self.assertEqual(
             ydl.urlopen(req).read().decode(),
diff --git a/test/test_subtitles.py b/test/test_subtitles.py
index 15cd6fc1ed..5120f83e2d 100644
--- a/test/test_subtitles.py
+++ b/test/test_subtitles.py
@@ -6,8 +6,8 @@ import unittest
 
 sys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
 
-from test.helper import FakeYDL, is_download_test, md5
 
+from test.helper import FakeYDL, is_download_test, md5
 from yt_dlp.extractor import (
     NPOIE,
     NRKTVIE,
diff --git a/test/test_utils.py b/test/test_utils.py
index 184c39cff2..38647adb44 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -15,12 +15,9 @@ import json
 import xml.etree.ElementTree
 
 from yt_dlp.compat import (
-    compat_chr,
     compat_etree_fromstring,
-    compat_getenv,
     compat_HTMLParseError,
     compat_os_name,
-    compat_setenv,
 )
 from yt_dlp.utils import (
     Config,
@@ -266,20 +263,20 @@ class TestUtil(unittest.TestCase):
         def env(var):
             return f'%{var}%' if sys.platform == 'win32' else f'${var}'
 
-        compat_setenv('yt_dlp_EXPATH_PATH', 'expanded')
+        os.environ['yt_dlp_EXPATH_PATH'] = 'expanded'
         self.assertEqual(expand_path(env('yt_dlp_EXPATH_PATH')), 'expanded')
 
         old_home = os.environ.get('HOME')
         test_str = R'C:\Documents and Settings\тест\Application Data'
         try:
-            compat_setenv('HOME', test_str)
-            self.assertEqual(expand_path(env('HOME')), compat_getenv('HOME'))
-            self.assertEqual(expand_path('~'), compat_getenv('HOME'))
+            os.environ['HOME'] = test_str
+            self.assertEqual(expand_path(env('HOME')), os.getenv('HOME'))
+            self.assertEqual(expand_path('~'), os.getenv('HOME'))
             self.assertEqual(
                 expand_path('~/%s' % env('yt_dlp_EXPATH_PATH')),
-                '%s/expanded' % compat_getenv('HOME'))
+                '%s/expanded' % os.getenv('HOME'))
         finally:
-            compat_setenv('HOME', old_home or '')
+            os.environ['HOME'] = old_home or ''
 
     def test_prepend_extension(self):
         self.assertEqual(prepend_extension('abc.ext', 'temp'), 'abc.temp.ext')
@@ -1128,7 +1125,7 @@ class TestUtil(unittest.TestCase):
         self.assertEqual(extract_attributes('<e x="décompose&#769;">'), {'x': 'décompose\u0301'})
         # "Narrow" Python builds don't support unicode code points outside BMP.
         try:
-            compat_chr(0x10000)
+            chr(0x10000)
             supports_outside_bmp = True
         except ValueError:
             supports_outside_bmp = False
diff --git a/yt_dlp/YoutubeDL.py b/yt_dlp/YoutubeDL.py
index fc0689882c..df4eef4e2b 100644
--- a/yt_dlp/YoutubeDL.py
+++ b/yt_dlp/YoutubeDL.py
@@ -26,15 +26,8 @@ import urllib.request
 from string import ascii_letters
 
 from .cache import Cache
-from .compat import (
-    HAS_LEGACY as compat_has_legacy,
-    compat_get_terminal_size,
-    compat_os_name,
-    compat_shlex_quote,
-    compat_str,
-    compat_urllib_error,
-    compat_urllib_request,
-)
+from .compat import HAS_LEGACY as compat_has_legacy
+from .compat import compat_os_name, compat_shlex_quote, compat_str
 from .cookies import load_cookies
 from .downloader import FFmpegFD, get_suitable_downloader, shorten_protocol_name
 from .downloader.rtmp import rtmpdump_version
@@ -644,7 +637,7 @@ class YoutubeDL:
             try:
                 import pty
                 master, slave = pty.openpty()
-                width = compat_get_terminal_size().columns
+                width = shutil.get_terminal_size().columns
                 width_args = [] if width is None else ['-w', str(width)]
                 sp_kwargs = {'stdin': subprocess.PIPE, 'stdout': slave, 'stderr': self._out_files.error}
                 try:
@@ -3724,7 +3717,7 @@ class YoutubeDL:
             else:
                 proxies = {'http': opts_proxy, 'https': opts_proxy}
         else:
-            proxies = compat_urllib_request.getproxies()
+            proxies = urllib.request.getproxies()
             # Set HTTPS proxy to HTTP one if given (https://github.com/ytdl-org/youtube-dl/issues/805)
             if 'http' in proxies and 'https' not in proxies:
                 proxies['https'] = proxies['http']
@@ -3740,13 +3733,13 @@ class YoutubeDL:
         # default FileHandler and allows us to disable the file protocol, which
         # can be used for malicious purposes (see
         # https://github.com/ytdl-org/youtube-dl/issues/8227)
-        file_handler = compat_urllib_request.FileHandler()
+        file_handler = urllib.request.FileHandler()
 
         def file_open(*args, **kwargs):
-            raise compat_urllib_error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
+            raise urllib.error.URLError('file:// scheme is explicitly disabled in yt-dlp for security reasons')
         file_handler.file_open = file_open
 
-        opener = compat_urllib_request.build_opener(
+        opener = urllib.request.build_opener(
             proxy_handler, https_handler, cookie_processor, ydlh, redirect_handler, data_handler, file_handler)
 
         # Delete the default user-agent header, which would otherwise apply in
diff --git a/yt_dlp/__init__.py b/yt_dlp/__init__.py
index ee873e00c0..a5921c5656 100644
--- a/yt_dlp/__init__.py
+++ b/yt_dlp/__init__.py
@@ -3,13 +3,14 @@ f'You are using an unsupported version of Python. Only Python versions 3.6 and a
 __license__ = 'Public Domain'
 
+import getpass
 import itertools
 import optparse
 import os
 import re
 import sys
 
-from .compat import compat_getpass, compat_shlex_quote
+from .compat import compat_shlex_quote
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader import FileDownloader
 from .downloader.external import get_external_downloader
@@ -531,9 +532,9 @@ def validate_options(opts):
 
     # Ask for passwords
     if opts.username is not None and opts.password is None:
-        opts.password = compat_getpass('Type account password and press [Return]: ')
+        opts.password = getpass.getpass('Type account password and press [Return]: ')
     if opts.ap_username is not None and opts.ap_password is None:
-        opts.ap_password = compat_getpass('Type TV provider account password and press [Return]: ')
+        opts.ap_password = getpass.getpass('Type TV provider account password and press [Return]: ')
 
     return warnings, deprecation_warnings
diff --git a/yt_dlp/cache.py b/yt_dlp/cache.py
index e3f8a7dab2..83351b7976 100644
--- a/yt_dlp/cache.py
+++ b/yt_dlp/cache.py
@@ -6,7 +6,6 @@ import re
 import shutil
 import traceback
 
-from .compat import compat_getenv
 from .utils import expand_path, write_json_file
 
 
@@ -17,7 +16,7 @@ class Cache:
     def _get_root_dir(self):
         res = self._ydl.params.get('cachedir')
         if res is None:
-            cache_root = compat_getenv('XDG_CACHE_HOME', '~/.cache')
+            cache_root = os.getenv('XDG_CACHE_HOME', '~/.cache')
             res = os.path.join(cache_root, 'yt-dlp')
         return expand_path(res)
diff --git a/yt_dlp/compat/_deprecated.py b/yt_dlp/compat/_deprecated.py
index 390f765770..342f1f80d6 100644
--- a/yt_dlp/compat/_deprecated.py
+++ b/yt_dlp/compat/_deprecated.py
@@ -1,52 +1,16 @@
 """Deprecated - New code should avoid these"""
 import base64
-import getpass
-import html
-import html.parser
-import http
-import http.client
-import http.cookiejar
-import http.cookies
-import http.server
-import itertools
-import os
-import shutil
-import struct
-import tokenize
-import urllib
+import urllib.error
+import urllib.parse
+
+compat_str = str
 
 compat_b64decode = base64.b64decode
-compat_chr = chr
-compat_cookiejar = http.cookiejar
-compat_cookiejar_Cookie = http.cookiejar.Cookie
-compat_cookies_SimpleCookie = http.cookies.SimpleCookie
-compat_get_terminal_size = shutil.get_terminal_size
-compat_getenv = os.getenv
-compat_getpass = getpass.getpass
-compat_html_entities = html.entities
-compat_html_entities_html5 = html.entities.html5
-compat_HTMLParser = html.parser.HTMLParser
-compat_http_client = http.client
-compat_http_server = http.server
+
 compat_HTTPError = urllib.error.HTTPError
-compat_itertools_count = itertools.count
+compat_urlparse = urllib.parse
 compat_parse_qs = urllib.parse.parse_qs
-compat_str = str
-compat_struct_pack = struct.pack
-compat_struct_unpack = struct.unpack
-compat_tokenize_tokenize = tokenize.tokenize
-compat_urllib_error = urllib.error
 compat_urllib_parse_unquote = urllib.parse.unquote
-compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
 compat_urllib_parse_urlencode = urllib.parse.urlencode
 compat_urllib_parse_urlparse = urllib.parse.urlparse
-compat_urllib_request = urllib.request
-compat_urlparse = compat_urllib_parse = urllib.parse
-
-
-def compat_setenv(key, value, env=os.environ):
-    env[key] = value
-
-
-__all__ = [x for x in globals() if x.startswith('compat_')]
diff --git a/yt_dlp/compat/_legacy.py b/yt_dlp/compat/_legacy.py
index 79461617d5..2b33638b66 100644
--- a/yt_dlp/compat/_legacy.py
+++ b/yt_dlp/compat/_legacy.py
@@ -2,15 +2,23 @@
 import collections
 import ctypes
-import http
+import getpass
+import html.entities
+import html.parser
 import http.client
 import http.cookiejar
 import http.cookies
 import http.server
+import itertools
+import os
 import shlex
+import shutil
 import socket
 import struct
-import urllib
+import tokenize
+import urllib.error
+import urllib.parse
+import urllib.request
 import xml.etree.ElementTree as etree
 from subprocess import DEVNULL
@@ -32,12 +40,17 @@ def compat_ctypes_WINFUNCTYPE(*args, **kwargs):
     return ctypes.WINFUNCTYPE(*args, **kwargs)
 
 
+def compat_setenv(key, value, env=os.environ):
+    env[key] = value
+
+
 compat_basestring = str
 compat_collections_abc = collections.abc
 compat_cookies = http.cookies
 compat_etree_Element = etree.Element
 compat_etree_register_namespace = etree.register_namespace
 compat_filter = filter
+compat_getenv = os.getenv
 compat_input = input
 compat_integer_types = (int, )
 compat_kwargs = lambda kwargs: kwargs
@@ -53,9 +66,28 @@ compat_urllib_parse_quote_plus = urllib.parse.quote_plus
 compat_urllib_parse_unquote_to_bytes = urllib.parse.unquote_to_bytes
 compat_urllib_parse_urlunparse = urllib.parse.urlunparse
 compat_urllib_request_DataHandler = urllib.request.DataHandler
+compat_urllib_request = urllib.request
 compat_urllib_response = urllib.response
 compat_urlretrieve = urllib.request.urlretrieve
 compat_xml_parse_error = etree.ParseError
 compat_xpath = lambda xpath: xpath
 compat_zip = zip
 workaround_optparse_bug9161 = lambda: None
+compat_getpass = getpass.getpass
+compat_chr = chr
+compat_urllib_parse = urllib.parse
+compat_itertools_count = itertools.count
+compat_cookiejar = http.cookiejar
+compat_cookiejar_Cookie = http.cookiejar.Cookie
+compat_cookies_SimpleCookie = http.cookies.SimpleCookie
+compat_get_terminal_size = shutil.get_terminal_size
+compat_html_entities = html.entities
+compat_html_entities_html5 = html.entities.html5
+compat_tokenize_tokenize = tokenize.tokenize
+compat_HTMLParser = html.parser.HTMLParser
+compat_http_client = http.client
+compat_http_server = http.server
+compat_struct_pack = struct.pack
+compat_struct_unpack = struct.unpack
+compat_urllib_error = urllib.error
+compat_urllib_parse_unquote_plus = urllib.parse.unquote_plus
diff --git a/yt_dlp/cookies.py b/yt_dlp/cookies.py
index a74701750f..6811a72881 100644
--- a/yt_dlp/cookies.py
+++ b/yt_dlp/cookies.py
@@ -11,13 +11,14 @@ import time
 from datetime import datetime, timedelta, timezone
 from enum import Enum, auto
 from hashlib import pbkdf2_hmac
+import http.cookiejar
 
 from .aes import (
     aes_cbc_decrypt_bytes,
     aes_gcm_decrypt_and_verify_bytes,
     unpad_pkcs7,
 )
-from .compat import compat_b64decode, compat_cookiejar_Cookie
+from .compat import compat_b64decode
 from .dependencies import (
     _SECRETSTORAGE_UNAVAILABLE_REASON,
     secretstorage,
@@ -142,7 +143,7 @@ def _extract_firefox_cookies(profile, logger):
             total_cookie_count = len(table)
             for i, (host, name, value, path, expiry, is_secure) in enumerate(table):
                 progress_bar.print(f'Loading cookie {i: 6d}/{total_cookie_count: 6d}')
-                cookie = compat_cookiejar_Cookie(
+                cookie = http.cookiejar.Cookie(
                     version=0, name=name, value=value, port=None, port_specified=False,
                     domain=host, domain_specified=bool(host), domain_initial_dot=host.startswith('.'),
                     path=path, path_specified=bool(path), secure=is_secure, expires=expiry, discard=False,
@@ -297,7 +298,7 @@ def _process_chrome_cookie(decryptor, host_key, name, value, encrypted_value, pa
         if value is None:
             return is_encrypted, None
 
-    return is_encrypted, compat_cookiejar_Cookie(
+    return is_encrypted, http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=host_key, domain_specified=bool(host_key), domain_initial_dot=host_key.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expires_utc, discard=False,
@@ -589,7 +590,7 @@ def _parse_safari_cookies_record(data, jar, logger):
 
     p.skip_to(record_size, 'space at the end of the record')
 
-    cookie = compat_cookiejar_Cookie(
+    cookie = http.cookiejar.Cookie(
         version=0, name=name, value=value, port=None, port_specified=False,
         domain=domain, domain_specified=bool(domain), domain_initial_dot=domain.startswith('.'),
         path=path, path_specified=bool(path), secure=is_secure, expires=expiration_date, discard=False,
diff --git a/yt_dlp/downloader/external.py b/yt_dlp/downloader/external.py
index a1cb07e056..dee945affd 100644
--- a/yt_dlp/downloader/external.py
+++ b/yt_dlp/downloader/external.py
@@ -7,7 +7,6 @@ import time
 
 from .fragment import FragmentFD
 from ..compat import functools  # isort: split
-from ..compat import compat_setenv
 from ..postprocessor.ffmpeg import EXT_TO_OUT_FORMATS, FFmpegPostProcessor
 from ..utils import (
     Popen,
@@ -403,8 +402,8 @@ class FFmpegFD(ExternalFD):
                 # We could switch to the following code if we are able to detect version properly
                 # args += ['-http_proxy', proxy]
                 env = os.environ.copy()
-                compat_setenv('HTTP_PROXY', proxy, env=env)
-                compat_setenv('http_proxy', proxy, env=env)
+                env['HTTP_PROXY'] = proxy
+                env['http_proxy'] = proxy
 
         protocol = info_dict.get('protocol')
diff --git a/yt_dlp/downloader/f4m.py b/yt_dlp/downloader/f4m.py
index 6609447c65..f26afb4543 100644
--- a/yt_dlp/downloader/f4m.py
+++ b/yt_dlp/downloader/f4m.py
@@ -1,14 +1,13 @@
 import io
 import itertools
+import struct
 import time
+import urllib.error
 
 from .fragment import FragmentFD
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
-    compat_struct_pack,
-    compat_struct_unpack,
-    compat_urllib_error,
     compat_urllib_parse_urlparse,
     compat_urlparse,
 )
@@ -35,13 +34,13 @@ class FlvReader(io.BytesIO):
     # Utility functions for reading numbers and strings
     def read_unsigned_long_long(self):
-        return compat_struct_unpack('!Q', self.read_bytes(8))[0]
+        return struct.unpack('!Q', self.read_bytes(8))[0]
 
     def read_unsigned_int(self):
-        return compat_struct_unpack('!I', self.read_bytes(4))[0]
+        return struct.unpack('!I', self.read_bytes(4))[0]
 
     def read_unsigned_char(self):
-        return compat_struct_unpack('!B', self.read_bytes(1))[0]
+        return struct.unpack('!B', self.read_bytes(1))[0]
 
     def read_string(self):
         res = b''
@@ -203,11 +202,11 @@ def build_fragments_list(boot_info):
 
 def write_unsigned_int(stream, val):
-    stream.write(compat_struct_pack('!I', val))
+    stream.write(struct.pack('!I', val))
 
 
 def write_unsigned_int_24(stream, val):
-    stream.write(compat_struct_pack('!I', val)[1:])
+    stream.write(struct.pack('!I', val)[1:])
 
 
 def write_flv_header(stream):
@@ -411,7 +410,7 @@ class F4mFD(FragmentFD):
                     if box_type == b'mdat':
                         self._append_fragment(ctx, box_data)
                         break
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 if live and (err.code == 404 or err.code == 410):
                     # We didn't keep up with the live window. Continue
                     # with the next available fragment.
diff --git a/yt_dlp/downloader/fragment.py b/yt_dlp/downloader/fragment.py
index 7c27f6cdf2..3535e0e7d1 100644
--- a/yt_dlp/downloader/fragment.py
+++ b/yt_dlp/downloader/fragment.py
@@ -4,12 +4,14 @@ import http.client
 import json
 import math
 import os
+import struct
 import time
+import urllib.error
 
 from .common import FileDownloader
 from .http import HttpFD
 from ..aes import aes_cbc_decrypt_bytes, unpad_pkcs7
-from ..compat import compat_os_name, compat_struct_pack, compat_urllib_error
+from ..compat import compat_os_name
 from ..utils import (
     DownloadError,
     encodeFilename,
@@ -348,7 +350,7 @@ class FragmentFD(FileDownloader):
             decrypt_info = fragment.get('decrypt_info')
             if not decrypt_info or decrypt_info['METHOD'] != 'AES-128':
                 return frag_content
-            iv = decrypt_info.get('IV') or compat_struct_pack('>8xq', fragment['media_sequence'])
+            iv = decrypt_info.get('IV') or struct.pack('>8xq', fragment['media_sequence'])
             decrypt_info['KEY'] = decrypt_info.get('KEY') or _get_key(info_dict.get('_decryption_key_url') or decrypt_info['URI'])
             # Don't decrypt the content in tests since the data is explicitly truncated and it's not to a valid block
             # size (see https://github.com/ytdl-org/youtube-dl/pull/27660). Tests only care that the correct data downloaded,
@@ -457,7 +459,7 @@ class FragmentFD(FileDownloader):
                     if self._download_fragment(ctx, fragment['url'], info_dict, headers):
                         break
                     return
-                except (compat_urllib_error.HTTPError, http.client.IncompleteRead) as err:
+                except (urllib.error.HTTPError, http.client.IncompleteRead) as err:
                     # Unavailable (possibly temporary) fragments may be served.
                     # First we try to retry then either skip or abort.
                     # See https://github.com/ytdl-org/youtube-dl/issues/10165,
diff --git a/yt_dlp/downloader/http.py b/yt_dlp/downloader/http.py
index c6b6627a56..ca5b264985 100644
--- a/yt_dlp/downloader/http.py
+++ b/yt_dlp/downloader/http.py
@@ -3,9 +3,10 @@ import random
 import socket
 import ssl
 import time
+import urllib.error
+import http.client
 
 from .common import FileDownloader
-from ..compat import compat_http_client, compat_urllib_error
 from ..utils import (
     ContentTooShortError,
     ThrottledDownload,
@@ -24,7 +25,7 @@
 RESPONSE_READ_EXCEPTIONS = (
     socket.timeout,  # compat: py < 3.10
     ConnectionError,
     ssl.SSLError,
-    compat_http_client.HTTPException
+    http.client.HTTPException
 )
 
@@ -155,7 +156,7 @@ class HttpFD(FileDownloader):
                     ctx.resume_len = 0
                     ctx.open_mode = 'wb'
                     ctx.data_len = ctx.content_len = int_or_none(ctx.data.info().get('Content-length', None))
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 if err.code == 416:
                     # Unable to resume (requested range not satisfiable)
                     try:
@@ -163,7 +164,7 @@ class HttpFD(FileDownloader):
                         ctx.data = self.ydl.urlopen(
                             sanitized_Request(url, request_data, headers))
                         content_length = ctx.data.info()['Content-Length']
-                    except compat_urllib_error.HTTPError as err:
+                    except urllib.error.HTTPError as err:
                         if err.code < 500 or err.code >= 600:
                             raise
                         else:
@@ -196,7 +197,7 @@ class HttpFD(FileDownloader):
                     # Unexpected HTTP error
                     raise
                 raise RetryDownload(err)
-            except compat_urllib_error.URLError as err:
+            except urllib.error.URLError as err:
                 if isinstance(err.reason, ssl.CertificateError):
                     raise
                 raise RetryDownload(err)
diff --git a/yt_dlp/downloader/ism.py b/yt_dlp/downloader/ism.py
index 9efc5e4d9a..8a0071ab39 100644
--- a/yt_dlp/downloader/ism.py
+++ b/yt_dlp/downloader/ism.py
@@ -2,9 +2,9 @@ import binascii
 import io
 import struct
 import time
+import urllib.error
 
 from .fragment import FragmentFD
-from ..compat import compat_urllib_error
 
 u8 = struct.Struct('>B')
 u88 = struct.Struct('>Bx')
@@ -268,7 +268,7 @@ class IsmFD(FragmentFD):
                     extra_state['ism_track_written'] = True
                 self._append_fragment(ctx, frag_content)
                 break
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 count += 1
                 if count <= fragment_retries:
                     self.report_retry_fragment(err, frag_index, count, fragment_retries)
diff --git a/yt_dlp/downloader/youtube_live_chat.py b/yt_dlp/downloader/youtube_live_chat.py
index cad6822232..5334c6c956 100644
--- a/yt_dlp/downloader/youtube_live_chat.py
+++ b/yt_dlp/downloader/youtube_live_chat.py
@@ -1,8 +1,8 @@
 import json
 import time
+import urllib.error
 
 from .fragment import FragmentFD
-from ..compat import compat_urllib_error
 from ..utils import RegexNotFoundError, dict_get, int_or_none, try_get
@@ -128,7 +128,7 @@ class YoutubeLiveChatFD(FragmentFD):
                 elif info_dict['protocol'] == 'youtube_live_chat':
                     continuation_id, offset, click_tracking_params = parse_actions_live(live_chat_continuation)
                 return True, continuation_id, offset, click_tracking_params
-            except compat_urllib_error.HTTPError as err:
+            except urllib.error.HTTPError as err:
                 count += 1
                 if count <= fragment_retries:
                     self.report_retry_fragment(err, frag_index, count, fragment_retries)
diff --git a/yt_dlp/extractor/abematv.py b/yt_dlp/extractor/abematv.py
index 6b8e6e31b6..0706f85594 100644
--- a/yt_dlp/extractor/abematv.py
+++ b/yt_dlp/extractor/abematv.py
@@ -7,12 +7,13 @@ import json
 import re
 import struct
 import time
+import urllib.request
 import urllib.response
 import uuid
 
 from .common import InfoExtractor
 from ..aes import aes_ecb_decrypt
-from ..compat import compat_urllib_parse_urlparse, compat_urllib_request
+from ..compat import compat_urllib_parse_urlparse
 from ..utils import (
     ExtractorError,
     bytes_to_intlist,
@@ -33,7 +34,7 @@ def add_opener(ydl, handler):
     ''' Add a handler for opening URLs, like _download_webpage '''
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     ydl._opener.add_handler(handler)
@@ -46,7 +47,7 @@ def remove_opener(ydl, handler):
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L426
     # https://github.com/python/cpython/blob/main/Lib/urllib/request.py#L605
     opener = ydl._opener
-    assert isinstance(ydl._opener, compat_urllib_request.OpenerDirector)
+    assert isinstance(ydl._opener, urllib.request.OpenerDirector)
     if isinstance(handler, (type, tuple)):
         find_cp = lambda x: isinstance(x, handler)
     else:
@@ -96,7 +97,7 @@ def remove_opener(ydl, handler):
     opener.handlers[:] = [x for x in opener.handlers if not find_cp(x)]
 
 
-class AbemaLicenseHandler(compat_urllib_request.BaseHandler):
+class AbemaLicenseHandler(urllib.request.BaseHandler):
     handler_order = 499
     STRTABLE = '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz'
     HKEY = b'3AF0298C219469522A313570E8583005A642E73EDD58E3EA2FB7339D3DF1597E'
diff --git a/yt_dlp/extractor/adobepass.py b/yt_dlp/extractor/adobepass.py
index 66e46832de..a2666c2b83 100644
--- a/yt_dlp/extractor/adobepass.py
+++ b/yt_dlp/extractor/adobepass.py
@@ -1,3 +1,4 @@
+import getpass
 import json
 import re
 import time
@@ -5,19 +6,15 @@ import urllib.error
 import xml.etree.ElementTree as etree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_urlparse,
-    compat_getpass
-)
+from ..compat import compat_urlparse
 from ..utils import (
-    unescapeHTML,
-    urlencode_postdata,
-    unified_timestamp,
-    ExtractorError,
     NO_DEFAULT,
+    ExtractorError,
+    unescapeHTML,
+    unified_timestamp,
+    urlencode_postdata,
 )
-
 
 MSO_INFO = {
     'DTV': {
         'name': 'DIRECTV',
@@ -1506,7 +1503,7 @@ class AdobePassIE(InfoExtractor):
                         'send_confirm_link': False,
                         'send_token': True
                     }))
-                philo_code = compat_getpass('Type auth code you have received [Return]: ')
+                philo_code = getpass.getpass('Type auth code you have received [Return]: ')
                 self._download_webpage(
                     'https://idp.philo.com/auth/update/login_code', video_id, 'Submitting token', data=urlencode_postdata({
                         'token': philo_code
diff --git a/yt_dlp/extractor/archiveorg.py b/yt_dlp/extractor/archiveorg.py
index 179602d466..1ca6ddc4d8 100644
--- a/yt_dlp/extractor/archiveorg.py
+++ b/yt_dlp/extractor/archiveorg.py
@@ -1,36 +1,34 @@
-import re
 import json
+import re
+import urllib.parse
 
 from .common import InfoExtractor
-from .youtube import YoutubeIE, YoutubeBaseInfoExtractor
-from ..compat import (
-    compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
-    compat_HTTPError
-)
+from .youtube import YoutubeBaseInfoExtractor, YoutubeIE
+from ..compat import compat_HTTPError, compat_urllib_parse_unquote
 from ..utils import (
+    KNOWN_EXTENSIONS,
+    ExtractorError,
+    HEADRequest,
     bug_reports_message,
     clean_html,
     dict_get,
     extract_attributes,
-    ExtractorError,
     get_element_by_id,
-    HEADRequest,
     int_or_none,
     join_nonempty,
-    KNOWN_EXTENSIONS,
     merge_dicts,
     mimetype2ext,
     orderedSet,
     parse_duration,
     parse_qs,
-    str_to_int,
     str_or_none,
+    str_to_int,
     traverse_obj,
     try_get,
     unified_strdate,
     unified_timestamp,
+    url_or_none,
     urlhandle_detect_ext,
-    url_or_none
 )
@@ -143,7 +141,7 @@ class ArchiveOrgIE(InfoExtractor):
             return json.loads(extract_attributes(element)['value'])
 
     def _real_extract(self, url):
-        video_id = compat_urllib_parse_unquote_plus(self._match_id(url))
+        video_id = urllib.parse.unquote_plus(self._match_id(url))
         identifier, entry_id = (video_id.split('/', 1) + [None])[:2]
 
         # Archive.org metadata API doesn't clearly demarcate playlist entries
diff --git a/yt_dlp/extractor/bbc.py b/yt_dlp/extractor/bbc.py
index 9cb019a491..5ddeef7b5d 100644
--- a/yt_dlp/extractor/bbc.py
+++ b/yt_dlp/extractor/bbc.py
@@ -1,16 +1,12 @@
-import xml.etree.ElementTree
 import functools
 import itertools
 import json
 import re
+import urllib.error
+import xml.etree.ElementTree
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_error,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
     ExtractorError,
     OnDemandPagedList,
@@ -391,7 +387,7 @@ class BBCCoUkIE(InfoExtractor):
                         href, programme_id, ext='mp4', entry_protocol='m3u8_native',
                         m3u8_id=format_id, fatal=False)
                 except ExtractorError as e:
-                    if not (isinstance(e.exc_info[1], compat_urllib_error.HTTPError)
+                    if not (isinstance(e.exc_info[1], urllib.error.HTTPError)
                             and e.exc_info[1].code in (403, 404)):
                         raise
                     fmts = []
diff --git a/yt_dlp/extractor/cda.py b/yt_dlp/extractor/cda.py
index 9b257bee97..6d01c60d5e 100644
--- a/yt_dlp/extractor/cda.py
+++ b/yt_dlp/extractor/cda.py
@@ -1,13 +1,9 @@
 import codecs
-import re
 import json
+import re
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_chr,
-    compat_ord,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_ord, compat_urllib_parse_unquote
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -16,8 +12,8 @@ from ..utils import (
     multipart_encode,
     parse_duration,
     random_birthday,
-    urljoin,
     try_get,
+    urljoin,
 )
@@ -144,7 +140,7 @@ class CDAIE(InfoExtractor):
             b = []
             for c in a:
                 f = compat_ord(c)
-                b.append(compat_chr(33 + (f + 14) % 94) if 33 <= f <= 126 else compat_chr(f))
+                b.append(chr(33 + (f + 14) % 94) if 33 <= f <= 126 else chr(f))
             a = ''.join(b)
             a = a.replace('.cda.mp4', '')
             for p in ('.2cda.pl', '.3cda.pl'):
diff --git a/yt_dlp/extractor/chingari.py b/yt_dlp/extractor/chingari.py
index 7e8c0bfc99..e54d92a86a 100644
--- a/yt_dlp/extractor/chingari.py
+++ b/yt_dlp/extractor/chingari.py
@@ -1,11 +1,11 @@
 import itertools
 import json
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     str_to_int,
     url_or_none,
@@ -47,8 +47,8 @@ class ChingariBaseIE(InfoExtractor):
             'id': id,
             'extractor_key': ChingariIE.ie_key(),
             'extractor': 'Chingari',
-            'title': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
-            'description': compat_urllib_parse_unquote_plus(clean_html(post_data.get('caption'))),
+            'title': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
+            'description': urllib.parse.unquote_plus(clean_html(post_data.get('caption'))),
             'duration': media_data.get('duration'),
             'thumbnail': url_or_none(thumbnail),
             'like_count': post_data.get('likeCount'),
diff --git a/yt_dlp/extractor/common.py b/yt_dlp/extractor/common.py
index fe43ff519c..1c3d4af2c0 100644
--- a/yt_dlp/extractor/common.py
+++ b/yt_dlp/extractor/common.py
@@ -1,5 +1,6 @@
 import base64
 import collections
+import getpass
 import hashlib
 import itertools
 import json
@@ -9,22 +10,20 @@ import os
 import random
 import sys
 import time
+import urllib.request
 import xml.etree.ElementTree
+import http.client
+import http.cookiejar
+import http.cookies
 
 from ..compat import functools, re  # isort: split
 from ..compat import (
-    compat_cookiejar_Cookie,
-    compat_cookies_SimpleCookie,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_getpass,
-    compat_http_client,
     compat_os_name,
     compat_str,
-    compat_urllib_error,
     compat_urllib_parse_unquote,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
 from ..downloader import FileDownloader
@@ -671,7 +670,7 @@ class InfoExtractor:
                 if hasattr(e, 'countries'):
                     kwargs['countries'] = e.countries
                 raise type(e)(e.orig_msg, **kwargs)
-        except compat_http_client.IncompleteRead as e:
+        except http.client.IncompleteRead as e:
             raise ExtractorError('A network error has occurred.', cause=e, expected=True, video_id=self.get_temp_id(url))
         except (KeyError, StopIteration) as e:
             raise ExtractorError('An extractor error has occurred.', cause=e, video_id=self.get_temp_id(url))
@@ -730,7 +729,7 @@ class InfoExtractor:
 
     @staticmethod
     def __can_accept_status_code(err, expected_status):
-        assert isinstance(err, compat_urllib_error.HTTPError)
+        assert isinstance(err, urllib.error.HTTPError)
         if expected_status is None:
             return False
         elif callable(expected_status):
@@ -739,7 +738,7 @@ class InfoExtractor:
             return err.code in variadic(expected_status)
 
     def _create_request(self, url_or_request, data=None, headers={}, query={}):
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
             return update_Request(url_or_request, data=data, headers=headers, query=query)
         if query:
             url_or_request = update_url_query(url_or_request, query)
@@ -779,7 +778,7 @@ class InfoExtractor:
         try:
             return self._downloader.urlopen(self._create_request(url_or_request, data, headers, query))
         except network_exceptions as err:
-            if isinstance(err, compat_urllib_error.HTTPError):
+            if isinstance(err, urllib.error.HTTPError):
                 if self.__can_accept_status_code(err, expected_status):
                     # Retain reference to error to prevent file object from
                     # being closed before it can be read. Works around the
@@ -807,7 +806,7 @@ class InfoExtractor:
 
         Arguments:
         url_or_request -- plain text URL as a string or
-            a compat_urllib_request.Requestobject
+            a urllib.request.Request object
         video_id -- Video/playlist/item identifier (string)
 
         Keyword arguments:
@@ -1056,7 +1055,7 @@ class InfoExtractor:
         while True:
             try:
                 return self.__download_webpage(url_or_request, video_id, note, errnote, None, fatal, *args, **kwargs)
-            except compat_http_client.IncompleteRead as e:
+            except http.client.IncompleteRead as e:
                 try_count += 1
                 if try_count >= tries:
                     raise e
@@ -1292,7 +1291,7 @@ class InfoExtractor:
         if tfa is not None:
             return tfa
 
-        return compat_getpass('Type %s and press [Return]: ' % note)
+        return getpass.getpass('Type %s and press [Return]: ' % note)
 
     # Helper functions for extracting OpenGraph info
     @staticmethod
@@ -3597,15 +3596,15 @@ class InfoExtractor:
 
     def _set_cookie(self, domain, name, value, expire_time=None, port=None,
                     path='/', secure=False, discard=False, rest={}, **kwargs):
-        cookie = compat_cookiejar_Cookie(
+        cookie = http.cookiejar.Cookie(
             0, name, value, port, port is not None, domain, True,
             domain.startswith('.'), path, True, secure, expire_time,
             discard, None, None, rest)
         self.cookiejar.set_cookie(cookie)
 
     def _get_cookies(self, url):
-        """ Return a compat_cookies_SimpleCookie with the cookies for the url """
-        return compat_cookies_SimpleCookie(self._downloader._calc_cookies(url))
+        """ Return a http.cookies.SimpleCookie with the cookies for the url """
+        return http.cookies.SimpleCookie(self._downloader._calc_cookies(url))
 
     def _apply_first_set_cookie_header(self, url_handle, cookie):
         """
diff --git a/yt_dlp/extractor/crunchyroll.py b/yt_dlp/extractor/crunchyroll.py
index 1d186bd388..6877e1a3fc 100644
--- a/yt_dlp/extractor/crunchyroll.py
+++ b/yt_dlp/extractor/crunchyroll.py
@@ -1,19 +1,20 @@
 import base64
-import re
 import json
-import zlib
-
+import re
+import urllib.request
 import xml.etree.ElementTree
+import zlib
 from hashlib import sha1
-from math import pow, sqrt, floor
+from math import floor, pow, sqrt
+
 from .common import InfoExtractor
 from .vrv import VRVBaseIE
+from ..aes import aes_cbc_decrypt
 from ..compat import (
     compat_b64decode,
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_urlencode,
-    compat_urllib_request,
     compat_urlparse,
 )
@@ -22,8 +23,8 @@ from ..utils import (
     extract_attributes,
     float_or_none,
     format_field,
-    intlist_to_bytes,
     int_or_none,
+    intlist_to_bytes,
     join_nonempty,
     lowercase_escape,
     merge_dicts,
@@ -34,9 +35,6 @@ from ..utils import (
     try_get,
     xpath_text,
 )
-from ..aes import (
-    aes_cbc_decrypt,
-)
 
 
 class CrunchyrollBaseIE(InfoExtractor):
@@ -259,7 +257,7 @@ class CrunchyrollIE(CrunchyrollBaseIE, VRVBaseIE):
     }
 
     def _download_webpage(self, url_or_request, *args, **kwargs):
-        request = (url_or_request if isinstance(url_or_request, compat_urllib_request.Request)
+        request = (url_or_request if isinstance(url_or_request, urllib.request.Request)
                    else sanitized_Request(url_or_request))
         # Accept-Language must be set explicitly to accept any language to avoid issues
         # similar to https://github.com/ytdl-org/youtube-dl/issues/6797.
diff --git a/yt_dlp/extractor/espn.py b/yt_dlp/extractor/espn.py
index 7aa454063e..44e0c0989d 100644
--- a/yt_dlp/extractor/espn.py
+++ b/yt_dlp/extractor/espn.py
@@ -1,7 +1,7 @@
 import base64
 import json
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from .adobepass import AdobePassIE
diff --git a/yt_dlp/extractor/facebook.py b/yt_dlp/extractor/facebook.py
index de45f92987..5b34f3bff3 100644
--- a/yt_dlp/extractor/facebook.py
+++ b/yt_dlp/extractor/facebook.py
@@ -1,18 +1,18 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
 from ..compat import (
     compat_etree_fromstring,
     compat_str,
     compat_urllib_parse_unquote,
-    compat_urllib_parse_unquote_plus,
 )
 from ..utils import (
+    ExtractorError,
     clean_html,
     determine_ext,
     error_to_compat_str,
-    ExtractorError,
     float_or_none,
     get_element_by_id,
     get_first,
@@ -467,7 +467,7 @@ class FacebookIE(InfoExtractor):
                 dash_manifest = video.get('dash_manifest')
                 if dash_manifest:
                     formats.extend(self._parse_mpd_formats(
-                        compat_etree_fromstring(compat_urllib_parse_unquote_plus(dash_manifest))))
+                        compat_etree_fromstring(urllib.parse.unquote_plus(dash_manifest))))
 
         def process_formats(formats):
             # Downloads with browser's User-Agent are rate limited. Working around
diff --git a/yt_dlp/extractor/iwara.py b/yt_dlp/extractor/iwara.py
index f99e22fab6..32ebd2ba61 100644
--- a/yt_dlp/extractor/iwara.py
+++ b/yt_dlp/extractor/iwara.py
@@ -1,6 +1,6 @@
 import itertools
 import re
-import urllib
+import urllib.parse
 
 from .common import InfoExtractor
 from ..utils import (
diff --git a/yt_dlp/extractor/kusi.py b/yt_dlp/extractor/kusi.py
index f1221ef1be..4fec2c2b22 100644
--- a/yt_dlp/extractor/kusi.py
+++ b/yt_dlp/extractor/kusi.py
@@ -1,10 +1,10 @@
 import random
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 from ..utils import (
-    int_or_none,
     float_or_none,
+    int_or_none,
     timeconvert,
     update_url_query,
     xpath_text,
@@ -66,7 +66,7 @@ class KUSIIE(InfoExtractor):
         formats = []
         for quality in quality_options:
             formats.append({
-                'url': compat_urllib_parse_unquote_plus(quality.attrib['url']),
+                'url': urllib.parse.unquote_plus(quality.attrib['url']),
                 'height': int_or_none(quality.attrib.get('height')),
                 'width': int_or_none(quality.attrib.get('width')),
                 'vbr': float_or_none(quality.attrib.get('bitratebits'), scale=1000),
diff --git a/yt_dlp/extractor/metacafe.py b/yt_dlp/extractor/metacafe.py
index 31fec86d28..048c74e68b 100644
--- a/yt_dlp/extractor/metacafe.py
+++ b/yt_dlp/extractor/metacafe.py
@@ -1,17 +1,14 @@
 import json
 import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_parse,
-    compat_urllib_parse_unquote,
-)
+from ..compat import compat_parse_qs, compat_urllib_parse_unquote
 from ..utils import (
-    determine_ext,
     ExtractorError,
-    int_or_none,
+    determine_ext,
     get_element_by_attribute,
+    int_or_none,
     mimetype2ext,
 )
@@ -143,7 +140,7 @@ class MetacafeIE(InfoExtractor):
 
         headers = {
             # Disable family filter
-            'Cookie': 'user=%s; ' % compat_urllib_parse.quote(json.dumps({'ffilter': False}))
+            'Cookie': 'user=%s; ' % urllib.parse.quote(json.dumps({'ffilter': False}))
         }
 
         # AnyClip videos require the flashversion cookie so that we get the link
diff --git a/yt_dlp/extractor/mixcloud.py b/yt_dlp/extractor/mixcloud.py
index 796f268f43..a77d7e6824 100644
--- a/yt_dlp/extractor/mixcloud.py
+++ b/yt_dlp/extractor/mixcloud.py
@@ -3,7 +3,6 @@ import itertools
 from .common import InfoExtractor
from ..compat import ( compat_b64decode, - compat_chr, compat_ord, compat_str, compat_urllib_parse_unquote, @@ -72,7 +71,7 @@ class MixcloudIE(MixcloudBaseIE): def _decrypt_xor_cipher(key, ciphertext): """Encrypt/Decrypt XOR cipher. Both ways are possible because it's XOR.""" return ''.join([ - compat_chr(compat_ord(ch) ^ compat_ord(k)) + chr(compat_ord(ch) ^ compat_ord(k)) for ch, k in zip(ciphertext, itertools.cycle(key))]) def _real_extract(self, url): diff --git a/yt_dlp/extractor/ndtv.py b/yt_dlp/extractor/ndtv.py index fbb033169f..bfe52f77de 100644 --- a/yt_dlp/extractor/ndtv.py +++ b/yt_dlp/extractor/ndtv.py @@ -1,13 +1,7 @@ +import urllib.parse + from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_unquote_plus -) -from ..utils import ( - parse_duration, - remove_end, - unified_strdate, - urljoin -) +from ..utils import parse_duration, remove_end, unified_strdate, urljoin class NDTVIE(InfoExtractor): @@ -80,7 +74,7 @@ class NDTVIE(InfoExtractor): webpage = self._download_webpage(url, video_id) # '__title' does not contain extra words such as sub-site name, "Video" etc. - title = compat_urllib_parse_unquote_plus( + title = urllib.parse.unquote_plus( self._search_regex(r"__title\s*=\s*'([^']+)'", webpage, 'title', default=None) or self._og_search_title(webpage)) diff --git a/yt_dlp/extractor/nebula.py b/yt_dlp/extractor/nebula.py index ff9a2adf07..9478f3aa30 100644 --- a/yt_dlp/extractor/nebula.py +++ b/yt_dlp/extractor/nebula.py @@ -1,14 +1,11 @@ import itertools import json import time -import urllib +import urllib.parse +import urllib.error -from ..utils import ( - ExtractorError, - parse_iso8601, - try_get, -) from .common import InfoExtractor +from ..utils import ExtractorError, parse_iso8601, try_get class NebulaBaseIE(InfoExtractor): diff --git a/yt_dlp/extractor/neteasemusic.py b/yt_dlp/extractor/neteasemusic.py index 4def7e76bc..f9a67876ab 100644 --- a/yt_dlp/extractor/neteasemusic.py +++ b/yt_dlp/extractor/neteasemusic.py @@ -1,18 +1,12 @@ -from hashlib import md5 +import itertools +import re from base64 import b64encode from datetime import datetime -import re +from hashlib import md5 from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_urlencode, - compat_str, - compat_itertools_count, -) -from ..utils import ( - sanitized_Request, - float_or_none, -) +from ..compat import compat_str, compat_urllib_parse_urlencode +from ..utils import float_or_none, sanitized_Request class NetEaseMusicBaseIE(InfoExtractor): @@ -449,7 +443,7 @@ class NetEaseMusicDjRadioIE(NetEaseMusicBaseIE): name = None desc = None entries = [] - for offset in compat_itertools_count(start=0, step=self._PAGE_SIZE): + for offset in itertools.count(start=0, step=self._PAGE_SIZE): info = self.query_api( 'dj/program/byradio?asc=false&limit=%d&radioId=%s&offset=%d' % (self._PAGE_SIZE, dj_id, offset), diff --git a/yt_dlp/extractor/peloton.py b/yt_dlp/extractor/peloton.py index 8e50ffc7f6..3fc05d1f27 100644 --- a/yt_dlp/extractor/peloton.py +++ b/yt_dlp/extractor/peloton.py @@ -1,11 +1,9 @@ import json import re +import urllib.parse from .common import InfoExtractor -from ..compat import ( - compat_HTTPError, - compat_urllib_parse, -) +from ..compat import compat_HTTPError from ..utils import ( ExtractorError, float_or_none, @@ -125,7 +123,7 @@ class PelotonIE(InfoExtractor): is_live = False if ride_data.get('content_format') == 'audio': - url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), compat_urllib_parse.quote(token)) + url = 
self._MANIFEST_URL_TEMPLATE % (ride_data.get('vod_stream_url'), urllib.parse.quote(token)) formats = [{ 'url': url, 'ext': 'm4a', @@ -138,9 +136,9 @@ class PelotonIE(InfoExtractor): url = 'https://members.onepeloton.com/.netlify/functions/m3u8-proxy?displayLanguage=en&acceptedSubtitles=%s&url=%s?hdnea=%s' % ( ','.join([re.sub('^([a-z]+)-([A-Z]+)$', r'\1', caption) for caption in ride_data['captions']]), ride_data['vod_stream_url'], - compat_urllib_parse.quote(compat_urllib_parse.quote(token))) + urllib.parse.quote(urllib.parse.quote(token))) elif ride_data.get('live_stream_url'): - url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), compat_urllib_parse.quote(token)) + url = self._MANIFEST_URL_TEMPLATE % (ride_data.get('live_stream_url'), urllib.parse.quote(token)) is_live = True else: raise ExtractorError('Missing video URL') diff --git a/yt_dlp/extractor/playvid.py b/yt_dlp/extractor/playvid.py index 5ffefc934d..18aeda7de3 100644 --- a/yt_dlp/extractor/playvid.py +++ b/yt_dlp/extractor/playvid.py @@ -1,14 +1,9 @@ import re +import urllib.parse from .common import InfoExtractor -from ..compat import ( - compat_urllib_parse_unquote, - compat_urllib_parse_unquote_plus, -) -from ..utils import ( - clean_html, - ExtractorError, -) +from ..compat import compat_urllib_parse_unquote +from ..utils import ExtractorError, clean_html class PlayvidIE(InfoExtractor): @@ -62,7 +57,7 @@ class PlayvidIE(InfoExtractor): val = videovars_match.group(2) if key == 'title': - video_title = compat_urllib_parse_unquote_plus(val) + video_title = urllib.parse.unquote_plus(val) if key == 'duration': try: duration = int(val) diff --git a/yt_dlp/extractor/popcorntimes.py b/yt_dlp/extractor/popcorntimes.py index ed741a07be..ddc5ec8c8e 100644 --- a/yt_dlp/extractor/popcorntimes.py +++ b/yt_dlp/extractor/popcorntimes.py @@ -1,8 +1,5 @@ from .common import InfoExtractor -from ..compat import ( - compat_b64decode, - compat_chr, -) +from ..compat import compat_b64decode from ..utils import int_or_none @@ -50,7 +47,7 @@ class PopcorntimesIE(InfoExtractor): c_ord += 13 if upper < c_ord: c_ord -= 26 - loc_b64 += compat_chr(c_ord) + loc_b64 += chr(c_ord) video_url = compat_b64decode(loc_b64).decode('utf-8') diff --git a/yt_dlp/extractor/pornhub.py b/yt_dlp/extractor/pornhub.py index 023b5f3b97..35468b4fc8 100644 --- a/yt_dlp/extractor/pornhub.py +++ b/yt_dlp/extractor/pornhub.py @@ -3,29 +3,26 @@ import itertools import math import operator import re +import urllib.request from .common import InfoExtractor -from ..compat import ( - compat_HTTPError, - compat_str, - compat_urllib_request, -) from .openload import PhantomJSwrapper +from ..compat import compat_HTTPError, compat_str from ..utils import ( + NO_DEFAULT, + ExtractorError, clean_html, determine_ext, - ExtractorError, format_field, int_or_none, merge_dicts, - NO_DEFAULT, orderedSet, remove_quotes, remove_start, str_to_int, update_url_query, - urlencode_postdata, url_or_none, + urlencode_postdata, ) @@ -50,7 +47,7 @@ class PornHubBaseIE(InfoExtractor): r'document\.location\.reload\(true\)')): url_or_request = args[0] url = (url_or_request.get_full_url() - if isinstance(url_or_request, compat_urllib_request.Request) + if isinstance(url_or_request, urllib.request.Request) else url_or_request) phantom = PhantomJSwrapper(self, required_version='2.0') phantom.get(url, html=webpage) diff --git a/yt_dlp/extractor/rtve.py b/yt_dlp/extractor/rtve.py index 42a6029688..798dde7fa6 100644 --- a/yt_dlp/extractor/rtve.py +++ b/yt_dlp/extractor/rtve.py @@ -1,14 +1,12 
 import base64
 import io
+import struct
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_struct_unpack,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
     ExtractorError,
+    determine_ext,
     float_or_none,
     qualities,
     remove_end,
@@ -73,7 +71,7 @@ class RTVEALaCartaIE(InfoExtractor):
     def _decrypt_url(png):
         encrypted_data = io.BytesIO(compat_b64decode(png)[8:])
         while True:
-            length = compat_struct_unpack('!I', encrypted_data.read(4))[0]
+            length = struct.unpack('!I', encrypted_data.read(4))[0]
             chunk_type = encrypted_data.read(4)
             if chunk_type == b'IEND':
                 break
diff --git a/yt_dlp/extractor/screencast.py b/yt_dlp/extractor/screencast.py
index e3dbaab690..df5e79bef7 100644
--- a/yt_dlp/extractor/screencast.py
+++ b/yt_dlp/extractor/screencast.py
@@ -1,11 +1,8 @@
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_parse_qs,
-    compat_urllib_request,
-)
-from ..utils import (
-    ExtractorError,
-)
+from ..compat import compat_parse_qs
+from ..utils import ExtractorError
 
 
 class ScreencastIE(InfoExtractor):
@@ -75,7 +72,7 @@ class ScreencastIE(InfoExtractor):
                 flash_vars_s = flash_vars_s.replace(',', '&')
             if flash_vars_s:
                 flash_vars = compat_parse_qs(flash_vars_s)
-                video_url_raw = compat_urllib_request.quote(
+                video_url_raw = urllib.parse.quote(
                     flash_vars['content'][0])
                 video_url = video_url_raw.replace('http%3A', 'http:')
diff --git a/yt_dlp/extractor/shared.py b/yt_dlp/extractor/shared.py
index 5bc097b0d8..31c0080ba4 100644
--- a/yt_dlp/extractor/shared.py
+++ b/yt_dlp/extractor/shared.py
@@ -1,14 +1,15 @@
+
+
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_b64decode,
-    compat_urllib_parse_unquote_plus,
-)
+from ..compat import compat_b64decode
 from ..utils import (
-    determine_ext,
+    KNOWN_EXTENSIONS,
     ExtractorError,
+    determine_ext,
     int_or_none,
     js_to_json,
-    KNOWN_EXTENSIONS,
     parse_filesize,
     rot47,
     url_or_none,
@@ -130,7 +131,7 @@ class VivoIE(SharedBaseIE):
             return stream_url
 
         def decode_url(encoded_url):
-            return rot47(compat_urllib_parse_unquote_plus(encoded_url))
+            return rot47(urllib.parse.unquote_plus(encoded_url))
 
         return decode_url(self._parse_json(
             self._search_regex(
diff --git a/yt_dlp/extractor/udemy.py b/yt_dlp/extractor/udemy.py
index 94ea2fe590..1dc2dbdc46 100644
--- a/yt_dlp/extractor/udemy.py
+++ b/yt_dlp/extractor/udemy.py
@@ -1,16 +1,12 @@
 import re
+import urllib.request
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_str,
-    compat_urllib_request,
-    compat_urlparse,
-)
+from ..compat import compat_HTTPError, compat_str, compat_urlparse
 from ..utils import (
+    ExtractorError,
     determine_ext,
     extract_attributes,
-    ExtractorError,
     float_or_none,
     int_or_none,
     js_to_json,
@@ -155,7 +151,7 @@ class UdemyIE(InfoExtractor):
             headers['X-Udemy-Bearer-Token'] = cookie.value
             headers['X-Udemy-Authorization'] = 'Bearer %s' % cookie.value
 
-        if isinstance(url_or_request, compat_urllib_request.Request):
+        if isinstance(url_or_request, urllib.request.Request):
             for header, value in headers.items():
                 url_or_request.add_header(header, value)
         else:
diff --git a/yt_dlp/extractor/urort.py b/yt_dlp/extractor/urort.py
index 296799d386..be508f4349 100644
--- a/yt_dlp/extractor/urort.py
+++ b/yt_dlp/extractor/urort.py
@@ -1,10 +1,9 @@
+
+
+import urllib.parse
+
 from .common import InfoExtractor
-from ..compat import (
-    compat_urllib_parse,
-)
-from ..utils import (
-    unified_strdate,
-)
+from ..utils import unified_strdate
 
 
 class UrortIE(InfoExtractor):
@@ -31,7 +30,7 @@
 
     def _real_extract(self, url):
         playlist_id = self._match_id(url)
-        fstr = compat_urllib_parse.quote("InternalBandUrl eq '%s'" % playlist_id)
+        fstr = urllib.parse.quote("InternalBandUrl eq '%s'" % playlist_id)
         json_url = 'http://urort.p3.no/breeze/urort/TrackDTOViews?$filter=%s&$orderby=Released%%20desc&$expand=Tags%%2CFiles' % fstr
         songs = self._download_json(json_url, playlist_id)
         entries = []
diff --git a/yt_dlp/extractor/videa.py b/yt_dlp/extractor/videa.py
index 251eb78feb..9b05c86a5e 100644
--- a/yt_dlp/extractor/videa.py
+++ b/yt_dlp/extractor/videa.py
@@ -1,8 +1,10 @@
 import random
 import re
 import string
+import struct
 
 from .common import InfoExtractor
+from ..compat import compat_b64decode, compat_ord
 from ..utils import (
     ExtractorError,
     int_or_none,
@@ -14,11 +16,6 @@ from ..utils import (
     xpath_element,
     xpath_text,
 )
-from ..compat import (
-    compat_b64decode,
-    compat_ord,
-    compat_struct_pack,
-)
 
 
 class VideaIE(InfoExtractor):
@@ -102,7 +99,7 @@ class VideaIE(InfoExtractor):
             j = (j + S[i]) % 256
             S[i], S[j] = S[j], S[i]
             k = S[(S[i] + S[j]) % 256]
-            res += compat_struct_pack('B', k ^ compat_ord(cipher_text[m]))
+            res += struct.pack('B', k ^ compat_ord(cipher_text[m]))
 
         return res.decode()
diff --git a/yt_dlp/extractor/vrv.py b/yt_dlp/extractor/vrv.py
index 35662753ed..0b9bf2903b 100644
--- a/yt_dlp/extractor/vrv.py
+++ b/yt_dlp/extractor/vrv.py
@@ -1,17 +1,14 @@
 import base64
-import json
 import hashlib
 import hmac
+import json
 import random
 import string
 import time
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import (
-    compat_HTTPError,
-    compat_urllib_parse_urlencode,
-    compat_urllib_parse,
-)
+from ..compat import compat_HTTPError, compat_urllib_parse_urlencode
 from ..utils import (
     ExtractorError,
     float_or_none,
@@ -46,12 +43,12 @@ class VRVBaseIE(InfoExtractor):
             headers['Content-Type'] = 'application/json'
         base_string = '&'.join([
             'POST' if data else 'GET',
-            compat_urllib_parse.quote(base_url, ''),
-            compat_urllib_parse.quote(encoded_query, '')])
+            urllib.parse.quote(base_url, ''),
+            urllib.parse.quote(encoded_query, '')])
         oauth_signature = base64.b64encode(hmac.new(
             (self._API_PARAMS['oAuthSecret'] + '&' + self._TOKEN_SECRET).encode('ascii'),
             base_string.encode(), hashlib.sha1).digest()).decode()
-        encoded_query += '&oauth_signature=' + compat_urllib_parse.quote(oauth_signature, '')
+        encoded_query += '&oauth_signature=' + urllib.parse.quote(oauth_signature, '')
         try:
             return self._download_json(
                 '?'.join([base_url, encoded_query]), video_id,
diff --git a/yt_dlp/extractor/vshare.py b/yt_dlp/extractor/vshare.py
index 8ef75d30e3..fd5226bbc6 100644
--- a/yt_dlp/extractor/vshare.py
+++ b/yt_dlp/extractor/vshare.py
@@ -1,11 +1,7 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
-from ..utils import (
-    decode_packed_codes,
-    ExtractorError,
-)
+from ..utils import ExtractorError, decode_packed_codes
 
 
 class VShareIE(InfoExtractor):
@@ -37,7 +33,7 @@ class VShareIE(InfoExtractor):
         digits = [int(digit) for digit in digits.split(',')]
         key_digit = self._search_regex(
             r'fromCharCode\(.+?(\d+)\)}', unpacked, 'key digit')
-        chars = [compat_chr(d - int(key_digit)) for d in digits]
+        chars = [chr(d - int(key_digit)) for d in digits]
         return ''.join(chars)
 
     def _real_extract(self, url):
diff --git a/yt_dlp/extractor/xfileshare.py b/yt_dlp/extractor/xfileshare.py
index 28b6ecb6ea..63abe4a1f4 100644
--- a/yt_dlp/extractor/xfileshare.py
+++ b/yt_dlp/extractor/xfileshare.py
@@ -1,11 +1,10 @@
 import re
 
 from .common import InfoExtractor
-from ..compat import compat_chr
 from ..utils import (
+    ExtractorError,
     decode_packed_codes,
     determine_ext,
-    ExtractorError,
     int_or_none,
     js_to_json,
     urlencode_postdata,
@@ -32,11 +31,11 @@ def aa_decode(aa_code):
         aa_char = aa_char.replace('+ ', '')
         m = re.match(r'^\d+', aa_char)
         if m:
-            ret += compat_chr(int(m.group(0), 8))
+            ret += chr(int(m.group(0), 8))
         else:
             m = re.match(r'^u([\da-f]+)', aa_char)
             if m:
-                ret += compat_chr(int(m.group(1), 16))
+                ret += chr(int(m.group(1), 16))
     return ret
 
 
diff --git a/yt_dlp/extractor/yahoo.py b/yt_dlp/extractor/yahoo.py
index 171fbf585a..8811df6d85 100644
--- a/yt_dlp/extractor/yahoo.py
+++ b/yt_dlp/extractor/yahoo.py
@@ -1,15 +1,15 @@
 import hashlib
 import itertools
 import re
+import urllib.parse
 
+from .brightcove import BrightcoveNewIE
 from .common import InfoExtractor, SearchInfoExtractor
-from ..compat import (
-    compat_str,
-    compat_urllib_parse,
-)
+from .youtube import YoutubeIE
+from ..compat import compat_str
 from ..utils import (
-    clean_html,
     ExtractorError,
+    clean_html,
     int_or_none,
     mimetype2ext,
     parse_iso8601,
@@ -18,9 +18,6 @@ from ..utils import (
     url_or_none,
 )
 
-from .brightcove import BrightcoveNewIE
-from .youtube import YoutubeIE
-
 
 class YahooIE(InfoExtractor):
     IE_DESC = 'Yahoo screen and movies'
@@ -333,7 +330,7 @@ class YahooSearchIE(SearchInfoExtractor):
 
     def _search_results(self, query):
         for pagenum in itertools.count(0):
-            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (compat_urllib_parse.quote_plus(query), pagenum * 30)
+            result_url = 'http://video.search.yahoo.com/search/?p=%s&fr=screen&o=js&gs=0&b=%d' % (urllib.parse.quote_plus(query), pagenum * 30)
             info = self._download_json(result_url, query, note='Downloading results page ' + str(pagenum + 1))
             yield from (self.url_result(result['rurl']) for result in info['results'])
 
diff --git a/yt_dlp/extractor/ynet.py b/yt_dlp/extractor/ynet.py
index 4447859479..27eda97219 100644
--- a/yt_dlp/extractor/ynet.py
+++ b/yt_dlp/extractor/ynet.py
@@ -1,8 +1,8 @@
-import re
 import json
+import re
+import urllib.parse
 
 from .common import InfoExtractor
-from ..compat import compat_urllib_parse_unquote_plus
 
 
 class YnetIE(InfoExtractor):
@@ -31,7 +31,7 @@ class YnetIE(InfoExtractor):
         video_id = self._match_id(url)
         webpage = self._download_webpage(url, video_id)
 
-        content = compat_urllib_parse_unquote_plus(self._og_search_video_url(webpage))
+        content = urllib.parse.unquote_plus(self._og_search_video_url(webpage))
         config = json.loads(self._search_regex(r'config=({.+?})$', content, 'video config'))
         f4m_url = config['clip']['url']
         title = self._og_search_title(webpage)
diff --git a/yt_dlp/extractor/youtube.py b/yt_dlp/extractor/youtube.py
index d168bfff5a..37a6d4c754 100644
--- a/yt_dlp/extractor/youtube.py
+++ b/yt_dlp/extractor/youtube.py
@@ -13,15 +13,14 @@ import sys
 import threading
 import time
 import traceback
+import urllib.parse
 
 from .common import InfoExtractor, SearchInfoExtractor
 from ..compat import functools  # isort: split
 from ..compat import (
-    compat_chr,
     compat_HTTPError,
     compat_parse_qs,
     compat_str,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
     compat_urlparse,
@@ -2483,7 +2482,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
         if code:
             res = self._parse_sig_js(code)
 
-            test_string = ''.join(map(compat_chr, range(len(example_sig))))
+            test_string = ''.join(map(chr, range(len(example_sig))))
            cache_res = res(test_string)
            cache_spec = [ord(c) for c in cache_res]
@@ -2522,7 +2521,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             else:
                 yield _genslice(start, i, step)
 
-        test_string = ''.join(map(compat_chr, range(len(example_sig))))
+        test_string = ''.join(map(chr, range(len(example_sig))))
         cache_res = func(test_string)
         cache_spec = [ord(c) for c in cache_res]
         expr_code = ' + '.join(gen_sig_code(cache_spec))
@@ -3421,7 +3420,7 @@ class YoutubeIE(YoutubeBaseInfoExtractor):
             # fields may contain comma as well (see
             # https://github.com/ytdl-org/youtube-dl/issues/8536)
             feed_data = compat_parse_qs(
-                compat_urllib_parse_unquote_plus(feed))
+                urllib.parse.unquote_plus(feed))
 
             def feed_entry(name):
                 return try_get(
@@ -5846,7 +5845,7 @@ class YoutubeMusicSearchURLIE(YoutubeTabBaseInfoExtractor):
         if params:
             section = next((k for k, v in self._SECTIONS.items() if v == params), params)
         else:
-            section = compat_urllib_parse_unquote_plus((url.split('#') + [''])[1]).lower()
+            section = urllib.parse.unquote_plus((url.split('#') + [''])[1]).lower()
             params = self._SECTIONS.get(section)
         if not params:
             section = None
diff --git a/yt_dlp/options.py b/yt_dlp/options.py
index 995ea6a96a..a4f8e09593 100644
--- a/yt_dlp/options.py
+++ b/yt_dlp/options.py
@@ -4,10 +4,11 @@ import optparse
 import os.path
 import re
 import shlex
+import shutil
 import string
 import sys
 
-from .compat import compat_expanduser, compat_get_terminal_size, compat_getenv
+from .compat import compat_expanduser
 from .cookies import SUPPORTED_BROWSERS, SUPPORTED_KEYRINGS
 from .downloader.external import list_external_downloaders
 from .postprocessor import (
@@ -39,7 +40,7 @@ def parseOpts(overrideArguments=None, ignore_config_files='if_override'):
 
     def _readUserConf(package_name, default=[]):
         # .config
-        xdg_config_home = compat_getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
+        xdg_config_home = os.getenv('XDG_CONFIG_HOME') or compat_expanduser('~/.config')
         userConfFile = os.path.join(xdg_config_home, package_name, 'config')
         if not os.path.isfile(userConfFile):
             userConfFile = os.path.join(xdg_config_home, '%s.conf' % package_name)
@@ -48,7 +49,7 @@ def parseOpts(overrideArguments=None, ignore_config_files='if_override'):
         return userConf, userConfFile
 
         # appdata
-        appdata_dir = compat_getenv('appdata')
+        appdata_dir = os.getenv('appdata')
         if appdata_dir:
             userConfFile = os.path.join(appdata_dir, package_name, 'config')
             userConf = Config.read_file(userConfFile, default=None)
@@ -137,7 +138,7 @@ class _YoutubeDLHelpFormatter(optparse.IndentedHelpFormatter):
     def __init__(self):
        # No need to wrap help messages if we're on a wide console
-        max_width = compat_get_terminal_size().columns or 80
+        max_width = shutil.get_terminal_size().columns or 80
        # The % is chosen to get a pretty output in README.md
        super().__init__(width=max_width, max_help_position=int(0.45 * max_width))
 
diff --git a/yt_dlp/socks.py b/yt_dlp/socks.py
index 34ba1394ab..f93328f63a 100644
--- a/yt_dlp/socks.py
+++ b/yt_dlp/socks.py
@@ -8,8 +8,9 @@
 import collections
 import socket
+import struct
 
-from .compat import compat_ord, compat_struct_pack, compat_struct_unpack
+from .compat import compat_ord
 
 __author__ = 'Timo Schmid <coder@timoschmid.de>'
 
@@ -19,7 +20,7 @@ SOCKS4_REPLY_VERSION = 0x00
 # if the client cannot resolve the destination host's domain name to find its
 # IP address, it should set the first three bytes of DSTIP to NULL and the last
 # byte to a non-zero value.
-SOCKS4_DEFAULT_DSTIP = compat_struct_pack('!BBBB', 0, 0, 0, 0xFF)
+SOCKS4_DEFAULT_DSTIP = struct.pack('!BBBB', 0, 0, 0, 0xFF)
 
 SOCKS5_VERSION = 5
 SOCKS5_USER_AUTH_VERSION = 0x01
@@ -122,11 +123,11 @@ class sockssocket(socket.socket):
 
     def _recv_bytes(self, cnt):
         data = self.recvall(cnt)
-        return compat_struct_unpack(f'!{cnt}B', data)
+        return struct.unpack(f'!{cnt}B', data)
 
     @staticmethod
     def _len_and_data(data):
-        return compat_struct_pack('!B', len(data)) + data
+        return struct.pack('!B', len(data)) + data
 
     def _check_response_version(self, expected_version, got_version):
         if got_version != expected_version:
@@ -147,7 +148,7 @@ class sockssocket(socket.socket):
         ipaddr = self._resolve_address(destaddr, SOCKS4_DEFAULT_DSTIP, use_remote_dns=is_4a)
 
-        packet = compat_struct_pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr
+        packet = struct.pack('!BBH', SOCKS4_VERSION, Socks4Command.CMD_CONNECT, port) + ipaddr
 
         username = (self._proxy.username or '').encode()
         packet += username + b'\x00'
@@ -157,7 +158,7 @@ class sockssocket(socket.socket):
 
         self.sendall(packet)
 
-        version, resp_code, dstport, dsthost = compat_struct_unpack('!BBHI', self.recvall(8))
+        version, resp_code, dstport, dsthost = struct.unpack('!BBHI', self.recvall(8))
 
         self._check_response_version(SOCKS4_REPLY_VERSION, version)
@@ -171,14 +172,14 @@ class sockssocket(socket.socket):
         self._setup_socks4(address, is_4a=True)
 
     def _socks5_auth(self):
-        packet = compat_struct_pack('!B', SOCKS5_VERSION)
+        packet = struct.pack('!B', SOCKS5_VERSION)
 
         auth_methods = [Socks5Auth.AUTH_NONE]
         if self._proxy.username and self._proxy.password:
             auth_methods.append(Socks5Auth.AUTH_USER_PASS)
 
-        packet += compat_struct_pack('!B', len(auth_methods))
-        packet += compat_struct_pack(f'!{len(auth_methods)}B', *auth_methods)
+        packet += struct.pack('!B', len(auth_methods))
+        packet += struct.pack(f'!{len(auth_methods)}B', *auth_methods)
 
         self.sendall(packet)
@@ -194,7 +195,7 @@ class sockssocket(socket.socket):
         if method == Socks5Auth.AUTH_USER_PASS:
             username = self._proxy.username.encode()
             password = self._proxy.password.encode()
-            packet = compat_struct_pack('!B', SOCKS5_USER_AUTH_VERSION)
+            packet = struct.pack('!B', SOCKS5_USER_AUTH_VERSION)
             packet += self._len_and_data(username) + self._len_and_data(password)
             self.sendall(packet)
@@ -214,14 +215,14 @@ class sockssocket(socket.socket):
             self._socks5_auth()
 
         reserved = 0
-        packet = compat_struct_pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved)
+        packet = struct.pack('!BBB', SOCKS5_VERSION, Socks5Command.CMD_CONNECT, reserved)
         if ipaddr is None:
             destaddr = destaddr.encode()
-            packet += compat_struct_pack('!B', Socks5AddressType.ATYP_DOMAINNAME)
+            packet += struct.pack('!B', Socks5AddressType.ATYP_DOMAINNAME)
             packet += self._len_and_data(destaddr)
         else:
-            packet += compat_struct_pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr
-            packet += compat_struct_pack('!H', port)
+            packet += struct.pack('!B', Socks5AddressType.ATYP_IPV4) + ipaddr
+            packet += struct.pack('!H', port)
 
         self.sendall(packet)
@@ -240,7 +241,7 @@ class sockssocket(socket.socket):
             destaddr = self.recvall(alen)
         elif atype == Socks5AddressType.ATYP_IPV6:
             destaddr = self.recvall(16)
-        destport = compat_struct_unpack('!H', self.recvall(2))[0]
+        destport = struct.unpack('!H', self.recvall(2))[0]
 
         return (destaddr, destport)
diff --git a/yt_dlp/utils.py b/yt_dlp/utils.py
index 3fc4961dd3..6b02eb450e 100644
--- a/yt_dlp/utils.py
+++ b/yt_dlp/utils.py
@@ -14,6 +14,10 @@ import errno
 import gzip
 import hashlib
 import hmac
+import html.entities
+import html.parser
+import http.client
+import http.cookiejar
 import importlib.util
 import io
 import itertools
@@ -29,6 +33,7 @@ import re
 import shlex
 import socket
 import ssl
+import struct
 import subprocess
 import sys
 import tempfile
@@ -36,35 +41,25 @@ import time
 import traceback
 import types
 import urllib.parse
+import urllib.request
 import xml.etree.ElementTree
 import zlib
 
 from .compat import asyncio, functools  # isort: split
 from .compat import (
-    compat_chr,
-    compat_cookiejar,
     compat_etree_fromstring,
     compat_expanduser,
-    compat_html_entities,
-    compat_html_entities_html5,
     compat_HTMLParseError,
-    compat_HTMLParser,
-    compat_http_client,
     compat_HTTPError,
     compat_os_name,
     compat_parse_qs,
     compat_shlex_quote,
     compat_str,
-    compat_struct_pack,
-    compat_struct_unpack,
-    compat_urllib_error,
-    compat_urllib_parse_unquote_plus,
     compat_urllib_parse_urlencode,
     compat_urllib_parse_urlparse,
-    compat_urllib_request,
     compat_urlparse,
 )
-from .dependencies import brotli, certifi, websockets
+from .dependencies import brotli, certifi, websockets, xattr
 from .socks import ProxyType, sockssocket
 
 
@@ -445,7 +440,7 @@ def get_elements_text_and_html_by_attribute(attribute, value, html, escape_value
 )
 
 
-class HTMLBreakOnClosingTagParser(compat_HTMLParser):
+class HTMLBreakOnClosingTagParser(html.parser.HTMLParser):
     """
     HTML parser which raises HTMLBreakOnClosingTagException upon reaching the
     closing tag for the first opening tag it has encountered, and can be used
@@ -457,7 +452,7 @@ class HTMLBreakOnClosingTagParser(compat_HTMLParser):
 
     def __init__(self):
         self.tagstack = collections.deque()
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)
 
     def __enter__(self):
         return self
@@ -522,22 +517,22 @@ def get_element_text_and_html_by_tag(tag, html):
     raise compat_HTMLParseError('unexpected end of html')
 
 
-class HTMLAttributeParser(compat_HTMLParser):
+class HTMLAttributeParser(html.parser.HTMLParser):
     """Trivial HTML parser to gather the attributes for a single element"""
 
     def __init__(self):
         self.attrs = {}
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)
 
     def handle_starttag(self, tag, attrs):
         self.attrs = dict(attrs)
 
 
-class HTMLListAttrsParser(compat_HTMLParser):
+class HTMLListAttrsParser(html.parser.HTMLParser):
     """HTML parser to gather the attributes for the elements of a list"""
 
     def __init__(self):
-        compat_HTMLParser.__init__(self)
+        html.parser.HTMLParser.__init__(self)
         self.items = []
         self._level = 0
 
@@ -763,7 +758,7 @@ def sanitized_Request(url, *args, **kwargs):
     if auth_header is not None:
         headers = args[1] if len(args) >= 2 else kwargs.setdefault('headers', {})
         headers['Authorization'] = auth_header
-    return compat_urllib_request.Request(url, *args, **kwargs)
+    return urllib.request.Request(url, *args, **kwargs)
 
 
 def expand_path(s):
@@ -788,13 +783,13 @@
     entity = entity_with_semicolon[:-1]
 
     # Known non-numeric HTML entity
-    if entity in compat_html_entities.name2codepoint:
-        return compat_chr(compat_html_entities.name2codepoint[entity])
+    if entity in html.entities.name2codepoint:
+        return chr(html.entities.name2codepoint[entity])
 
     # TODO: HTML5 allows entities without a semicolon. For example,
     # '&Eacuteric' should be decoded as 'Éric'.
-    if entity_with_semicolon in compat_html_entities_html5:
-        return compat_html_entities_html5[entity_with_semicolon]
+    if entity_with_semicolon in html.entities.html5:
+        return html.entities.html5[entity_with_semicolon]
 
     mobj = re.match(r'#(x[0-9a-fA-F]+|[0-9]+)', entity)
     if mobj is not None:
@@ -806,7 +801,7 @@
             base = 10
         # See https://github.com/ytdl-org/youtube-dl/issues/7518
         with contextlib.suppress(ValueError):
-            return compat_chr(int(numstr, base))
+            return chr(int(numstr, base))
 
     # Unknown entity in name, return its literal representation
     return '&%s;' % entity
@@ -1015,7 +1010,7 @@ class YoutubeDLError(Exception):
         super().__init__(self.msg)
 
 
-network_exceptions = [compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error]
+network_exceptions = [urllib.error.URLError, http.client.HTTPException, socket.error]
 if hasattr(ssl, 'CertificateError'):
     network_exceptions.append(ssl.CertificateError)
 network_exceptions = tuple(network_exceptions)
@@ -1267,7 +1262,7 @@ def handle_youtubedl_headers(headers):
     return filtered_headers
 
 
-class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
+class YoutubeDLHandler(urllib.request.HTTPHandler):
     """Handler for HTTP requests and responses.
 
     This class, when installed with an OpenerDirector, automatically adds
@@ -1286,11 +1281,11 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """
 
     def __init__(self, params, *args, **kwargs):
-        compat_urllib_request.HTTPHandler.__init__(self, *args, **kwargs)
+        urllib.request.HTTPHandler.__init__(self, *args, **kwargs)
         self._params = params
 
     def http_open(self, req):
-        conn_class = compat_http_client.HTTPConnection
+        conn_class = http.client.HTTPConnection
 
         socks_proxy = req.headers.get('Ytdl-socks-proxy')
         if socks_proxy:
@@ -1365,18 +1360,18 @@ class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
                     break
                 else:
                     raise original_ioerror
-            resp = compat_urllib_request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
+            resp = urllib.request.addinfourl(uncompressed, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
         # deflate
         if resp.headers.get('Content-encoding', '') == 'deflate':
             gz = io.BytesIO(self.deflate(resp.read()))
-            resp = compat_urllib_request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
+            resp = urllib.request.addinfourl(gz, old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
         # brotli
         if resp.headers.get('Content-encoding', '') == 'br':
-            resp = compat_urllib_request.addinfourl(
+            resp = urllib.request.addinfourl(
                 io.BytesIO(self.brotli(resp.read())), old_resp.headers, old_resp.url, old_resp.code)
             resp.msg = old_resp.msg
             del resp.headers['Content-encoding']
@@ -1399,7 +1394,7 @@
 
 def make_socks_conn_class(base_class, socks_proxy):
     assert issubclass(base_class, (
-        compat_http_client.HTTPConnection, compat_http_client.HTTPSConnection))
+        http.client.HTTPConnection, http.client.HTTPSConnection))
 
     url_components = compat_urlparse.urlparse(socks_proxy)
     if url_components.scheme.lower() == 'socks5':
@@ -1412,7 +1407,7 @@
     def unquote_if_non_empty(s):
         if not s:
             return s
-        return compat_urllib_parse_unquote_plus(s)
+        return urllib.parse.unquote_plus(s)
 
     proxy_args = (
         socks_type,
@@ -1430,7 +1425,7 @@
                self.sock.settimeout(self.timeout)
            self.sock.connect((self.host, self.port))
 
-            if isinstance(self, compat_http_client.HTTPSConnection):
+            if isinstance(self, http.client.HTTPSConnection):
                if hasattr(self, '_context'):  # Python > 2.6
                    self.sock = self._context.wrap_socket(
                        self.sock, server_hostname=self.host)
@@ -1440,10 +1435,10 @@
     return SocksConnection
 
 
-class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
+class YoutubeDLHTTPSHandler(urllib.request.HTTPSHandler):
     def __init__(self, params, https_conn_class=None, *args, **kwargs):
-        compat_urllib_request.HTTPSHandler.__init__(self, *args, **kwargs)
-        self._https_conn_class = https_conn_class or compat_http_client.HTTPSConnection
+        urllib.request.HTTPSHandler.__init__(self, *args, **kwargs)
+        self._https_conn_class = https_conn_class or http.client.HTTPSConnection
         self._params = params
 
     def https_open(self, req):
@@ -1470,7 +1465,7 @@ class YoutubeDLHTTPSHandler(compat_urllib_request.HTTPSHandler):
             raise
 
 
-class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
+class YoutubeDLCookieJar(http.cookiejar.MozillaCookieJar):
     """
     See [1] for cookie file format.
 
@@ -1541,7 +1536,7 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
        if self.filename is not None:
            filename = self.filename
        else:
-            raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+            raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
 
        # Store session cookies with `expires` set to 0 instead of an empty string
        for cookie in self:
@@ -1558,7 +1553,7 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
        if self.filename is not None:
            filename = self.filename
        else:
-            raise ValueError(compat_cookiejar.MISSING_FILENAME_TEXT)
+            raise ValueError(http.cookiejar.MISSING_FILENAME_TEXT)
 
        def prepare_line(line):
            if line.startswith(self._HTTPONLY_PREFIX):
@@ -1568,10 +1563,10 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
                return line
            cookie_list = line.split('\t')
            if len(cookie_list) != self._ENTRY_LEN:
-                raise compat_cookiejar.LoadError('invalid length %d' % len(cookie_list))
+                raise http.cookiejar.LoadError('invalid length %d' % len(cookie_list))
            cookie = self._CookieFileEntry(*cookie_list)
            if cookie.expires_at and not cookie.expires_at.isdigit():
-                raise compat_cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
+                raise http.cookiejar.LoadError('invalid expires at %s' % cookie.expires_at)
            return line
 
        cf = io.StringIO()
        with self.open(filename) as f:
            for line in f:
                try:
                    cf.write(prepare_line(line))
-                except compat_cookiejar.LoadError as e:
+                except http.cookiejar.LoadError as e:
                    if f'{line.strip()} '[0] in '[{"':
-                        raise compat_cookiejar.LoadError(
+                        raise http.cookiejar.LoadError(
                            'Cookies file must be Netscape formatted, not JSON. See '
                            'https://github.com/ytdl-org/youtube-dl#how-do-i-pass-cookies-to-youtube-dl')
                    write_string(f'WARNING: skipping cookie file entry due to {e}: {line!r}\n')
@@ -1604,18 +1599,18 @@ class YoutubeDLCookieJar(compat_cookiejar.MozillaCookieJar):
            cookie.discard = True
 
 
-class YoutubeDLCookieProcessor(compat_urllib_request.HTTPCookieProcessor):
+class YoutubeDLCookieProcessor(urllib.request.HTTPCookieProcessor):
     def __init__(self, cookiejar=None):
-        compat_urllib_request.HTTPCookieProcessor.__init__(self, cookiejar)
+        urllib.request.HTTPCookieProcessor.__init__(self, cookiejar)
 
     def http_response(self, request, response):
-        return compat_urllib_request.HTTPCookieProcessor.http_response(self, request, response)
+        return urllib.request.HTTPCookieProcessor.http_response(self, request, response)
 
-    https_request = compat_urllib_request.HTTPCookieProcessor.http_request
+    https_request = urllib.request.HTTPCookieProcessor.http_request
     https_response = http_response
 
 
-class YoutubeDLRedirectHandler(compat_urllib_request.HTTPRedirectHandler):
+class YoutubeDLRedirectHandler(urllib.request.HTTPRedirectHandler):
     """YoutubeDL redirect handler
 
     The code is based on HTTPRedirectHandler implementation from CPython [1].
@@ -1630,7 +1625,7 @@
     3. https://github.com/ytdl-org/youtube-dl/issues/28768
     """
 
-    http_error_301 = http_error_303 = http_error_307 = http_error_308 = compat_urllib_request.HTTPRedirectHandler.http_error_302
+    http_error_301 = http_error_303 = http_error_307 = http_error_308 = urllib.request.HTTPRedirectHandler.http_error_302
 
     def redirect_request(self, req, fp, code, msg, headers, newurl):
         """Return a Request or None in response to a redirect.
@@ -1672,7 +1667,7 @@
         if code in (301, 302) and m == 'POST':
             m = 'GET'
 
-        return compat_urllib_request.Request(
+        return urllib.request.Request(
             newurl, headers=newheaders, origin_req_host=req.origin_req_host,
             unverifiable=True, method=m)
@@ -1967,7 +1962,7 @@ def bytes_to_intlist(bs):
 def intlist_to_bytes(xs):
     if not xs:
         return b''
-    return compat_struct_pack('%dB' % len(xs), *xs)
+    return struct.pack('%dB' % len(xs), *xs)
 
 
 class LockingUnsupportedError(OSError):
@@ -2427,12 +2422,12 @@ def urljoin(base, path):
     return compat_urlparse.urljoin(base, path)
 
 
-class HEADRequest(compat_urllib_request.Request):
+class HEADRequest(urllib.request.Request):
     def get_method(self):
         return 'HEAD'
 
 
-class PUTRequest(compat_urllib_request.Request):
+class PUTRequest(urllib.request.Request):
     def get_method(self):
         return 'PUT'
@@ -2484,7 +2479,7 @@ def url_or_none(url):
 
 
 def request_to_url(req):
-    if isinstance(req, compat_urllib_request.Request):
+    if isinstance(req, urllib.request.Request):
         return req.get_full_url()
     else:
         return req
@@ -3037,7 +3032,7 @@ def update_Request(req, url=None, data=None, headers={}, query={}):
    elif req_get_method == 'PUT':
        req_type = PUTRequest
    else:
-        req_type = compat_urllib_request.Request
+        req_type = urllib.request.Request
    new_req = req_type(
        req_url, data=req_data, headers=req_headers,
        origin_req_host=req.origin_req_host, unverifiable=req.unverifiable)
@@ -4636,20 +4631,20 @@ class GeoUtils:
        else:
            block = code_or_block
        addr, preflen = block.split('/')
-        addr_min = compat_struct_unpack('!L', socket.inet_aton(addr))[0]
+        addr_min = struct.unpack('!L', socket.inet_aton(addr))[0]
        addr_max = addr_min | (0xffffffff >> int(preflen))
        return compat_str(socket.inet_ntoa(
-            compat_struct_pack('!L', random.randint(addr_min, addr_max))))
+            struct.pack('!L', random.randint(addr_min, addr_max))))
 
 
-class PerRequestProxyHandler(compat_urllib_request.ProxyHandler):
+class PerRequestProxyHandler(urllib.request.ProxyHandler):
     def __init__(self, proxies=None):
         # Set default handlers
         for type in ('http', 'https'):
             setattr(self, '%s_open' % type,
                     lambda r, proxy='__noproxy__', type=type, meth=self.proxy_open:
                     meth(r, proxy, type))
-        compat_urllib_request.ProxyHandler.__init__(self, proxies)
+        urllib.request.ProxyHandler.__init__(self, proxies)
 
     def proxy_open(self, req, proxy, type):
         req_proxy = req.headers.get('Ytdl-request-proxy')
@@ -4663,7 +4658,7 @@
             req.add_header('Ytdl-socks-proxy', proxy)
             # yt-dlp's http/https handlers do wrapping the socket with socks
             return None
-        return compat_urllib_request.ProxyHandler.proxy_open(
+        return urllib.request.ProxyHandler.proxy_open(
             self, req, proxy, type)
@@ -4683,7 +4678,7 @@ def long_to_bytes(n, blocksize=0):
     s = b''
     n = int(n)
     while n > 0:
-        s = compat_struct_pack('>I', n & 0xffffffff) + s
+        s = struct.pack('>I', n & 0xffffffff) + s
         n = n >> 32
     # strip off leading zeros
     for i in range(len(s)):
@@ -4714,7 +4709,7 @@ def bytes_to_long(s):
         s = b'\000' * extra + s
         length = length + extra
     for i in range(0, length, 4):
-        acc = (acc << 32) + compat_struct_unpack('>I', s[i:i + 4])[0]
+        acc = (acc << 32) + struct.unpack('>I', s[i:i + 4])[0]
     return acc
@@ -4842,7 +4837,7 @@ def decode_png(png_data):
         raise OSError('Not a valid PNG file.')
 
     int_map = {1: '>B', 2: '>H', 4: '>I'}
-    unpack_integer = lambda x: compat_struct_unpack(int_map[len(x)], x)[0]
+    unpack_integer = lambda x: struct.unpack(int_map[len(x)], x)[0]
 
     chunks = []
@@ -4954,7 +4949,6 @@ def write_xattr(path, key, value):
         return
 
     # UNIX Method 1. Use xattrs/pyxattrs modules
-    from .dependencies import xattr
 
     setxattr = None
     if getattr(xattr, '_yt_dlp__identifier', None) == 'pyxattr':
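
A few illustrative notes on the stdlib APIs this migration leans on. First, the quoting helpers: several extractors above (playvid, shared, ynet, youtube) specifically need unquote_plus rather than unquote, and quote rather than quote_plus. A stdlib-only refresher; this snippet is illustrative and not part of the patch:

    import urllib.parse

    # quote() is for path segments: space -> %20, and '+' itself gets escaped.
    assert urllib.parse.quote('a b+c') == 'a%20b%2Bc'
    # quote_plus() is for form-encoded query strings: space -> '+'.
    assert urllib.parse.quote_plus('a b+c') == 'a+b%2Bc'
    # unquote_plus() reverses both '+' and percent-escapes; plain unquote() leaves '+' alone.
    assert urllib.parse.unquote_plus('a+b%2Bc') == 'a b+c'
    assert urllib.parse.unquote('a+b') == 'a+b'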
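The socks.py hunks are a mechanical compat_struct_* -> struct swap, but the packet framing is easier to sanity-check in isolation. A minimal sketch of the SOCKS4 CONNECT framing used above (build_socks4_connect and parse_socks4_reply are hypothetical helper names for this note, not yt-dlp API):

    import socket
    import struct

    SOCKS4_VERSION = 4
    CMD_CONNECT = 0x01

    def build_socks4_connect(port, ipaddr, username=b''):
        # '!BBH' = network byte order: version (1 byte), command (1 byte),
        # destination port (2 bytes); then the 4-byte IPv4 address and a
        # NUL-terminated user ID, as in sockssocket._setup_socks4 above.
        packet = struct.pack('!BBH', SOCKS4_VERSION, CMD_CONNECT, port) + ipaddr
        return packet + username + b'\x00'

    def parse_socks4_reply(data):
        # The 8-byte reply: version, result code, port, IPv4 address as uint32.
        return struct.unpack('!BBHI', data)

    packet = build_socks4_connect(8080, socket.inet_aton('127.0.0.1'))
    assert packet == b'\x04\x01\x1f\x90\x7f\x00\x00\x01\x00'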
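Likewise, rtve.py's _decrypt_url walks a PNG chunk stream with struct.unpack('!I', ...). Pulled out of the extractor, the walk looks like this -- a sketch assuming a well-formed PNG (iter_png_chunks is a hypothetical name; the real method accumulates the payload it needs instead of yielding chunks):

    import io
    import struct

    def iter_png_chunks(png_data):
        # Skip the 8-byte PNG signature; each chunk is then:
        # 4-byte big-endian length, 4-byte type, payload, 4-byte CRC.
        stream = io.BytesIO(png_data[8:])
        while True:
            length = struct.unpack('!I', stream.read(4))[0]
            chunk_type = stream.read(4)
            if chunk_type == b'IEND':
                break
            payload = stream.read(length)
            stream.read(4)  # CRC, not verified here
            yield chunk_type, payload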
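Finally, the videa.py hunk touches the keystream loop of what is a textbook RC4 implementation. For orientation, the whole cipher in the same style -- a standalone sketch, not yt-dlp's exact method (it assumes bytes input, which makes the original's compat_ord unnecessary):

    import struct

    def rc4(cipher_text, key):
        # Key-scheduling algorithm (KSA): permute S under the key.
        S = list(range(256))
        j = 0
        for i in range(256):
            j = (j + S[i] + key[i % len(key)]) % 256
            S[i], S[j] = S[j], S[i]
        # Pseudo-random generation algorithm (PRGA) -- the part shown in the diff:
        # each keystream byte k is XORed with one ciphertext byte.
        res = b''
        i = j = 0
        for m in range(len(cipher_text)):
            i = (i + 1) % 256
            j = (j + S[i]) % 256
            S[i], S[j] = S[j], S[i]
            k = S[(S[i] + S[j]) % 256]
            res += struct.pack('B', k ^ cipher_text[m])
        return res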