# yt-dlp extractor module for aparat.com (yt_dlp/extractor/aparat.py)

from .common import InfoExtractor
from ..utils import (
    get_element_by_id,
    int_or_none,
    merge_dicts,
    mimetype2ext,
    traverse_obj,
    url_or_none,
    urljoin,
)
class AparatIE(InfoExtractor):
    """Extractor for single videos hosted on aparat.com.

    Matches both canonical watch URLs (``/v/<id>``) and embed URLs
    (``/video/video/embed/videohash/<id>``).
    """

    _VALID_URL = r'https?://(?:www\.)?aparat\.com/(?:v/|video/video/embed/videohash/)(?P<id>[a-zA-Z0-9]+)'
    _EMBED_REGEX = [r'<iframe .*?src="(?P<url>http://www\.aparat\.com/video/[^"]+)"']

    _TESTS = [{
        'url': 'http://www.aparat.com/v/wP8On',
        'md5': '131aca2e14fe7c4dcb3c4877ba300c89',
        'info_dict': {
            'id': 'wP8On',
            'ext': 'mp4',
            'title': 'تیم گلکسی 11 - زومیت',
            'description': 'md5:096bdabcdcc4569f2b8a5e903a3b3028',
            'duration': 231,
            'timestamp': 1387394859,
            'upload_date': '20131218',
            'view_count': int,
        },
    }, {
        # multiple formats
        'url': 'https://www.aparat.com/v/8dflw/',
        'only_matching': True,
    }]

    def _parse_options(self, webpage, video_id, fatal=True):
        """Extract the player ``options`` JSON object from a page.

        Returns an empty dict when the page has no options blob.
        NOTE(review): ``fatal`` is accepted for call-site compatibility but
        has no effect — ``default='{}'`` means the regex search never raises.
        """
        return self._parse_json(self._search_regex(
            r'options\s*=\s*({.+?})\s*;', webpage, 'options', default='{}'), video_id)

    def _real_extract(self, url):
        video_id = self._match_id(url)

        # The watch page, if reachable, provides richer metadata (JSON-LD,
        # title element) than the bare embed page, so try it first.
        webpage = self._download_webpage(url, video_id, fatal=False)
        options = self._parse_options(webpage, video_id, fatal=False)

        # Fall back to the embed page when the watch page yielded no options.
        if not options:
            webpage = self._download_webpage(
                'http://www.aparat.com/video/video/embed/vt/frame/showvideo/yes/videohash/' + video_id,
                video_id, 'Downloading embed webpage')
            options = self._parse_options(webpage, video_id)

        formats = []
        # multiSRC is a list of source groups, each a list of source dicts.
        for sources in (options.get('multiSRC') or []):
            for item in sources:
                if not isinstance(item, dict):
                    continue
                file_url = url_or_none(item.get('src'))
                if not file_url:
                    continue
                item_type = item.get('type')
                if item_type == 'application/vnd.apple.mpegurl':
                    formats.extend(self._extract_m3u8_formats(
                        file_url, video_id, 'mp4',
                        entry_protocol='m3u8_native', m3u8_id='hls',
                        fatal=False))
                else:
                    ext = mimetype2ext(item.get('type'))
                    label = item.get('label')
                    formats.append({
                        'url': file_url,
                        'ext': ext,
                        'format_id': f'http-{label or ext}',
                        # Labels look like "480p"; derive height when present
                        'height': int_or_none(self._search_regex(
                            r'(\d+)[pP]', label or '', 'height',
                            default=None)),
                    })

        info = self._search_json_ld(webpage, video_id, default={})

        if not info.get('title'):
            info['title'] = (
                get_element_by_id('videoTitle', webpage)
                or self._html_search_meta(
                    ['og:title', 'twitter:title', 'DC.Title', 'title'], webpage, fatal=True))

        return merge_dicts(info, {
            'id': video_id,
            'thumbnail': url_or_none(options.get('poster')),
            'duration': int_or_none(options.get('duration')),
            'formats': formats,
        })
class AparatPlaylistIE(InfoExtractor):
    """Extractor for aparat.com playlists (``/playlist/<numeric id>``)."""

    _VALID_URL = r'https?://(?:www\.)?aparat\.com/playlist/(?P<id>\d+)'
    _TESTS = [{
        'url': 'https://www.aparat.com/playlist/1001307',
        'info_dict': {
            'id': '1001307',
            'title': 'مبانی یادگیری عمیق',
            'description': '',
            'thumbnails': 'count:2',
            'channel': 'mrmohammadi_iust',
            'channel_id': '6463423',
            'channel_url': 'https://www.aparat.com/mrmohammadi_iust',
            'channel_follower_count': int,
        },
        'playlist_mincount': 1,
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.aparat.com/playlist/1234567',
        'info_dict': {
            'id': '1234567',
            'title': 'ساخت اکانت',
            'description': '',
            'thumbnails': 'count:0',
            'channel': 'reza.shadow',
            'channel_id': '8159952',
            'channel_url': 'https://www.aparat.com/reza.shadow',
            'channel_follower_count': int,
        },
        'playlist_count': 0,
        'params': {
            'skip_download': True,
        },
    }, {
        'url': 'https://www.aparat.com/playlist/1256882',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        playlist_id = self._match_id(url)

        info = self._download_json(
            f'https://www.aparat.com/api/fa/v1/video/playlist/one/playlist_id/{playlist_id}', playlist_id)
        info_dict = traverse_obj(info, ('data', 'attributes', {
            'playlist_title': 'title',
            'description': 'description',
        }), default={})
        # Keep whichever of the two poster sizes are valid URLs
        info_dict.update(thumbnails=traverse_obj([
            traverse_obj(info, ('data', 'attributes', {'url': ('big_poster', {url_or_none})})),
            traverse_obj(info, ('data', 'attributes', {'url': ('small_poster', {url_or_none})})),
        ], (...), default=[]))
        # Channel metadata lives in the side-loaded "included" array
        info_dict.update(**traverse_obj(info, ('included', lambda _, v: v['type'] == 'channel', 'attributes', {
            'channel': 'username',
            'channel_id': 'id',
            'channel_url': ('link', filter, {urljoin(base=url)}),  # starts with a slash
            'channel_follower_count': ('follower_cnt', {int_or_none}),
        }), get_all=False))

        return self.playlist_result(traverse_obj(info, (
            'included', lambda _, v: v['type'] == 'Video', 'attributes', 'uid',
            {lambda uid: self.url_result(f'https://www.aparat.com/v/{uid}?playlist={playlist_id}')},
        ), default=[]), playlist_id, **info_dict)