summaryrefslogtreecommitdiffstats
path: root/yt_dlp/extractor/swearnet.py
diff options
context:
space:
mode:
Diffstat (limited to '')
-rw-r--r--yt_dlp/extractor/swearnet.py64
1 file changed, 15 insertions, 49 deletions
diff --git a/yt_dlp/extractor/swearnet.py b/yt_dlp/extractor/swearnet.py
index b4835c5..2d6fb3e 100644
--- a/yt_dlp/extractor/swearnet.py
+++ b/yt_dlp/extractor/swearnet.py
@@ -1,55 +1,31 @@
-from .common import InfoExtractor
-from ..utils import ExtractorError, int_or_none, traverse_obj
+from .vidyard import VidyardBaseIE
+from ..utils import ExtractorError, int_or_none, make_archive_id
-class SwearnetEpisodeIE(InfoExtractor):
+class SwearnetEpisodeIE(VidyardBaseIE):
_VALID_URL = r'https?://www\.swearnet\.com/shows/(?P<id>[\w-]+)/seasons/(?P<season_num>\d+)/episodes/(?P<episode_num>\d+)'
_TESTS = [{
'url': 'https://www.swearnet.com/shows/gettin-learnt-with-ricky/seasons/1/episodes/1',
'info_dict': {
- 'id': '232819',
+ 'id': 'wicK2EOzjOdxkUXGDIgcPw',
+ 'display_id': '232819',
'ext': 'mp4',
'episode_number': 1,
'episode': 'Episode 1',
'duration': 719,
- 'description': 'md5:c48ef71440ce466284c07085cd7bd761',
+ 'description': r're:Are you drunk and high and craving a grilled cheese sandwich.+',
'season': 'Season 1',
'title': 'Episode 1 - Grilled Cheese Sammich',
'season_number': 1,
- 'thumbnail': 'https://cdn.vidyard.com/thumbnails/232819/_RX04IKIq60a2V6rIRqq_Q_small.jpg',
+ 'thumbnail': 'https://cdn.vidyard.com/thumbnails/custom/0dd74f9b-388a-452e-b570-b407fb64435b_small.jpg',
+ 'tags': ['Getting Learnt with Ricky', 'drunk', 'grilled cheese', 'high'],
+ '_old_archive_ids': ['swearnetepisode 232819'],
},
}]
- def _get_formats_and_subtitle(self, video_source, video_id):
- video_source = video_source or {}
- formats, subtitles = [], {}
- for key, value in video_source.items():
- if key == 'hls':
- for video_hls in value:
- fmts, subs = self._extract_m3u8_formats_and_subtitles(video_hls.get('url'), video_id)
- formats.extend(fmts)
- self._merge_subtitles(subs, target=subtitles)
- else:
- formats.extend({
- 'url': video_mp4.get('url'),
- 'ext': 'mp4',
- } for video_mp4 in value)
-
- return formats, subtitles
-
- def _get_direct_subtitle(self, caption_json):
- subs = {}
- for caption in caption_json:
- subs.setdefault(caption.get('language') or 'und', []).append({
- 'url': caption.get('vttUrl'),
- 'name': caption.get('name'),
- })
-
- return subs
-
def _real_extract(self, url):
- display_id, season_number, episode_number = self._match_valid_url(url).group('id', 'season_num', 'episode_num')
- webpage = self._download_webpage(url, display_id)
+ slug, season_number, episode_number = self._match_valid_url(url).group('id', 'season_num', 'episode_num')
+ webpage = self._download_webpage(url, slug)
try:
external_id = self._search_regex(r'externalid\s*=\s*"([^"]+)', webpage, 'externalid')
@@ -58,22 +34,12 @@ class SwearnetEpisodeIE(InfoExtractor):
self.raise_login_required()
raise
- json_data = self._download_json(
- f'https://play.vidyard.com/player/{external_id}.json', display_id)['payload']['chapters'][0]
-
- formats, subtitles = self._get_formats_and_subtitle(json_data['sources'], display_id)
- self._merge_subtitles(self._get_direct_subtitle(json_data.get('captions')), target=subtitles)
+ info = self._process_video_json(self._fetch_video_json(external_id)['chapters'][0], external_id)
+ if info.get('display_id'):
+ info['_old_archive_ids'] = [make_archive_id(self, info['display_id'])]
return {
- 'id': str(json_data['videoId']),
- 'title': json_data.get('name') or self._html_search_meta(['og:title', 'twitter:title'], webpage),
- 'description': (json_data.get('description')
- or self._html_search_meta(['og:description', 'twitter:description'], webpage)),
- 'duration': int_or_none(json_data.get('seconds')),
- 'formats': formats,
- 'subtitles': subtitles,
+ **info,
'season_number': int_or_none(season_number),
'episode_number': int_or_none(episode_number),
- 'thumbnails': [{'url': thumbnail_url}
- for thumbnail_url in traverse_obj(json_data, ('thumbnailUrls', ...))],
}