From a0758dfa1afd5b04773ba3b3b17ac71d22054821 Mon Sep 17 00:00:00 2001 From: felix Date: Wed, 5 Aug 2015 22:40:46 +0200 Subject: [filmon] new extractor --- youtube_dl/extractor/extractors.py | 1 + 1 file changed, 1 insertion(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 578359a5e..c9b9ebd23 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -287,6 +287,7 @@ from .fc2 import ( FC2EmbedIE, ) from .fczenit import FczenitIE +from .filmon import FilmOnIE, FilmOnVODIE from .firstpost import FirstpostIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE -- cgit 1.4.1 From cd55c6ccd7b9cd0c48d475330c40f382eb0bc625 Mon Sep 17 00:00:00 2001 From: sh!zeeg Date: Wed, 4 Jan 2017 01:51:08 +0300 Subject: [beam:live] Add extractor --- youtube_dl/extractor/beampro.py | 82 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 83 insertions(+) create mode 100644 youtube_dl/extractor/beampro.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/beampro.py b/youtube_dl/extractor/beampro.py new file mode 100644 index 000000000..dc0a2b4af --- /dev/null +++ b/youtube_dl/extractor/beampro.py @@ -0,0 +1,82 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + clean_html, + compat_str, + int_or_none, + parse_iso8601, + try_get, +) + + +class BeamProLiveIE(InfoExtractor): + IE_NAME = 'Beam:live' + _VALID_URL = r'https?://(?:\w+.)?beam.pro/(?P[^?]+)$' + _API_CHANNEL = 'https://beam.pro/api/v1/channels/{0}' + _API_MANIFEST = 'https://beam.pro/api/v1/channels/{0}/manifest.m3u8' + _RATINGS = {'family': 0, 'teen': 13, '18+': 18} + + _TEST = { + 'url': 'http://www.beam.pro/niterhayven', + 'info_dict': { + 'id': '261562', + 'ext': 'mp4', + 'uploader': 'niterhayven', + 'timestamp': 1483477281, + 'age_limit': 18, + 'title': 'Introducing The Witcher 3 // The Grind Starts Now!', + 'thumbnail': r're:https://.*\.jpg$', + 'upload_date': '20170103', + 'uploader_id': 373396, + 'description': 'md5:0b161ac080f15fe05d18a07adb44a74d', + 'is_live': True, + }, + 'skip': 'niterhayven is offline', + 'params': { + 'skip_download': True, + }, + } + + def _real_extract(self, url): + channel_id = self._match_id(url) + chan_data = self._download_json(self._API_CHANNEL.format(channel_id), channel_id) + + if not chan_data.get('online'): + raise ExtractorError('{0} is offline'.format(channel_id), expected=True) + + formats = self._extract_m3u8_formats( + self._API_MANIFEST.format(chan_data.get('id')), channel_id, ext='mp4') + + self._sort_formats(formats) + info = {} + info['formats'] = formats + if chan_data: + info.update(self._extract_info(chan_data)) + if not info.get('title'): + info['title'] = self._live_title(channel_id) + if not info.get('id'): # barely possible but just in case + info['id'] = compat_str(abs(hash(channel_id)) % (10 ** 8)) + + return info + + def _extract_info(self, info): + thumbnail = try_get(info, lambda x: x['thumbnail']['url'], compat_str) + username = try_get(info, lambda x: x['user']['url'], compat_str) + video_id = compat_str(info['id']) if info.get('id') else None + rating = info.get('audience') + + return { + 'id': video_id, + 'title': info.get('name'), + 'description': clean_html(info.get('description')), + 'age_limit': self._RATINGS[rating] if rating in self._RATINGS else None, + 'is_live': True if 
info.get('online') else False, + 'timestamp': parse_iso8601(info.get('updatedAt')), + 'uploader': info.get('token') or username, + 'uploader_id': int_or_none(info.get('userId')), + 'view_count': int_or_none(info.get('viewersTotal')), + 'thumbnail': thumbnail, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 5ba8efb0e..9d0610d21 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -88,6 +88,7 @@ from .bbc import ( BBCCoUkPlaylistIE, BBCIE, ) +from .beampro import BeamProLiveIE from .beeg import BeegIE from .behindkink import BehindKinkIE from .bellmedia import BellMediaIE -- cgit 1.4.1 From e84495cd8d7bdb89bbfe233263bd8ad0b448f8cc Mon Sep 17 00:00:00 2001 From: Alex Seiler Date: Sat, 21 Jan 2017 15:23:26 +0100 Subject: [azmedien] Add extractor (closes #11785) --- youtube_dl/extractor/azmedientv.py | 87 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 4 ++ 2 files changed, 91 insertions(+) create mode 100644 youtube_dl/extractor/azmedientv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/azmedientv.py b/youtube_dl/extractor/azmedientv.py new file mode 100644 index 000000000..51d46fb94 --- /dev/null +++ b/youtube_dl/extractor/azmedientv.py @@ -0,0 +1,87 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .kaltura import KalturaIE +from ..utils import get_element_by_class + + +class AZMedienTVIE(InfoExtractor): + IE_DESC = 'telezueri.ch, telebaern.tv and telem1.ch videos' + _VALID_URL = r'http://(?:www\.)?(?:telezueri\.ch|telebaern\.tv|telem1\.ch)/[0-9]+-show-[^/#]+(?:/[0-9]+-episode-[^/#]+(?:/[0-9]+-segment-(?:[^/#]+#)?|#)|#)(?P[^#]+)' + + _TESTS = [{ + # URL with 'segment' + 'url': 'http://www.telezueri.ch/62-show-zuerinews/13772-episode-sonntag-18-dezember-2016/32419-segment-massenabweisungen-beim-hiltl-club-wegen-pelzboom', + 'md5': 'fda85ada1299cee517a622bfbc5f6b66', + 'info_dict': { + 'id': '1_2444peh4', + 'ext': 'mov', + 'title': 'Massenabweisungen beim Hiltl Club wegen Pelzboom', + 'description': 'md5:9ea9dd1b159ad65b36ddcf7f0d7c76a8', + 'uploader_id': 'TeleZ?ri', + 'upload_date': '20161218', + 'timestamp': 1482084490, + } + }, { + # URL with 'segment' and fragment: + 'url': 'http://www.telebaern.tv/118-show-news/14240-episode-dienstag-17-januar-2017/33666-segment-achtung-gefahr#zu-wenig-pflegerinnen-und-pfleger', + 'only_matching': True + }, { + # URL with 'episode' and fragment: + 'url': 'http://www.telem1.ch/47-show-sonntalk/13986-episode-soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz#soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz', + 'only_matching': True + }, { + # URL with 'show' and fragment: + 'url': 'http://www.telezueri.ch/66-show-sonntalk#burka-plakate-trump-putin-china-besuch', + 'only_matching': True + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + kaltura_partner_id = self._html_search_regex( + r']+src=["\']https?://www\.kaltura\.com/.*/partner_id/([0-9]+)', + webpage, 'Kaltura partner ID') + kaltura_entry_id = self._html_search_regex( + r']+data-id=["\'](.*?)["\'][^>]+data-slug=["\']%s' % video_id, + webpage, 'Kaltura entry ID') + + return self.url_result( + 'kaltura:%s:%s' % (kaltura_partner_id, kaltura_entry_id), + ie=KalturaIE.ie_key()) + + +class AZMedienTVShowIE(InfoExtractor): + IE_DESC = 'telezueri.ch, telebaern.tv and telem1.ch shows' + _VALID_URL = 
r'http://(?:www\.)?(?:telezueri\.ch|telebaern\.tv|telem1\.ch)/(?P[0-9]+-show-[^/#]+(?:/[0-9]+-episode-[^/#]+)?)$' + + _TESTS = [{ + # URL with 'episode': + 'url': 'http://www.telebaern.tv/118-show-news/13735-episode-donnerstag-15-dezember-2016', + 'info_dict': { + 'id': '118-show-news/13735-episode-donnerstag-15-dezember-2016', + 'title': 'News', + }, + 'playlist_count': 9, + }, { + # URL with 'show' only: + 'url': 'http://www.telezueri.ch/86-show-talktaeglich', + 'only_matching': True + }] + + def _real_extract(self, url): + show_id = self._match_id(url) + webpage = self._download_webpage(url, show_id) + + title = get_element_by_class('title-block-cell', webpage) + if title: + title = title.strip() + + entries = [self.url_result(m.group('url'), ie=AZMedienTVIE.ie_key()) for m in re.finditer( + r']+data-real=["\'](?P.+?)["\']', webpage)] + + return self.playlist_result( + entries, show_id, title) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 9d0610d21..4cfb3c70f 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -77,6 +77,10 @@ from .awaan import ( AWAANLiveIE, AWAANSeasonIE, ) +from .azmedientv import ( + AZMedienTVIE, + AZMedienTVShowIE, +) from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE from .bambuser import BambuserIE, BambuserChannelIE -- cgit 1.4.1 From 94629e537f2f6ed80b19e3863456f9ba8073af36 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sun, 22 Jan 2017 02:15:20 +0700 Subject: [azmedien] Improve (closes #11784) --- youtube_dl/extractor/azmedien.py | 132 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/azmedientv.py | 87 ------------------------ youtube_dl/extractor/extractors.py | 6 +- 3 files changed, 135 insertions(+), 90 deletions(-) create mode 100644 youtube_dl/extractor/azmedien.py delete mode 100644 youtube_dl/extractor/azmedientv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/azmedien.py b/youtube_dl/extractor/azmedien.py new file mode 100644 index 000000000..059dc6e4b --- /dev/null +++ b/youtube_dl/extractor/azmedien.py @@ -0,0 +1,132 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .kaltura import KalturaIE +from ..utils import ( + get_element_by_class, + strip_or_none, +) + + +class AZMedienBaseIE(InfoExtractor): + def _kaltura_video(self, partner_id, entry_id): + return self.url_result( + 'kaltura:%s:%s' % (partner_id, entry_id), ie=KalturaIE.ie_key(), + video_id=entry_id) + + +class AZMedienIE(AZMedienBaseIE): + IE_DESC = 'AZ Medien videos' + _VALID_URL = r'''(?x) + https?:// + (?:www\.)? 
+ (?: + telezueri\.ch| + telebaern\.tv| + telem1\.ch + )/ + [0-9]+-show-[^/\#]+ + (?: + /[0-9]+-episode-[^/\#]+ + (?: + /[0-9]+-segment-(?:[^/\#]+\#)?| + \# + )| + \# + ) + (?P[^\#]+) + ''' + + _TESTS = [{ + # URL with 'segment' + 'url': 'http://www.telezueri.ch/62-show-zuerinews/13772-episode-sonntag-18-dezember-2016/32419-segment-massenabweisungen-beim-hiltl-club-wegen-pelzboom', + 'info_dict': { + 'id': '1_2444peh4', + 'ext': 'mov', + 'title': 'Massenabweisungen beim Hiltl Club wegen Pelzboom', + 'description': 'md5:9ea9dd1b159ad65b36ddcf7f0d7c76a8', + 'uploader_id': 'TeleZ?ri', + 'upload_date': '20161218', + 'timestamp': 1482084490, + }, + 'params': { + 'skip_download': True, + }, + }, { + # URL with 'segment' and fragment: + 'url': 'http://www.telebaern.tv/118-show-news/14240-episode-dienstag-17-januar-2017/33666-segment-achtung-gefahr#zu-wenig-pflegerinnen-und-pfleger', + 'only_matching': True + }, { + # URL with 'episode' and fragment: + 'url': 'http://www.telem1.ch/47-show-sonntalk/13986-episode-soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz#soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz', + 'only_matching': True + }, { + # URL with 'show' and fragment: + 'url': 'http://www.telezueri.ch/66-show-sonntalk#burka-plakate-trump-putin-china-besuch', + 'only_matching': True + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + partner_id = self._search_regex( + r']+src=["\'](?:https?:)?//(?:[^/]+\.)?kaltura\.com(?:/[^/]+)*/(?:p|partner_id)/([0-9]+)', + webpage, 'kaltura partner id') + entry_id = self._html_search_regex( + r']+data-id=(["\'])(?P(?:(?!\1).)+)\1[^>]+data-slug=["\']%s' + % re.escape(video_id), webpage, 'kaltura entry id', group='id') + + return self._kaltura_video(partner_id, entry_id) + + +class AZMedienShowIE(AZMedienBaseIE): + IE_DESC = 'AZ Medien shows' + _VALID_URL = r'https?://(?:www\.)?(?:telezueri\.ch|telebaern\.tv|telem1\.ch)/(?P[0-9]+-show-[^/#]+(?:/[0-9]+-episode-[^/#]+)?)$' + + _TESTS = [{ + # URL with 'episode' + 'url': 'http://www.telebaern.tv/118-show-news/13735-episode-donnerstag-15-dezember-2016', + 'info_dict': { + 'id': '118-show-news/13735-episode-donnerstag-15-dezember-2016', + 'title': 'News - Donnerstag, 15. 
Dezember 2016', + }, + 'playlist_count': 9, + }, { + # URL with 'show' only + 'url': 'http://www.telezueri.ch/86-show-talktaeglich', + 'only_matching': True + }] + + def _real_extract(self, url): + show_id = self._match_id(url) + webpage = self._download_webpage(url, show_id) + + entries = [] + + partner_id = self._search_regex( + r'src=["\'](?:https?:)?//(?:[^/]+\.)kaltura\.com/(?:[^/]+/)*(?:p|partner_id)/(\d+)', + webpage, 'kaltura partner id', default=None) + + if partner_id: + entries = [ + self._kaltura_video(partner_id, m.group('id')) + for m in re.finditer( + r'data-id=(["\'])(?P(?:(?!\1).)+)\1', webpage)] + + if not entries: + entries = [ + self.url_result(m.group('url'), ie=AZMedienIE.ie_key()) + for m in re.finditer( + r']+data-real=(["\'])(?Phttp.+?)\1', webpage)] + + title = self._search_regex( + r'episodeShareTitle\s*=\s*(["\'])(?P(?:(?!\1).)+)\1', + webpage, 'title', + default=strip_or_none(get_element_by_class( + 'title-block-cell', webpage)), group='title') + + return self.playlist_result(entries, show_id, title) diff --git a/youtube_dl/extractor/azmedientv.py b/youtube_dl/extractor/azmedientv.py deleted file mode 100644 index 51d46fb94..000000000 --- a/youtube_dl/extractor/azmedientv.py +++ /dev/null @@ -1,87 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from .kaltura import KalturaIE -from ..utils import get_element_by_class - - -class AZMedienTVIE(InfoExtractor): - IE_DESC = 'telezueri.ch, telebaern.tv and telem1.ch videos' - _VALID_URL = r'http://(?:www\.)?(?:telezueri\.ch|telebaern\.tv|telem1\.ch)/[0-9]+-show-[^/#]+(?:/[0-9]+-episode-[^/#]+(?:/[0-9]+-segment-(?:[^/#]+#)?|#)|#)(?P<id>[^#]+)' - - _TESTS = [{ - # URL with 'segment' - 'url': 'http://www.telezueri.ch/62-show-zuerinews/13772-episode-sonntag-18-dezember-2016/32419-segment-massenabweisungen-beim-hiltl-club-wegen-pelzboom', - 'md5': 'fda85ada1299cee517a622bfbc5f6b66', - 'info_dict': { - 'id': '1_2444peh4', - 'ext': 'mov', - 'title': 'Massenabweisungen beim Hiltl Club wegen Pelzboom', - 'description': 'md5:9ea9dd1b159ad65b36ddcf7f0d7c76a8', - 'uploader_id': 'TeleZ?ri', - 'upload_date': '20161218', - 'timestamp': 1482084490, - } - }, { - # URL with 'segment' and fragment: - 'url': 'http://www.telebaern.tv/118-show-news/14240-episode-dienstag-17-januar-2017/33666-segment-achtung-gefahr#zu-wenig-pflegerinnen-und-pfleger', - 'only_matching': True - }, { - # URL with 'episode' and fragment: - 'url': 'http://www.telem1.ch/47-show-sonntalk/13986-episode-soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz#soldaten-fuer-grenzschutz-energiestrategie-obama-bilanz', - 'only_matching': True - }, { - # URL with 'show' and fragment: - 'url': 'http://www.telezueri.ch/66-show-sonntalk#burka-plakate-trump-putin-china-besuch', - 'only_matching': True - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - kaltura_partner_id = self._html_search_regex( - r'<script[^>]+src=["\']https?://www\.kaltura\.com/.*/partner_id/([0-9]+)', - webpage, 'Kaltura partner ID') - kaltura_entry_id = self._html_search_regex( - r'<a[^>]+data-id=["\'](.*?)["\'][^>]+data-slug=["\']%s' % video_id, - webpage, 'Kaltura entry ID') - - return self.url_result( - 'kaltura:%s:%s' % (kaltura_partner_id, kaltura_entry_id), - ie=KalturaIE.ie_key()) - - -class AZMedienTVShowIE(InfoExtractor): - IE_DESC = 'telezueri.ch, telebaern.tv and telem1.ch shows' - _VALID_URL = 
r'http://(?:www\.)?(?:telezueri\.ch|telebaern\.tv|telem1\.ch)/(?P<id>[0-9]+-show-[^/#]+(?:/[0-9]+-episode-[^/#]+)?)$' - - _TESTS = [{ - # URL with 'episode': - 'url': 'http://www.telebaern.tv/118-show-news/13735-episode-donnerstag-15-dezember-2016', - 'info_dict': { - 'id': '118-show-news/13735-episode-donnerstag-15-dezember-2016', - 'title': 'News', - }, - 'playlist_count': 9, - }, { - # URL with 'show' only: - 'url': 'http://www.telezueri.ch/86-show-talktaeglich', - 'only_matching': True - }] - - def _real_extract(self, url): - show_id = self._match_id(url) - webpage = self._download_webpage(url, show_id) - - title = get_element_by_class('title-block-cell', webpage) - if title: - title = title.strip() - - entries = [self.url_result(m.group('url'), ie=AZMedienTVIE.ie_key()) for m in re.finditer( - r'<a href=["\']#["\'][^>]+data-real=["\'](?P<url>.+?)["\']', webpage)] - - return self.playlist_result( - entries, show_id, title) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 4cfb3c70f..de5f94738 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -77,9 +77,9 @@ from .awaan import ( AWAANLiveIE, AWAANSeasonIE, ) -from .azmedientv import ( - AZMedienTVIE, - AZMedienTVShowIE, +from .azmedien import ( + AZMedienIE, + AZMedienShowIE, ) from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE -- cgit 1.4.1 From 8d1fbe0cb20fdfab8487bb478c2a002f12c1a5d9 Mon Sep 17 00:00:00 2001 From: einstein95 <einstein95@users.noreply.github.com> Date: Sat, 21 Jan 2017 20:02:55 +1300 Subject: [pornflip] Add extractor (closes #11556) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/pornflip.py | 59 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 60 insertions(+) create mode 100644 youtube_dl/extractor/pornflip.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index de5f94738..cfddf5b92 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -724,6 +724,7 @@ from .polskieradio import ( ) from .porn91 import Porn91IE from .porncom import PornComIE +from .pornflip import PornFlipIE from .pornhd import PornHdIE from .pornhub import ( PornHubIE, diff --git a/youtube_dl/extractor/pornflip.py b/youtube_dl/extractor/pornflip.py new file mode 100644 index 000000000..b6077f7cb --- /dev/null +++ b/youtube_dl/extractor/pornflip.py @@ -0,0 +1,59 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import ( + compat_parse_qs, +) +from ..utils import ( + int_or_none, + try_get, + RegexNotFoundError, +) + + +class PornFlipIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?pornflip\.com/v/(?P<id>[0-9A-Za-z]{11})' + _TEST = { + 'url': 'https://www.pornflip.com/v/wz7DfNhMmep', + 'md5': '98c46639849145ae1fd77af532a9278c', + 'info_dict': { + 'id': 'wz7DfNhMmep', + 'ext': 'mp4', + 'title': '2 Amateurs swallow make his dream cumshots true', + 'uploader': 'figifoto', + 'thumbnail': r're:^https?://.*\.jpg$', + 'age_limit': 18, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + uploader = self._html_search_regex( + r'<span class="name">\s+<a class="ajax" href=".+>\s+<strong>([^<]+)<', webpage, 'uploader', fatal=False) + flashvars = compat_parse_qs(self._html_search_regex( + r'<embed.+?flashvars="([^"]+)"', + webpage, 'flashvars')) + title = 
flashvars['video_vars[title]'][0] + thumbnail = try_get(flashvars, lambda x: x['video_vars[big_thumb]'][0]) + formats = [] + for k, v in flashvars.items(): + height = self._search_regex(r'video_vars\[video_urls\]\[(\d+).+?\]', k, 'height', default=None) + if height: + url = v[0] + formats.append({ + 'height': int_or_none(height), + 'url': url + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'formats': formats, + 'title': title, + 'uploader': uploader, + 'thumbnail': thumbnail, + 'age_limit': 18, + } -- cgit 1.4.1 From bc35ed3fb6fcae88d59fd440b505b9e1a7cf112e Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Mon, 23 Jan 2017 23:33:30 +0800 Subject: [nextmedia] Add support for NextTV (壹電視) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- ChangeLog | 6 +++++ youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/nextmedia.py | 54 +++++++++++++++++++++++++++++++++++++- 3 files changed, 60 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index beea17e54..ba2f5cffc 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractors ++ [nextmedia] Add support for NextTV (壹電視) + + version 2017.01.22 Extractors diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index cfddf5b92..e23b5d0f6 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -598,6 +598,7 @@ from .nextmedia import ( NextMediaIE, NextMediaActionNewsIE, AppleDailyIE, + NextTVIE, ) from .nfb import NFBIE from .nfl import NFLIE diff --git a/youtube_dl/extractor/nextmedia.py b/youtube_dl/extractor/nextmedia.py index 626ed8b49..680f03aad 100644 --- a/youtube_dl/extractor/nextmedia.py +++ b/youtube_dl/extractor/nextmedia.py @@ -3,7 +3,14 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse -from ..utils import parse_iso8601 +from ..utils import ( + clean_html, + get_element_by_class, + int_or_none, + parse_iso8601, + remove_start, + unified_timestamp, +) class NextMediaIE(InfoExtractor): @@ -184,3 +191,48 @@ class AppleDailyIE(NextMediaIE): def _fetch_description(self, page): return self._html_search_meta('description', page, 'news description') + + +class NextTVIE(InfoExtractor): + IE_DESC = '壹電視' + _VALID_URL = r'https?://(?:www\.)?nexttv\.com\.tw/(?:[^/]+/)+(?P<id>\d+)' + + _TEST = { + 'url': 'http://www.nexttv.com.tw/news/realtime/politics/11779671', + 'info_dict': { + 'id': '11779671', + 'ext': 'mp4', + 'title': '「超收稅」近4千億! 
藍議員籲發消費券', + 'thumbnail': r're:^https?://.*\.jpg$', + 'timestamp': 1484825400, + 'upload_date': '20170119', + 'view_count': int, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'<h1[^>]*>([^<]+)</h1>', webpage, 'title') + + data = self._hidden_inputs(webpage) + + video_url = data['ntt-vod-src-detailview'] + + date_str = get_element_by_class('date', webpage) + timestamp = unified_timestamp(date_str + '+0800') if date_str else None + + view_count = int_or_none(remove_start( + clean_html(get_element_by_class('click', webpage)), '點閱:')) + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'thumbnail': data.get('ntt-vod-img-src'), + 'timestamp': timestamp, + 'view_count': view_count, + } -- cgit 1.4.1 From c60089c0222433775dcc1305d85b42fc6158c8df Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Wed, 25 Jan 2017 07:38:17 +0100 Subject: [afreecatv:global] Add new extractor(closes #11807) --- youtube_dl/extractor/afreecatv.py | 92 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 5 ++- 2 files changed, 96 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/afreecatv.py b/youtube_dl/extractor/afreecatv.py index 75b366993..4f6cdb8a2 100644 --- a/youtube_dl/extractor/afreecatv.py +++ b/youtube_dl/extractor/afreecatv.py @@ -18,6 +18,7 @@ from ..utils import ( class AfreecaTVIE(InfoExtractor): + IE_NAME = 'afreecatv' IE_DESC = 'afreecatv.com' _VALID_URL = r'''(?x) https?:// @@ -143,3 +144,94 @@ class AfreecaTVIE(InfoExtractor): expected=True) return info + + +class AfreecaTVGlobalIE(AfreecaTVIE): + IE_NAME = 'afreecatv:global' + _VALID_URL = r'https?://(?:www\.)?afreeca\.tv/(?P<channel_id>\d+)(?:/v/(?P<video_id>\d+))?' 
+ _TESTS = [{ + 'url': 'http://afreeca.tv/36853014/v/58301', + 'info_dict': { + 'id': '58301', + 'title': 'tryhard top100', + 'uploader_id': '36853014', + 'uploader': 'makgi Hearthstone Live!', + }, + 'playlist_count': 3, + }] + + def _real_extract(self, url): + channel_id, video_id = re.match(self._VALID_URL, url).groups() + video_type = 'video' if video_id else 'live' + query = { + 'pt': 'view', + 'bid': channel_id, + } + if video_id: + query['vno'] = video_id + video_data = self._download_json( + 'http://api.afreeca.tv/%s/view_%s.php' % (video_type, video_type), + video_id or channel_id, query=query)['channel'] + + if video_data.get('result') != 1: + raise ExtractorError('%s said: %s' % (self.IE_NAME, video_data['remsg'])) + + title = video_data['title'] + + info = { + 'thumbnail': video_data.get('thumb'), + 'view_count': int_or_none(video_data.get('vcnt')), + 'age_limit': int_or_none(video_data.get('grade')), + 'uploader_id': channel_id, + 'uploader': video_data.get('cname'), + } + + if video_id: + entries = [] + for i, f in enumerate(video_data.get('flist', [])): + video_key = self.parse_video_key(f.get('key', '')) + f_url = f.get('file') + if not video_key or not f_url: + continue + entries.append({ + 'id': '%s_%s' % (video_id, video_key.get('part', i + 1)), + 'title': title, + 'upload_date': video_key.get('upload_date'), + 'duration': int_or_none(f.get('length')), + 'url': f_url, + 'protocol': 'm3u8_native', + 'ext': 'mp4', + }) + + info.update({ + 'id': video_id, + 'title': title, + 'duration': int_or_none(video_data.get('length')), + }) + if len(entries) > 1: + info['_type'] = 'multi_video' + info['entries'] = entries + elif len(entries) == 1: + i = entries[0].copy() + i.update(info) + info = i + else: + formats = [] + for s in video_data.get('strm', []): + s_url = s.get('purl') + if not s_url: + continue + # TODO: extract rtmp formats + if s.get('stype') == 'HLS': + formats.extend(self._extract_m3u8_formats( + s_url, channel_id, 'mp4', fatal=False)) + self._sort_formats(formats) + + info.update({ + 'id': channel_id, + 'title': self._live_title(title), + 'is_live': True, + 'formats': formats, + }) + + return info diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e23b5d0f6..f09b4cf2c 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -30,7 +30,10 @@ from .aenetworks import ( AENetworksIE, HistoryTopicIE, ) -from .afreecatv import AfreecaTVIE +from .afreecatv import ( + AfreecaTVIE, + AfreecaTVGlobalIE, +) from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE from .alphaporno import AlphaPornoIE -- cgit 1.4.1 From b3277115a192b88df34692e42f62f39bd4a65bac Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 26 Jan 2017 16:14:42 +0100 Subject: [disney] Add new extractor(closes #7409)(closes #11801)(#4975)(#11000) --- youtube_dl/extractor/disney.py | 115 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 116 insertions(+) create mode 100644 youtube_dl/extractor/disney.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/disney.py b/youtube_dl/extractor/disney.py new file mode 100644 index 000000000..396873c6d --- /dev/null +++ b/youtube_dl/extractor/disney.py @@ -0,0 +1,115 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + unified_strdate, + compat_str, + determine_ext, +) + + +class 
DisneyIE(InfoExtractor): + _VALID_URL = r'''(?x) + https?://(?P<domain>(?:[^/]+\.)?(?:disney\.[a-z]{2,3}(?:\.[a-z]{2})?|disney(?:(?:me|latino)\.com|turkiye\.com\.tr)|starwars\.com))/(?:embed/|(?:[^/]+/)+[\w-]+-)(?P<id>[a-z0-9]{24})''' + _TESTS = [{ + 'url': 'http://video.disney.com/watch/moana-trailer-545ed1857afee5a0ec239977', + 'info_dict': { + 'id': '545ed1857afee5a0ec239977', + 'ext': 'mp4', + 'title': 'Moana - Trailer', + 'description': 'A fun adventure for the entire Family! Bring home Moana on Digital HD Feb 21 & Blu-ray March 7', + 'upload_date': '20170112', + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + }, { + 'url': 'http://videos.disneylatino.com/ver/spider-man-de-regreso-a-casa-primer-adelanto-543a33a1850bdcfcca13bae2', + 'only_matching': True, + }, { + 'url': 'http://video.en.disneyme.com/watch/future-worm/robo-carp-2001-544b66002aa7353cdd3f5114', + 'only_matching': True, + }, { + 'url': 'http://video.disneyturkiye.com.tr/izle/7c-7-cuceler/kimin-sesi-zaten-5456f3d015f6b36c8afdd0e2', + 'only_matching': True, + }, { + 'url': 'http://disneyjunior.disney.com/embed/546a4798ddba3d1612e4005d', + 'only_matching': True, + }, { + 'url': 'http://www.starwars.com/embed/54690d1e6c42e5f09a0fb097', + 'only_matching': True, + }] + + def _real_extract(self, url): + domain, video_id = re.match(self._VALID_URL, url).groups() + webpage = self._download_webpage( + 'http://%s/embed/%s' % (domain, video_id), video_id) + video_data = self._parse_json(self._search_regex( + r'Disney\.EmbedVideo=({.+});', webpage, 'embed data'), video_id)['video'] + + for external in video_data.get('externals', []): + if external.get('source') == 'vevo': + return self.url_result('vevo:' + external['data_id'], 'Vevo') + + title = video_data['title'] + + formats = [] + for flavor in video_data.get('flavors', []): + flavor_format = flavor.get('format') + flavor_url = flavor.get('url') + if not flavor_url or not re.match(r'https?://', flavor_url): + continue + tbr = int_or_none(flavor.get('bitrate')) + if tbr == 99999: + formats.extend(self._extract_m3u8_formats( + flavor_url, video_id, 'mp4', m3u8_id=flavor_format, fatal=False)) + continue + format_id = [] + if flavor_format: + format_id.append(flavor_format) + if tbr: + format_id.append(compat_str(tbr)) + ext = determine_ext(flavor_url) + if flavor_format == 'applehttp' or ext == 'm3u8': + ext = 'mp4' + width = int_or_none(flavor.get('width')) + height = int_or_none(flavor.get('height')) + formats.append({ + 'format_id': '-'.join(format_id), + 'url': flavor_url, + 'width': width, + 'height': height, + 'tbr': tbr, + 'ext': ext, + 'vcodec': 'none' if (width == 0 and height == 0) else None, + }) + self._sort_formats(formats) + + subtitles = {} + for caption in video_data.get('captions', []): + caption_url = caption.get('url') + caption_format = caption.get('format') + if not caption_url or caption_format.startswith('unknown'): + continue + subtitles.setdefault(caption.get('language', 'en'), []).append({ + 'url': caption_url, + 'ext': { + 'webvtt': 'vtt', + }.get(caption_format, caption_format), + }) + + return { + 'id': video_id, + 'title': title, + 'description': video_data.get('description') or video_data.get('short_desc'), + 'thumbnail': video_data.get('thumb') or video_data.get('thumb_secure'), + 'duration': int_or_none(video_data.get('duration_sec')), + 'upload_date': unified_strdate(video_data.get('publish_date')), + 'formats': formats, + 'subtitles': subtitles, + } diff --git a/youtube_dl/extractor/extractors.py 
b/youtube_dl/extractor/extractors.py index f09b4cf2c..0c3e081ad 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -251,6 +251,7 @@ from .dumpert import DumpertIE from .defense import DefenseGouvFrIE from .discovery import DiscoveryIE from .discoverygo import DiscoveryGoIE +from .disney import DisneyIE from .dispeak import DigitallySpeakingIE from .dropbox import DropboxIE from .dw import ( -- cgit 1.4.1 From 9463637887ba784e3499410ab0945dcd68002bc1 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 26 Jan 2017 18:36:28 +0100 Subject: [tva] Add new extractor(closes #11842) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tva.py | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 youtube_dl/extractor/tva.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 0c3e081ad..81366f933 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -985,6 +985,7 @@ from .tv2 import ( ) from .tv3 import TV3IE from .tv4 import TV4IE +from .tva import TVAIE from .tvanouvelles import ( TVANouvellesIE, TVANouvellesArticleIE, diff --git a/youtube_dl/extractor/tva.py b/youtube_dl/extractor/tva.py new file mode 100644 index 000000000..3ced098f9 --- /dev/null +++ b/youtube_dl/extractor/tva.py @@ -0,0 +1,54 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, + smuggle_url, +) + + +class TVAIE(InfoExtractor): + _VALID_URL = r'https?://videos\.tva\.ca/episode/(?P<id>\d+)' + _TEST = { + 'url': 'http://videos.tva.ca/episode/85538', + 'info_dict': { + 'id': '85538', + 'ext': 'mp4', + 'title': 'Épisode du 25 janvier 2017', + 'description': 'md5:e9e7fb5532ab37984d2dc87229cadf98', + 'upload_date': '20170126', + 'timestamp': 1485442329, + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._download_json( + "https://d18jmrhziuoi7p.cloudfront.net/isl/api/v1/dataservice/Items('%s')" % video_id, + video_id, query={ + '$expand': 'Metadata,CustomId', + '$select': 'Metadata,Id,Title,ShortDescription,LongDescription,CreatedDate,CustomId,AverageUserRating,Categories,ShowName', + '$format': 'json', + }) + metadata = video_data.get('Metadata', {}) + + return { + '_type': 'url_transparent', + 'id': video_id, + 'title': video_data['Title'], + 'url': smuggle_url('ooyala:' + video_data['CustomId'], {'supportedformats': 'm3u8,hds'}), + 'description': video_data.get('LongDescription') or video_data.get('ShortDescription'), + 'series': video_data.get('ShowName'), + 'episode': metadata.get('EpisodeTitle'), + 'episode_number': int_or_none(metadata.get('EpisodeNumber')), + 'categories': video_data.get('Categories'), + 'average_rating': video_data.get('AverageUserRating'), + 'timestamp': parse_iso8601(video_data.get('CreatedDate')), + 'ie_key': 'Ooyala', + } -- cgit 1.4.1 From b92d3c5343536eb0a865afa79e3787fc384ec0ec Mon Sep 17 00:00:00 2001 From: ping <liping.ong@gmail.com> Date: Tue, 24 Jan 2017 13:52:17 +0800 Subject: [vlive] Add support for channels --- youtube_dl/extractor/extractors.py | 5 ++- youtube_dl/extractor/vlive.py | 68 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 72 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git 
a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 81366f933..c781c9b87 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1129,7 +1129,10 @@ from .vk import ( VKUserVideosIE, VKWallPostIE, ) -from .vlive import VLiveIE +from .vlive import ( + VLiveIE, + VLiveChannelIE +) from .vodlocker import VodlockerIE from .vodplatform import VODPlatformIE from .voicerepublic import VoiceRepublicIE diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py index 540246c79..70bab1f04 100644 --- a/youtube_dl/extractor/vlive.py +++ b/youtube_dl/extractor/vlive.py @@ -2,6 +2,8 @@ from __future__ import unicode_literals import re +import time +import itertools from .common import InfoExtractor from ..utils import ( @@ -169,3 +171,69 @@ class VLiveIE(InfoExtractor): 'subtitles': subtitles, }) return info + + +class VLiveChannelIE(InfoExtractor): + IE_NAME = 'vlive:channel' + _VALID_URL = r'https?://channels\.vlive\.tv/(?P<id>[0-9A-Z]+)/video' + _TEST = { + 'url': 'http://channels.vlive.tv/FCD4B/video', + 'info_dict': { + 'id': 'FCD4B', + 'title': 'MAMAMOO', + }, + 'playlist_mincount': 110 + } + _APP_ID = '8c6cc7b45d2568fb668be6e05b6e5a3b' + + def _real_extract(self, url): + channel_code = self._match_id(url) + + webpage = self._download_webpage( + 'http://channels.vlive.tv/%s/video' % channel_code, channel_code) + app_js_url = self._search_regex( + r'(http[^\'\"\s]+app\.js)', webpage, 'app js', default='') + + if app_js_url: + app_js = self._download_webpage(app_js_url, channel_code, 'app js') + app_id = self._search_regex( + r'Global\.VFAN_APP_ID\s*=\s*[\'"]([^\'"]+)[\'"]', + app_js, 'app id', default=self._APP_ID) + else: + app_id = self._APP_ID + + channel_info = self._download_json( + 'http://api.vfan.vlive.tv/vproxy/channelplus/decodeChannelCode', + channel_code, note='decode channel code', + query={'app_id': app_id, 'channelCode': channel_code, '_': int(time.time())}) + + channel_seq = channel_info['result']['channelSeq'] + channel_name = None + entries = [] + + for page_num in itertools.count(1): + video_list = self._download_json( + 'http://api.vfan.vlive.tv/vproxy/channelplus/getChannelVideoList', + channel_code, note='channel list %d' % page_num, + query={ + 'app_id': app_id, + 'channelSeq': channel_seq, + 'maxNumOfRows': 1000, + '_': int(time.time()), + 'pageNo': page_num + } + ) + if not channel_name: + channel_name = video_list['result']['channelInfo']['channelName'] + + if not video_list['result'].get('videoList'): + break + + for video in video_list['result']['videoList']: + video_id = str(video['videoSeq']) + entries.append( + self.url_result( + 'http://www.vlive.tv/video/%s' % video_id, 'Vlive', video_id)) + + return self.playlist_result( + entries, channel_code, channel_name) -- cgit 1.4.1 From 008f247077027f10c947060d8f3bb886c9af6aa7 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 28 Jan 2017 20:29:22 +0700 Subject: [mtv81] Add extractor (closes #7619) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/mtv.py | 29 +++++++++++++++++++++++++++++ 2 files changed, 30 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index c781c9b87..915291f74 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -553,6 +553,7 @@ from .mtv import ( MTVVideoIE, MTVServicesEmbeddedIE, MTVDEIE, + MTV81IE, ) from .muenchentv import MuenchenTVIE from 
.musicplayon import MusicPlayOnIE diff --git a/youtube_dl/extractor/mtv.py b/youtube_dl/extractor/mtv.py index 855c3996f..8acea1461 100644 --- a/youtube_dl/extractor/mtv.py +++ b/youtube_dl/extractor/mtv.py @@ -327,6 +327,35 @@ class MTVIE(MTVServicesInfoExtractor): }] +class MTV81IE(InfoExtractor): + IE_NAME = 'mtv81' + _VALID_URL = r'https?://(?:www\.)?mtv81\.com/videos/(?P<id>[^/?#.]+)' + + _TEST = { + 'url': 'http://www.mtv81.com/videos/artist-to-watch/the-godfather-of-japanese-hip-hop-segment-1/', + 'md5': '1edbcdf1e7628e414a8c5dcebca3d32b', + 'info_dict': { + 'id': '5e14040d-18a4-47c4-a582-43ff602de88e', + 'ext': 'mp4', + 'title': 'Unlocking The Truth|July 18, 2016|1|101|Trailer', + 'description': '"Unlocking the Truth" premieres August 17th at 11/10c.', + 'timestamp': 1468846800, + 'upload_date': '20160718', + }, + } + + def _extract_mgid(self, webpage): + return self._search_regex( + r'getTheVideo\((["\'])(?P<id>mgid:.+?)\1', webpage, + 'mgid', group='id') + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + mgid = self._extract_mgid(webpage) + return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid) + + class MTVVideoIE(MTVServicesInfoExtractor): IE_NAME = 'mtv:video' _VALID_URL = r'''(?x)^https?:// -- cgit 1.4.1 From a71b8d3b3bb399acb82f3ccfbd8a19d411848db4 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 28 Jan 2017 15:51:52 +0100 Subject: [itv] Add new extractor(closes #9240) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/itv.py | 181 +++++++++++++++++++++++++++++++++++++ 2 files changed, 182 insertions(+) create mode 100644 youtube_dl/extractor/itv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 915291f74..086a2296d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -415,6 +415,7 @@ from .internetvideoarchive import InternetVideoArchiveIE from .iprima import IPrimaIE from .iqiyi import IqiyiIE from .ir90tv import Ir90TvIE +from .itv import ITVIE from .ivi import ( IviIE, IviCompilationIE diff --git a/youtube_dl/extractor/itv.py b/youtube_dl/extractor/itv.py new file mode 100644 index 000000000..d029609c3 --- /dev/null +++ b/youtube_dl/extractor/itv.py @@ -0,0 +1,181 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import uuid +import xml.etree.ElementTree as etree +import json + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + extract_attributes, + xpath_with_ns, + xpath_element, + xpath_text, + int_or_none, + parse_duration, + ExtractorError, + determine_ext, +) + + +class ITVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?itv\.com/hub/[^/]+/(?P<id>[0-9a-z]+)' + _TEST = { + 'url': 'http://www.itv.com/hub/mr-bean-animated-series/2a2936a0053', + 'info_dict': { + 'id': '2a2936a0053', + 'ext': 'flv', + 'title': 'Home Movie', + }, + 'params': { + # rtmp download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + params = extract_attributes(self._search_regex( + r'(?s)(<[^>]+id="video"[^>]*>)', webpage, 'params')) + + ns_map = { + 'soapenv': 'http://schemas.xmlsoap.org/soap/envelope/', + 'tem': 'http://tempuri.org/', + 'itv': 'http://schemas.datacontract.org/2004/07/Itv.BB.Mercury.Common.Types', + 'com': 'http://schemas.itv.com/2009/05/Common', + } + for 
ns, full_ns in ns_map.items(): + etree.register_namespace(ns, full_ns) + + def _add_ns(name): + return xpath_with_ns(name, ns_map) + + def _add_sub_element(element, name): + return etree.SubElement(element, _add_ns(name)) + + req_env = etree.Element(_add_ns('soapenv:Envelope')) + _add_sub_element(req_env, 'soapenv:Header') + body = _add_sub_element(req_env, 'soapenv:Body') + get_playlist = _add_sub_element(body, ('tem:GetPlaylist')) + request = _add_sub_element(get_playlist, 'tem:request') + _add_sub_element(request, 'itv:ProductionId').text = params['data-video-id'] + _add_sub_element(request, 'itv:RequestGuid').text = compat_str(uuid.uuid4()).upper() + vodcrid = _add_sub_element(request, 'itv:Vodcrid') + _add_sub_element(vodcrid, 'com:Id') + _add_sub_element(request, 'itv:Partition') + user_info = _add_sub_element(get_playlist, 'tem:userInfo') + _add_sub_element(user_info, 'itv:Broadcaster').text = 'Itv' + _add_sub_element(user_info, 'itv:DM') + _add_sub_element(user_info, 'itv:RevenueScienceValue') + _add_sub_element(user_info, 'itv:SessionId') + _add_sub_element(user_info, 'itv:SsoToken') + _add_sub_element(user_info, 'itv:UserToken') + site_info = _add_sub_element(get_playlist, 'tem:siteInfo') + _add_sub_element(site_info, 'itv:AdvertisingRestriction').text = 'None' + _add_sub_element(site_info, 'itv:AdvertisingSite').text = 'ITV' + _add_sub_element(site_info, 'itv:AdvertisingType').text = 'Any' + _add_sub_element(site_info, 'itv:Area').text = 'ITVPLAYER.VIDEO' + _add_sub_element(site_info, 'itv:Category') + _add_sub_element(site_info, 'itv:Platform').text = 'DotCom' + _add_sub_element(site_info, 'itv:Site').text = 'ItvCom' + device_info = _add_sub_element(get_playlist, 'tem:deviceInfo') + _add_sub_element(device_info, 'itv:ScreenSize').text = 'Big' + player_info = _add_sub_element(get_playlist, 'tem:playerInfo') + _add_sub_element(player_info, 'itv:Version').text = '2' + + headers = self.geo_verification_headers() + headers.update({ + 'Content-Type': 'text/xml; charset=utf-8', + 'SOAPAction': 'http://tempuri.org/PlaylistService/GetPlaylist', + }) + resp_env = self._download_xml( + params['data-playlist-url'], video_id, + headers=headers, data=etree.tostring(req_env)) + playlist = xpath_element(resp_env, './/Playlist') + if playlist is None: + fault_string = xpath_text(resp_env, './/faultstring') + raise ExtractorError('%s said: %s' % (self.IE_NAME, fault_string)) + title = xpath_text(playlist, 'EpisodeTitle', fatal=True) + media_files = xpath_element(playlist, 'VideoEntries/Video/MediaFiles', fatal=True) + rtmp_url = media_files.attrib['base'] + + formats = [] + for media_file in media_files.findall('MediaFile'): + play_path = xpath_text(media_file, 'URL') + if not play_path: + continue + tbr = int_or_none(media_file.get('bitrate'), 1000) + formats.append({ + 'format_id': 'rtmp' + ('-%d' % tbr if tbr else ''), + 'url': rtmp_url, + 'play_path': play_path, + 'tbr': tbr, + 'ext': 'flv', + }) + + ios_playlist_url = params.get('data-video-playlist') + hmac = params.get('data-video-hmac') + if ios_playlist_url and hmac: + headers = self.geo_verification_headers() + headers.update({ + 'Accept': 'application/vnd.itv.vod.playlist.v2+json', + 'Content-Type': 'application/json', + 'hmac': hmac.upper(), + }) + ios_playlist = self._download_json( + ios_playlist_url, video_id, data=json.dumps({ + 'user': { + 'itvUserId': '', + 'entitlements': [], + 'token': '' + }, + 'device': { + 'manufacturer': 'Apple', + 'model': 'iPad', + 'os': { + 'name': 'iPhone OS', + 'version': '9.3', + 'type': 'ios' + } + 
}, + 'client': { + 'version': '4.1', + 'id': 'browser' + }, + 'variantAvailability': { + 'featureset': { + 'min': ['hls', 'aes'], + 'max': ['hls', 'aes'] + }, + 'platformTag': 'mobile' + } + }).encode(), headers=headers, fatal=False) + if ios_playlist: + video_data = ios_playlist.get('Playlist', {}).get('Video', {}) + ios_base_url = video_data.get('Base') + for media_file in video_data.get('MediaFiles', []): + href = media_file.get('Href') + if not href: + continue + if ios_base_url: + href = ios_base_url + href + ext = determine_ext(href) + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats(href, video_id, 'mp4', m3u8_id='hls', fatal=False)) + else: + formats.append({ + 'url': href, + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'episode_title': title, + 'episode_number': int_or_none(xpath_text(playlist, 'EpisodeNumber')), + 'series': xpath_text(playlist, 'ProgrammeTitle'), + 'duartion': parse_duration(xpath_text(playlist, 'Duration')), + } -- cgit 1.4.1 From f13da8af289d7d9365e34ef705a53ac62aa3b570 Mon Sep 17 00:00:00 2001 From: Alex Seiler <seileralex@gmail.com> Date: Sat, 28 Jan 2017 17:52:07 +0100 Subject: [azmedien:playlist] Add support for topic and themen playlists --- youtube_dl/extractor/azmedien.py | 39 ++++++++++++++++++++++++++++++++------ youtube_dl/extractor/extractors.py | 2 +- 2 files changed, 34 insertions(+), 7 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/azmedien.py b/youtube_dl/extractor/azmedien.py index a89f71c20..cbc3ed564 100644 --- a/youtube_dl/extractor/azmedien.py +++ b/youtube_dl/extractor/azmedien.py @@ -5,8 +5,9 @@ import re from .common import InfoExtractor from .kaltura import KalturaIE from ..utils import ( - get_element_by_class, + get_element_by_id, strip_or_none, + urljoin, ) @@ -83,8 +84,8 @@ class AZMedienIE(AZMedienBaseIE): return self._kaltura_video(partner_id, entry_id) -class AZMedienShowIE(AZMedienBaseIE): - IE_DESC = 'AZ Medien shows' +class AZMedienPlaylistIE(AZMedienBaseIE): + IE_DESC = 'AZ Medien playlists' _VALID_URL = r'''(?x) https?:// (?:www\.)? @@ -93,7 +94,12 @@ class AZMedienShowIE(AZMedienBaseIE): telebaern\.tv| telem1\.ch )/ - (?P<id>[0-9]+-show-[^/\#]+ + (?P<id>[0-9]+- + (?: + show| + topic| + themen + )-[^/\#]+ (?: /[0-9]+-episode-[^/\#]+ )? @@ -108,6 +114,18 @@ class AZMedienShowIE(AZMedienBaseIE): 'title': 'News - Donnerstag, 15. Dezember 2016', }, 'playlist_count': 9, + }, { + # URL with 'themen' + 'url': 'http://www.telem1.ch/258-themen-tele-m1-classics', + 'info_dict': { + 'id': '258-themen-tele-m1-classics', + 'title': 'Tele M1 Classics', + }, + 'playlist_mincount': 15, + }, { + # URL with 'topic', contains nested playlists + 'url': 'http://www.telezueri.ch/219-topic-aera-trump-hat-offiziell-begonnen', + 'only_matching': True, }, { # URL with 'show' only 'url': 'http://www.telezueri.ch/86-show-talktaeglich', @@ -136,10 +154,19 @@ class AZMedienShowIE(AZMedienBaseIE): for m in re.finditer( r'<a[^>]+data-real=(["\'])(?P<url>http.+?)\1', webpage)] + if not entries: + entries = [ + # May contain nested playlists (e.g. [1]) thus no explicit + # ie_key + # 1. 
http://www.telezueri.ch/219-topic-aera-trump-hat-offiziell-begonnen) + self.url_result(urljoin(url, m.group('url'))) + for m in re.finditer( + r'<a[^>]+name=[^>]+href=(["\'])(?P<url>/.+?)\1', webpage)] + title = self._search_regex( r'episodeShareTitle\s*=\s*(["\'])(?P<title>(?:(?!\1).)+)\1', webpage, 'title', - default=strip_or_none(get_element_by_class( - 'title-block-cell', webpage)), group='title') + default=strip_or_none(get_element_by_id( + 'video-title', webpage)), group='title') return self.playlist_result(entries, show_id, title) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 086a2296d..2590b5e1b 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -82,7 +82,7 @@ from .awaan import ( ) from .azmedien import ( AZMedienIE, - AZMedienShowIE, + AZMedienPlaylistIE, ) from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE -- cgit 1.4.1 From 8fd65faece98139def3a6538e98053bebd400263 Mon Sep 17 00:00:00 2001 From: Odd Stråbø <oddstr13@openshell.no> Date: Sat, 14 Jan 2017 02:36:04 +0100 Subject: [NRKTV] Added NRKTVSeriesIE [NRKTV] Added season and episode number to metadata. [NRKTV] Added category to metadata. [NRKTV] Added tests to NRKTVSeries. [NRKTV] Fixed whitespace issues (flake8). --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/nrk.py | 49 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 50 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 2590b5e1b..06e6d4620 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -668,6 +668,7 @@ from .nrk import ( NRKTVIE, NRKTVDirekteIE, NRKTVEpisodesIE, + NRKTVSeriesIE, ) from .ntvde import NTVDeIE from .ntvru import NTVRuIE diff --git a/youtube_dl/extractor/nrk.py b/youtube_dl/extractor/nrk.py index ea7be005a..26604f84f 100644 --- a/youtube_dl/extractor/nrk.py +++ b/youtube_dl/extractor/nrk.py @@ -128,6 +128,18 @@ class NRKBaseIE(InfoExtractor): series = conviva.get('seriesName') or data.get('seriesTitle') episode = conviva.get('episodeName') or data.get('episodeNumberOrDate') + season_number = None + episode_number = None + if data.get('mediaElementType') == 'Episode': + _season_episode = data.get('scoresStatistics', {}).get('springStreamStream') or \ + data.get('relativeOriginUrl', '') + EPISODENUM_RE = [ + r'/s(?P<season>\d+)e(?P<episode>\d+)\.', + r'/sesong-(?P<season>\d+)/episode-(?P<episode>\d+)', + ] + season_number = int_or_none(self._search_regex(EPISODENUM_RE, _season_episode, "S##E##", fatal=False, group='season')) + episode_number = int_or_none(self._search_regex(EPISODENUM_RE, _season_episode, "S##E##", fatal=False, group='episode')) + thumbnails = None images = data.get('images') if images and isinstance(images, dict): @@ -140,11 +152,15 @@ class NRKBaseIE(InfoExtractor): } for image in web_images if image.get('imageUrl')] description = data.get('description') + category = data.get('mediaAnalytics', {}).get('category') common_info = { 'description': description, 'series': series, 'episode': episode, + 'season_number': season_number, + 'episode_number': episode_number, + 'categories': [category] if category else None, 'age_limit': parse_age_limit(data.get('legalAge')), 'thumbnails': thumbnails, } @@ -360,6 +376,39 @@ class NRKTVEpisodesIE(NRKPlaylistBaseIE): r'<h1>([^<]+)</h1>', webpage, 'title', fatal=False) +class NRKTVSeriesIE(InfoExtractor): + _VALID_URL = 
r'https?://tv\.nrk\.no/serie/(?P<id>[^/]+)/?' + _ITEM_RE = r'data-season=["\'](?P<id>\d+)["\']' + _TESTS = [{ + 'url': 'https://tv.nrk.no/serie/broedrene-dal-og-spektralsteinene', + 'playlist_count': 1, + }, { + 'url': 'https://tv.nrk.no/serie/saving-the-human-race', + 'playlist_count': 1, + }, { + 'url': 'https://tv.nrk.no/serie/postmann-pat', + 'playlist_count': 3, + }, { + 'url': 'https://tv.nrk.no/serie/groenn-glede', + 'playlist_count': 9, + }] + + def _real_extract(self, url): + series_id = self._match_id(url) + + webpage = self._download_webpage(url, series_id) + + entries = [ + self.url_result('https://tv.nrk.no/program/Episodes/{series}/{season}'.format( + series=series_id, + season=season_id + )) + for season_id in re.findall(self._ITEM_RE, webpage) + ] + + return self.playlist_result(entries) + + class NRKSkoleIE(InfoExtractor): IE_DESC = 'NRK Skole' _VALID_URL = r'https?://(?:www\.)?nrk\.no/skole/?\?.*\bmediaId=(?P<id>\d+)' -- cgit 1.4.1 From bd8f48c78b952ebe3bf335185c819e265f63cb50 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Thu, 2 Feb 2017 21:51:31 +0800 Subject: [bilibili] Support new Bangumi URLs (closes #11845) To reduce complexity, I don't support old Bangumi URLs directly via _VALID_URL. Instead, I choose to let it go to generic redirection. An example can be found in #10190: http://bangumi.bilibili.com/anime/v/40062 --- ChangeLog | 5 ++ youtube_dl/extractor/bilibili.py | 135 ++++++++++++++++++++++++++++++++++--- youtube_dl/extractor/extractors.py | 5 +- 3 files changed, 134 insertions(+), 11 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 8e3a04d7d..c27907f51 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,8 @@ +version <unreleased> + +Extractors ++ [bilibili] Support new Bangumi URLs (#11845) + version 2017.02.01 Extractors diff --git a/youtube_dl/extractor/bilibili.py b/youtube_dl/extractor/bilibili.py index 85ea5e6ee..80dd8382e 100644 --- a/youtube_dl/extractor/bilibili.py +++ b/youtube_dl/extractor/bilibili.py @@ -5,19 +5,27 @@ import hashlib import re from .common import InfoExtractor -from ..compat import compat_parse_qs +from ..compat import ( + compat_parse_qs, + compat_urlparse, +) from ..utils import ( + ExtractorError, int_or_none, float_or_none, + parse_iso8601, + smuggle_url, + strip_jsonp, unified_timestamp, + unsmuggle_url, urlencode_postdata, ) class BiliBiliIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.|bangumi\.|)bilibili\.(?:tv|com)/(?:video/av|anime/v/)(?P<id>\d+)' + _VALID_URL = r'https?://(?:www\.|bangumi\.|)bilibili\.(?:tv|com)/(?:video/av|anime/(?P<anime_id>\d+)/play#)(?P<id>\d+)' - _TEST = { + _TESTS = [{ 'url': 'http://www.bilibili.tv/video/av1074402/', 'md5': '9fa226fe2b8a9a4d5a69b4c6a183417e', 'info_dict': { @@ -32,25 +40,61 @@ class BiliBiliIE(InfoExtractor): 'uploader': '菊子桑', 'uploader_id': '156160', }, - } + }, { + # Tested in BiliBiliBangumiIE + 'url': 'http://bangumi.bilibili.com/anime/1869/play#40062', + 'only_matching': True, + }, { + 'url': 'http://bangumi.bilibili.com/anime/5802/play#100643', + 'md5': '3f721ad1e75030cc06faf73587cfec57', + 'info_dict': { + 'id': '100643', + 'ext': 'mp4', + 'title': 'CHAOS;CHILD', + 'description': '如果你是神明,并且能够让妄想成为现实。那你会进行怎么样的妄想?是淫靡的世界?独裁社会?毁灭性的制裁?还是……2015年,涩谷。从6年前发生的大灾害“涩谷地震”之后复兴了的这个街区里新设立的私立高中...', + }, + 'skip': 'Geo-restricted to China', + }] _APP_KEY = '84956560bc028eb7' _BILIBILI_KEY = '94aba54af9065f71de72f5508f1cd42e' + def _report_error(self, result): + if 'message' in result: + raise 
ExtractorError('%s said: %s' % (self.IE_NAME, result['message']), expected=True) + elif 'code' in result: + raise ExtractorError('%s returns error %d' % (self.IE_NAME, result['code']), expected=True) + else: + raise ExtractorError('Can\'t extract Bangumi episode ID') + def _real_extract(self, url): - video_id = self._match_id(url) + url, smuggled_data = unsmuggle_url(url, {}) + + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + anime_id = mobj.group('anime_id') webpage = self._download_webpage(url, video_id) - if 'anime/v' not in url: + if 'anime/' not in url: cid = compat_parse_qs(self._search_regex( [r'EmbedPlayer\([^)]+,\s*"([^"]+)"\)', r'<iframe[^>]+src="https://secure\.bilibili\.com/secure,([^"]+)"'], webpage, 'player parameters'))['cid'][0] else: + if 'no_bangumi_tip' not in smuggled_data: + self.to_screen('Downloading episode %s. To download all videos in anime %s, re-run youtube-dl with %s' % ( + video_id, anime_id, compat_urlparse.urljoin(url, '//bangumi.bilibili.com/anime/%s' % anime_id))) + headers = { + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + } + headers.update(self.geo_verification_headers()) + js = self._download_json( 'http://bangumi.bilibili.com/web_api/get_source', video_id, data=urlencode_postdata({'episode_id': video_id}), - headers={'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8'}) + headers=headers) + if 'result' not in js: + self._report_error(js) cid = js['result']['cid'] payload = 'appkey=%s&cid=%s&otype=json&quality=2&type=mp4' % (self._APP_KEY, cid) @@ -58,7 +102,11 @@ class BiliBiliIE(InfoExtractor): video_info = self._download_json( 'http://interface.bilibili.com/playurl?%s&sign=%s' % (payload, sign), - video_id, note='Downloading video info page') + video_id, note='Downloading video info page', + headers=self.geo_verification_headers()) + + if 'durl' not in video_info: + self._report_error(video_info) entries = [] @@ -85,7 +133,7 @@ class BiliBiliIE(InfoExtractor): title = self._html_search_regex('<h1[^>]+title="([^"]+)">', webpage, 'title') description = self._html_search_meta('description', webpage) timestamp = unified_timestamp(self._html_search_regex( - r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', fatal=False)) + r'<time[^>]+datetime="([^"]+)"', webpage, 'upload time', default=None)) thumbnail = self._html_search_meta(['og:image', 'thumbnailUrl'], webpage) # TODO 'view_count' requires deobfuscating Javascript @@ -99,7 +147,7 @@ class BiliBiliIE(InfoExtractor): } uploader_mobj = re.search( - r'<a[^>]+href="https?://space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"', + r'<a[^>]+href="(?:https?:)?//space\.bilibili\.com/(?P<id>\d+)"[^>]+title="(?P<name>[^"]+)"', webpage) if uploader_mobj: info.update({ @@ -123,3 +171,70 @@ class BiliBiliIE(InfoExtractor): 'description': description, 'entries': entries, } + + +class BiliBiliBangumiIE(InfoExtractor): + _VALID_URL = r'https?://bangumi\.bilibili\.com/anime/(?P<id>\d+)' + + IE_NAME = 'bangumi.bilibili.com' + IE_DESC = 'BiliBili番剧' + + _TESTS = [{ + 'url': 'http://bangumi.bilibili.com/anime/1869', + 'info_dict': { + 'id': '1869', + 'title': '混沌武士', + 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', + }, + 'playlist_count': 26, + }, { + 'url': 'http://bangumi.bilibili.com/anime/1869', + 'info_dict': { + 'id': '1869', + 'title': '混沌武士', + 'description': 'md5:6a9622b911565794c11f25f81d6a97d2', + }, + 'playlist': [{ + 'md5': '91da8621454dd58316851c27c68b0c13', + 'info_dict': { + 'id': '40062', + 'ext': 'mp4', + 'title': 
'混沌武士', + 'description': '故事发生在日本的江户时代。风是一个小酒馆的打工女。一日,酒馆里来了一群恶霸,虽然他们的举动令风十分不满,但是毕竟风只是一届女流,无法对他们采取什么行动,只能在心里嘟哝。这时,酒家里又进来了个“不良份子...', + 'timestamp': 1414538739, + 'upload_date': '20141028', + 'episode': '疾风怒涛 Tempestuous Temperaments', + 'episode_number': 1, + }, + }], + 'params': { + 'playlist_items': '1', + }, + }] + + @classmethod + def suitable(cls, url): + return False if BiliBiliIE.suitable(url) else super(BiliBiliBangumiIE, cls).suitable(url) + + def _real_extract(self, url): + bangumi_id = self._match_id(url) + + # Sometimes this API returns a JSONP response + season_info = self._download_json( + 'http://bangumi.bilibili.com/jsonp/seasoninfo/%s.ver' % bangumi_id, + bangumi_id, transform_source=strip_jsonp)['result'] + + entries = [{ + '_type': 'url_transparent', + 'url': smuggle_url(episode['webplay_url'], {'no_bangumi_tip': 1}), + 'ie_key': BiliBiliIE.ie_key(), + 'timestamp': parse_iso8601(episode.get('update_time'), delimiter=' '), + 'episode': episode.get('index_title'), + 'episode_number': int_or_none(episode.get('index')), + } for episode in season_info['episodes']] + + entries = sorted(entries, key=lambda entry: entry.get('episode_number')) + + return self.playlist_result( + entries, bangumi_id, + season_info.get('bangumi_title'), season_info.get('evaluate')) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 06e6d4620..1d1c05d42 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -103,7 +103,10 @@ from .beatport import BeatportIE from .bet import BetIE from .bigflix import BigflixIE from .bild import BildIE -from .bilibili import BiliBiliIE +from .bilibili import ( + BiliBiliIE, + BiliBiliBangumiIE, +) from .biobiochiletv import BioBioChileTVIE from .biqle import BIQLEIE from .bleacherreport import ( -- cgit 1.4.1 From 4ce3407d089ae8c34341e6d68267910683d4b500 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 3 Feb 2017 10:15:03 +0100 Subject: [filmon] improve extraction --- youtube_dl/extractor/extractors.py | 5 +- youtube_dl/extractor/filmon.py | 222 +++++++++++++++++++++---------------- 2 files changed, 132 insertions(+), 95 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index c9b9ebd23..e4ee43ee3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -287,7 +287,10 @@ from .fc2 import ( FC2EmbedIE, ) from .fczenit import FczenitIE -from .filmon import FilmOnIE, FilmOnVODIE +from .filmon import ( + FilmOnIE, + FilmOnChannelIE, +) from .firstpost import FirstpostIE from .firsttv import FirstTVIE from .fivemin import FiveMinIE diff --git a/youtube_dl/extractor/filmon.py b/youtube_dl/extractor/filmon.py index 987792fec..f775fe0ba 100644 --- a/youtube_dl/extractor/filmon.py +++ b/youtube_dl/extractor/filmon.py @@ -2,143 +2,177 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import qualities -from ..compat import compat_urllib_request - - -_QUALITY = qualities(('low', 'high')) +from ..compat import ( + compat_str, + compat_HTTPError, +) +from ..utils import ( + qualities, + strip_or_none, + int_or_none, + ExtractorError, +) class FilmOnIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?filmon\.com/(?:tv|channel)/(?P<id>[a-z0-9-]+)' + IE_NAME = 'filmon' + _VALID_URL = r'(?:https?://(?:www\.)?filmon\.com/vod/view/|filmon:)(?P<id>\d+)' _TESTS = [{ - 'url': 
'https://www.filmon.com/channel/filmon-sports', - 'only_matching': True, + 'url': 'https://www.filmon.com/vod/view/24869-0-plan-9-from-outer-space', + 'info_dict': { + 'id': '24869', + 'ext': 'mp4', + 'title': 'Plan 9 From Outer Space', + 'description': 'Dead human, zombies and vampires', + }, }, { - 'url': 'https://www.filmon.com/tv/2894', - 'only_matching': True, + 'url': 'https://www.filmon.com/vod/view/2825-1-popeye-series-1', + 'info_dict': { + 'id': '2825', + 'title': 'Popeye Series 1', + 'description': 'The original series of Popeye.', + }, + 'playlist_mincount': 8, }] def _real_extract(self, url): - channel_id = self._match_id(url) + video_id = self._match_id(url) - request = compat_urllib_request.Request('https://www.filmon.com/channel/%s' % (channel_id)) - request.add_header('X-Requested-With', 'XMLHttpRequest') - channel_info = self._download_json(request, channel_id) - now_playing = channel_info['now_playing'] + try: + response = self._download_json( + 'https://www.filmon.com/api/vod/movie?id=%s' % video_id, + video_id)['response'] + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError): + errmsg = self._parse_json(e.cause.read().decode(), video_id)['reason'] + raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True) + raise - thumbnails = [] - for thumb in now_playing.get('images', ()): - if thumb['type'] != '2': - continue - thumbnails.append({ - 'url': thumb['url'], - 'width': int(thumb['width']), - 'height': int(thumb['height']), - }) + title = response['title'] + description = strip_or_none(response.get('description')) - formats = [] + if response.get('type_id') == 1: + entries = [self.url_result('filmon:' + episode_id) for episode_id in response.get('episodes', [])] + return self.playlist_result(entries, video_id, title, description) - for stream in channel_info['streams']: + QUALITY = qualities(('low', 'high')) + formats = [] + for format_id, stream in response.get('streams', {}).items(): + stream_url = stream.get('url') + if not stream_url: + continue formats.append({ - 'format_id': str(stream['id']), - # this is an m3u8 stream, but we are deliberately not using _extract_m3u8_formats - # because 0) it doesn't have bitrate variants anyway, and 1) the ids generated - # by that method are highly unstable (because the bitrate is variable) - 'url': stream['url'], - 'resolution': stream['name'], - 'format_note': 'expires after %u seconds' % int(stream['watch-timeout']), + 'format_id': format_id, + 'url': stream_url, 'ext': 'mp4', - 'quality': _QUALITY(stream['quality']), - 'preference': int(stream['watch-timeout']), + 'quality': QUALITY(stream.get('quality')), + 'protocol': 'm3u8_native', }) self._sort_formats(formats) + thumbnails = [] + poster = response.get('poster', {}) + thumbs = poster.get('thumbs', {}) + thumbs['poster'] = poster + for thumb_id, thumb in thumbs.items(): + thumb_url = thumb.get('url') + if not thumb_url: + continue + thumbnails.append({ + 'id': thumb_id, + 'url': thumb_url, + 'width': int_or_none(thumb.get('width')), + 'height': int_or_none(thumb.get('height')), + }) + return { - 'id': str(channel_info['id']), - 'display_id': channel_info['alias'], + 'id': video_id, + 'title': title, 'formats': formats, - # XXX: use the channel description (channel_info['description'])? - 'uploader_id': channel_info['alias'], - 'uploader': channel_info['title'], # XXX: kinda stretching it... 
- 'title': now_playing.get('programme_name') or channel_info['title'], - 'description': now_playing.get('programme_description'), + 'description': description, 'thumbnails': thumbnails, - 'is_live': True, } -class FilmOnVODIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?filmon\.com/vod/view/(?P<id>\d+)' +class FilmOnChannelIE(InfoExtractor): + IE_NAME = 'filmon:channel' + _VALID_URL = r'https?://(?:www\.)?filmon\.com/(?:tv|channel)/(?P<id>[a-z0-9-]+)' _TESTS = [{ - 'url': 'https://www.filmon.com/vod/view/24869-0-plan-9-from-outer-space', + # VOD + 'url': 'http://www.filmon.com/tv/sports-haters', 'info_dict': { - 'id': '24869', + 'id': '4190', 'ext': 'mp4', - 'title': 'Plan 9 From Outer Space', - 'description': 'Dead human, zombies and vampires', + 'title': 'Sports Haters', + 'description': 'md5:dabcb4c1d9cfc77085612f1a85f8275d', }, }, { - 'url': 'https://www.filmon.com/vod/view/2825-1-popeye-series-1', - 'info_dict': { - 'id': '2825', - 'title': 'Popeye Series 1', - }, - 'playlist_count': 8, + # LIVE + 'url': 'https://www.filmon.com/channel/filmon-sports', + 'only_matching': True, + }, { + 'url': 'https://www.filmon.com/tv/2894', + 'only_matching': True, }] - def _real_extract(self, url): - video_id = self._match_id(url) + _THUMBNAIL_RES = [ + ('logo', 56, 28), + ('big_logo', 106, 106), + ('extra_big_logo', 300, 300), + ] - result = self._download_json('https://www.filmon.com/api/vod/movie?id=%s' % (video_id), video_id) - if result['code'] != 200: - raise ExtractorError('FilmOn said: %s' % (result['reason']), expected=True) + def _real_extract(self, url): + channel_id = self._match_id(url) - response = result['response'] + try: + channel_data = self._download_json( + 'http://www.filmon.com/api-v2/channel/' + channel_id, channel_id)['data'] + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError): + errmsg = self._parse_json(e.cause.read().decode(), channel_id)['message'] + raise ExtractorError('%s said: %s' % (self.IE_NAME, errmsg), expected=True) + raise - if response.get('episodes'): - return { - '_type': 'playlist', - 'id': video_id, - 'title': response['title'], - 'entries': [{ - '_type': 'url', - 'url': 'https://www.filmon.com/vod/view/%s' % (ep), - } for ep in response['episodes']] - } + channel_id = compat_str(channel_data['id']) + is_live = not channel_data.get('is_vod') and not channel_data.get('is_vox') + title = channel_data['title'] + QUALITY = qualities(('low', 'high')) formats = [] - for (id, stream) in response['streams'].items(): + for stream in channel_data.get('streams', []): + stream_url = stream.get('url') + if not stream_url: + continue + if not is_live: + formats.extend(self._extract_wowza_formats( + stream_url, channel_id, skip_protocols=['dash', 'rtmp', 'rtsp'])) + continue + quality = stream.get('quality') formats.append({ - 'format_id': id, - 'url': stream['url'], - 'resolution': stream['name'], - 'format_note': 'expires after %u seconds' % int(stream['watch-timeout']), + 'format_id': quality, + # this is an m3u8 stream, but we are deliberately not using _extract_m3u8_formats + # because it doesn't have bitrate variants anyway + 'url': stream_url, 'ext': 'mp4', - 'quality': _QUALITY(stream['quality']), - 'preference': int(stream['watch-timeout']), + 'quality': QUALITY(quality), }) self._sort_formats(formats) - poster = response['poster'] - thumbnails = [{ - 'id': 'poster', - 'url': poster['url'], - 'width': poster['width'], - 'height': poster['height'], - }] - for (id, thumb) in poster['thumbs'].items(): + thumbnails = [] + for name, width, 
height in self._THUMBNAIL_RES: thumbnails.append({ - 'id': id, - 'url': thumb['url'], - 'width': thumb['width'], - 'height': thumb['height'], + 'id': name, + 'url': 'http://static.filmon.com/assets/channels/%s/%s.png' % (channel_id, name), + 'width': width, + 'height': height, }) return { - 'id': video_id, - 'title': response['title'], - 'formats': formats, - 'description': response['description'], + 'id': channel_id, + 'display_id': channel_data.get('alias'), + 'title': self._live_title(title) if is_live else title, + 'description': channel_data.get('description'), 'thumbnails': thumbnails, + 'formats': formats, + 'is_live': is_live, } -- cgit 1.4.1 From b7cc5f078eca4d90b3e3d31d1247452953dba1fb Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Fri, 3 Feb 2017 21:56:10 +0700 Subject: [extractors] Remove remnants of sportbox extractor (#11954) --- youtube_dl/extractor/extractors.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index aa235bec1..eaf3676df 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -888,10 +888,7 @@ from .spiegeltv import SpiegeltvIE from .spike import SpikeIE from .stitcher import StitcherIE from .sport5 import Sport5IE -from .sportbox import ( - SportBoxIE, - SportBoxEmbedIE, -) +from .sportbox import SportBoxEmbedIE from .sportdeutschland import SportDeutschlandIE from .sportschau import SportschauIE from .srgssr import ( -- cgit 1.4.1 From 2c15db829c1bd8311ed82e2884661271f0cf73ed Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 4 Feb 2017 08:38:28 +0100 Subject: [drtv] add support for live and radio sections(closes #1827)(closes #3427) --- youtube_dl/extractor/drtv.py | 74 ++++++++++++++++++++++++++++++++++---- youtube_dl/extractor/extractors.py | 5 ++- 2 files changed, 72 insertions(+), 7 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/drtv.py b/youtube_dl/extractor/drtv.py index 88d096b30..e966d7483 100644 --- a/youtube_dl/extractor/drtv.py +++ b/youtube_dl/extractor/drtv.py @@ -9,12 +9,13 @@ from ..utils import ( mimetype2ext, parse_iso8601, remove_end, + update_url_query, ) class DRTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv/se|nyheder)/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)' - + _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv/se|nyheder|radio/ondemand)/(?:[^/]+/)*(?P<id>[\da-z-]+)(?:[/#?]|$)' + IE_NAME = 'drtv' _TESTS = [{ 'url': 'https://www.dr.dk/tv/se/boern/ultra/klassen-ultra/klassen-darlig-taber-10', 'md5': '25e659cccc9a2ed956110a299fdf5983', @@ -79,9 +80,10 @@ class DRTVIE(InfoExtractor): subtitles = {} for asset in data['Assets']: - if asset.get('Kind') == 'Image': + kind = asset.get('Kind') + if kind == 'Image': thumbnail = asset.get('Uri') - elif asset.get('Kind') == 'VideoResource': + elif kind in ('VideoResource', 'AudioResource'): duration = float_or_none(asset.get('DurationInMilliseconds'), 1000) restricted_to_denmark = asset.get('RestrictedToDenmark') spoken_subtitles = asset.get('Target') == 'SpokenSubtitles' @@ -96,9 +98,13 @@ class DRTVIE(InfoExtractor): preference = -1 format_id += '-spoken-subtitles' if target == 'HDS': - formats.extend(self._extract_f4m_formats( + f4m_formats = self._extract_f4m_formats( uri + '?hdcore=3.3.0&plugin=aasp-3.3.0.99.43', - video_id, preference, f4m_id=format_id)) + video_id, preference, f4m_id=format_id) + if 
kind == 'AudioResource': + for f in f4m_formats: + f['vcodec'] = 'none' + formats.extend(f4m_formats) elif target == 'HLS': formats.extend(self._extract_m3u8_formats( uri, video_id, 'mp4', entry_protocol='m3u8_native', @@ -112,6 +118,7 @@ class DRTVIE(InfoExtractor): 'format_id': format_id, 'tbr': int_or_none(bitrate), 'ext': link.get('FileFormat'), + 'vcodec': 'none' if kind == 'AudioResource' else None, }) subtitles_list = asset.get('SubtitlesList') if isinstance(subtitles_list, list): @@ -144,3 +151,58 @@ class DRTVIE(InfoExtractor): 'formats': formats, 'subtitles': subtitles, } + + +class DRTVLiveIE(InfoExtractor): + IE_NAME = 'drtv:live' + _VALID_URL = r'https?://(?:www\.)?dr\.dk/(?:tv|TV)/live/(?P<id>[\da-z-]+)' + _TEST = { + 'url': 'https://www.dr.dk/tv/live/dr1', + 'info_dict': { + 'id': 'dr1', + 'ext': 'mp4', + 'title': 're:^DR1 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + + def _real_extract(self, url): + channel_id = self._match_id(url) + channel_data = self._download_json( + 'https://www.dr.dk/mu-online/api/1.0/channel/' + channel_id, + channel_id) + title = self._live_title(channel_data['Title']) + + formats = [] + for streaming_server in channel_data.get('StreamingServers', []): + server = streaming_server.get('Server') + if not server: + continue + link_type = streaming_server.get('LinkType') + for quality in streaming_server.get('Qualities', []): + for stream in quality.get('Streams', []): + stream_path = stream.get('Stream') + if not stream_path: + continue + stream_url = update_url_query( + '%s/%s' % (server, stream_path), {'b': ''}) + if link_type == 'HLS': + formats.extend(self._extract_m3u8_formats( + stream_url, channel_id, 'mp4', + m3u8_id=link_type, fatal=False, live=True)) + elif link_type == 'HDS': + formats.extend(self._extract_f4m_formats(update_url_query( + '%s/%s' % (server, stream_path), {'hdcore': '3.7.0'}), + channel_id, f4m_id=link_type, fatal=False)) + self._sort_formats(formats) + + return { + 'id': channel_id, + 'title': title, + 'thumbnail': channel_data.get('PrimaryImageUri'), + 'formats': formats, + 'is_live': True, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index eaf3676df..32420937c 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -248,7 +248,10 @@ from .dramafever import ( from .dreisat import DreiSatIE from .drbonanza import DRBonanzaIE from .drtuber import DrTuberIE -from .drtv import DRTVIE +from .drtv import ( + DRTVIE, + DRTVLiveIE, +) from .dvtv import DVTVIE from .dumpert import DumpertIE from .defense import DefenseGouvFrIE -- cgit 1.4.1 From 6ef3e65a7b244d5e432e764772177c7d48cab237 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 5 Feb 2017 13:37:27 +0700 Subject: [videopress] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/generic.py | 22 +++++++++ youtube_dl/extractor/videopress.py | 99 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 122 insertions(+) create mode 100644 youtube_dl/extractor/videopress.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 32420937c..cf608faee 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1095,6 +1095,7 @@ from .videomore import ( VideomoreSeasonIE, ) from .videopremium import VideoPremiumIE +from .videopress import VideoPressIE from .vidio import 
VidioIE from .vidme import ( VidmeIE, diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index a23486620..4156cf27d 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -81,6 +81,7 @@ from .videa import VideaIE from .twentymin import TwentyMinutenIE from .ustream import UstreamIE from .openload import OpenloadIE +from .videopress import VideoPressIE class GenericIE(InfoExtractor): @@ -1473,6 +1474,21 @@ class GenericIE(InfoExtractor): 'skip_download': True, }, 'add_ie': [TwentyMinutenIE.ie_key()], + }, + { + # VideoPress embed + 'url': 'https://en.support.wordpress.com/videopress/', + 'info_dict': { + 'id': 'OcobLTqC', + 'ext': 'm4v', + 'title': 'IMG_5786', + 'timestamp': 1435711927, + 'upload_date': '20150701', + }, + 'params': { + 'skip_download': True, + }, + 'add_ie': [VideoPressIE.ie_key()], } # { # # TODO: find another test @@ -2438,6 +2454,12 @@ class GenericIE(InfoExtractor): return _playlist_from_matches( openload_urls, ie=OpenloadIE.ie_key()) + # Look for VideoPress embeds + videopress_urls = VideoPressIE._extract_urls(webpage) + if videopress_urls: + return _playlist_from_matches( + videopress_urls, ie=VideoPressIE.ie_key()) + # Looking for http://schema.org/VideoObject json_ld = self._search_json_ld( webpage, video_id, default={}, expected_type='VideoObject') diff --git a/youtube_dl/extractor/videopress.py b/youtube_dl/extractor/videopress.py new file mode 100644 index 000000000..049db25a5 --- /dev/null +++ b/youtube_dl/extractor/videopress.py @@ -0,0 +1,99 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import random +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + determine_ext, + float_or_none, + parse_age_limit, + qualities, + try_get, + unified_timestamp, + urljoin, +) + + +class VideoPressIE(InfoExtractor): + _VALID_URL = r'https?://videopress\.com/embed/(?P<id>[\da-zA-Z]+)' + _TESTS = [{ + 'url': 'https://videopress.com/embed/kUJmAcSf', + 'md5': '706956a6c875873d51010921310e4bc6', + 'info_dict': { + 'id': 'kUJmAcSf', + 'ext': 'mp4', + 'title': 'VideoPress Demo', + 'thumbnail': r're:^https?://.*\.jpg', + 'duration': 634.6, + 'timestamp': 1434983935, + 'upload_date': '20150622', + 'age_limit': 0, + }, + }, { + # 17+, requires birth_* params + 'url': 'https://videopress.com/embed/iH3gstfZ', + 'only_matching': True, + }] + + @staticmethod + def _extract_urls(webpage): + return re.findall( + r'<iframe[^>]+src=["\']((?:https?://)?videopress\.com/embed/[\da-zA-Z]+)', + webpage) + + def _real_extract(self, url): + video_id = self._match_id(url) + + video = self._download_json( + 'https://public-api.wordpress.com/rest/v1.1/videos/%s' % video_id, + video_id, query={ + 'birth_month': random.randint(1, 12), + 'birth_day': random.randint(1, 31), + 'birth_year': random.randint(1950, 1995), + }) + + title = video['title'] + + def base_url(scheme): + return try_get( + video, lambda x: x['file_url_base'][scheme], compat_str) + + base_url = base_url('https') or base_url('http') + + QUALITIES = ('std', 'dvd', 'hd') + quality = qualities(QUALITIES) + + formats = [] + for format_id, f in video['files'].items(): + if not isinstance(f, dict): + continue + for ext, path in f.items(): + if ext in ('mp4', 'ogg'): + formats.append({ + 'url': urljoin(base_url, path), + 'format_id': '%s-%s' % (format_id, ext), + 'ext': determine_ext(path, ext), + 'quality': quality(format_id), + }) + original_url = try_get(video, lambda x: x['original'], compat_str) + if original_url: 
+ formats.append({ + 'url': original_url, + 'format_id': 'original', + 'quality': len(QUALITIES), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': video.get('description'), + 'thumbnail': video.get('poster'), + 'duration': float_or_none(video.get('duration'), 1000), + 'timestamp': unified_timestamp(video.get('upload_date')), + 'age_limit': parse_age_limit(video.get('rating')), + 'formats': formats, + } -- cgit 1.4.1 From e705755739e32779ef819370124d78981b04414c Mon Sep 17 00:00:00 2001 From: motophil <phil@rrr.de> Date: Sun, 5 Feb 2017 17:19:37 +0100 Subject: [gaskrank] Add new extractor (#11685) * [gaskrank] Add new extractor * [gaskrank] Add new extractor - fixes as requested * [gaskrank] Add new extractor - style fix * [Gaskrank] Add new extractor - requested fixes * [Gaskrank] Add new extractor - fix md5 checksum * [gaskrank] Add new extractor - more requested fixes * [Gaskrank] Add new extractor - fixed all but one quantified code issues * [Gaskrank] add new extractor - more fields extracted, added second test * [Gaskrank] Add new extractor - requested fixes. * [Gaskrank] Add new extractor - requested changes. * [Gaskrank] Add new extractor - final(?) fixes. --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/gaskrank.py | 121 +++++++++++++++++++++++++++++++++++++ 2 files changed, 122 insertions(+) create mode 100644 youtube_dl/extractor/gaskrank.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index cf608faee..12cda36cc 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -349,6 +349,7 @@ from .gameone import ( from .gamersyde import GamersydeIE from .gamespot import GameSpotIE from .gamestar import GameStarIE +from .gaskrank import GaskrankIE from .gazeta import GazetaIE from .gdcvault import GDCVaultIE from .generic import GenericIE diff --git a/youtube_dl/extractor/gaskrank.py b/youtube_dl/extractor/gaskrank.py new file mode 100644 index 000000000..972b47bf2 --- /dev/null +++ b/youtube_dl/extractor/gaskrank.py @@ -0,0 +1,121 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re +from .common import InfoExtractor +from ..utils import float_or_none +from ..utils import int_or_none +from ..utils import js_to_json +from ..utils import unified_strdate + + +class GaskrankIE(InfoExtractor): + """InfoExtractor for gaskrank.tv""" + _VALID_URL = r'https?://(?:www\.)?gaskrank\.tv/tv/(?P<categories>[^/]+)/(?P<id>[^/]+)\.html?' + _TESTS = [ + { + 'url': 'http://www.gaskrank.tv/tv/motorrad-fun/strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden.htm', + 'md5': '1ae88dbac97887d85ebd1157a95fc4f9', + 'info_dict': { + 'id': '201601/26955', + 'ext': 'mp4', + 'title': 'Strike! 
Einparken können nur Männer - Flurschaden hält sich in Grenzen *lol*', + 'thumbnail': r're:^https?://.*\.jpg$', + 'categories': ['motorrad-fun'], + 'display_id': 'strike-einparken-durch-anfaenger-crash-mit-groesserem-flurschaden', + 'uploader_id': 'Bikefun', + 'upload_date': '20170110', + 'uploader_url': None, + } + }, + { + 'url': 'http://www.gaskrank.tv/tv/racing/isle-of-man-tt-2011-michael-du-15920.htm', + 'md5': 'c33ee32c711bc6c8224bfcbe62b23095', + 'info_dict': { + 'id': '201106/15920', + 'ext': 'mp4', + 'title': 'Isle of Man - Michael Dunlop vs Guy Martin - schwindelig kucken', + 'thumbnail': r're:^https?://.*\.jpg$', + 'categories': ['racing'], + 'display_id': 'isle-of-man-tt-2011-michael-du-15920', + 'uploader_id': 'IOM', + 'upload_date': '20160506', + 'uploader_url': 'www.iomtt.com', + } + } + ] + + def _real_extract(self, url): + """extract information from gaskrank.tv""" + def fix_json(code): + """Removes trailing comma in json: {{},} --> {{}}""" + return re.sub(r',\s*}', r'}', js_to_json(code)) + + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + categories = [re.match(self._VALID_URL, url).group('categories')] + title = self._search_regex( + r'movieName\s*:\s*\'([^\']*)\'', + webpage, 'title') + thumbnail = self._search_regex( + r'poster\s*:\s*\'([^\']*)\'', + webpage, 'thumbnail', default=None) + + mobj = re.search( + r'Video von:\s*(?P<uploader_id>[^|]*?)\s*\|\s*vom:\s*(?P<upload_date>[0-9][0-9]\.[0-9][0-9]\.[0-9][0-9][0-9][0-9])', + webpage) + if mobj is not None: + uploader_id = mobj.groupdict().get('uploader_id') + upload_date = unified_strdate(mobj.groupdict().get('upload_date')) + + uploader_url = self._search_regex( + r'Homepage:\s*<[^>]*>(?P<uploader_url>[^<]*)', + webpage, 'uploader_url', default=None) + tags = re.findall( + r'/tv/tags/[^/]+/"\s*>(?P<tag>[^<]*?)<', + webpage) + + view_count = self._search_regex( + r'class\s*=\s*"gkRight"(?:[^>]*>\s*<[^>]*)*icon-eye-open(?:[^>]*>\s*<[^>]*)*>\s*(?P<view_count>[0-9\.]*)', + webpage, 'view_count', default=None) + if view_count: + view_count = int_or_none(view_count.replace('.', '')) + + average_rating = self._search_regex( + r'itemprop\s*=\s*"ratingValue"[^>]*>\s*(?P<average_rating>[0-9,]+)', + webpage, 'average_rating') + if average_rating: + average_rating = float_or_none(average_rating.replace(',', '.')) + + playlist = self._parse_json( + self._search_regex( + r'playlist\s*:\s*\[([^\]]*)\]', + webpage, 'playlist', default='{}'), + display_id, transform_source=fix_json, fatal=False) + + video_id = self._search_regex( + r'https?://movies\.gaskrank\.tv/([^-]*?)(-[^\.]*)?\.mp4', + playlist.get('0').get('src'), 'video id') + + formats = [] + for key in playlist: + formats.append({ + 'url': playlist[key]['src'], + 'format_id': key, + 'quality': playlist[key].get('quality')}) + self._sort_formats(formats, field_preference=['format_id']) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnail': thumbnail, + 'categories': categories, + 'display_id': display_id, + 'uploader_id': uploader_id, + 'upload_date': upload_date, + 'uploader_url': uploader_url, + 'tags': tags, + 'view_count': view_count, + 'average_rating': average_rating, + } -- cgit 1.4.1 From 829801827383324f1375b16276435781992a6619 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Wed, 8 Feb 2017 20:44:23 +0100 Subject: [scrippsnetworks:watch] Add new extractor(closes #10765) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/scrippsnetworks.py | 60 
+++++++++++++++++++++++++++++++++ 2 files changed, 61 insertions(+) create mode 100644 youtube_dl/extractor/scrippsnetworks.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 12cda36cc..795843dae 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -838,6 +838,7 @@ from .sbs import SBSIE from .scivee import SciVeeIE from .screencast import ScreencastIE from .screencastomatic import ScreencastOMaticIE +from .scrippsnetworks import ScrippsNetworksWatchIE from .seeker import SeekerIE from .senateisvp import SenateISVPIE from .sendtonews import SendtoNewsIE diff --git a/youtube_dl/extractor/scrippsnetworks.py b/youtube_dl/extractor/scrippsnetworks.py new file mode 100644 index 000000000..597d6f543 --- /dev/null +++ b/youtube_dl/extractor/scrippsnetworks.py @@ -0,0 +1,60 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .adobepass import AdobePassIE +from ..utils import ( + int_or_none, + smuggle_url, + update_url_query, +) + + +class ScrippsNetworksWatchIE(AdobePassIE): + IE_NAME = 'scrippsnetworks:watch' + _VALID_URL = r'https?://watch\.(?:hgtv|foodnetwork|travelchannel|diynetwork|cookingchanneltv)\.com/player\.[A-Z0-9]+\.html#(?P<id>\d+)' + _TEST = { + 'url': 'http://watch.hgtv.com/player.HNT.html#0256538', + 'md5': '26545fd676d939954c6808274bdb905a', + 'info_dict': { + 'id': '0256538', + 'ext': 'mp4', + 'title': 'Seeking a Wow House', + 'description': 'Buyers retiring in Palm Springs, California, want a modern house with major wow factor. They\'re also looking for a pool and a large, open floorplan with tall windows looking out at the views.', + 'uploader': 'SCNI', + 'upload_date': '20170207', + 'timestamp': 1486450493, + }, + 'skip': 'requires TV provider authentication', + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + channel = self._parse_json(self._search_regex( + r'"channels"\s*:\s*(\[.+\])', + webpage, 'channels'), video_id)[0] + video_data = next(v for v in channel['videos'] if v.get('nlvid') == video_id) + title = video_data['title'] + release_url = video_data['releaseUrl'] + if video_data.get('restricted'): + requestor_id = self._search_regex( + r'requestorId\s*=\s*"([^"]+)";', webpage, 'requestor id') + resource = self._get_mvpd_resource( + requestor_id, title, video_id, + video_data.get('ratings', [{}])[0].get('rating')) + auth = self._extract_mvpd_auth( + url, video_id, requestor_id, resource) + release_url = update_url_query(release_url, {'auth': auth}) + + return { + '_type': 'url_transparent', + 'id': video_id, + 'title': title, + 'url': smuggle_url(release_url, {'force_smil_url': True}), + 'description': video_data.get('description'), + 'thumbnail': video_data.get('thumbnailUrl'), + 'series': video_data.get('showTitle'), + 'season_number': int_or_none(video_data.get('season')), + 'episode_number': int_or_none(video_data.get('episodeNumber')), + 'ie_key': 'ThePlatform', + } -- cgit 1.4.1 From b04975733c02f3bc0043c02d170bd3a042c67b89 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 9 Feb 2017 09:13:29 +0100 Subject: [sprout] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/sprout.py | 52 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 youtube_dl/extractor/sprout.py (limited to 'youtube_dl/extractor/extractors.py') diff --git 
a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 795843dae..3445e7d40 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -896,6 +896,7 @@ from .sport5 import Sport5IE from .sportbox import SportBoxEmbedIE from .sportdeutschland import SportDeutschlandIE from .sportschau import SportschauIE +from .sprout import SproutIE from .srgssr import ( SRGSSRIE, SRGSSRPlayIE, diff --git a/youtube_dl/extractor/sprout.py b/youtube_dl/extractor/sprout.py new file mode 100644 index 000000000..8467bf49d --- /dev/null +++ b/youtube_dl/extractor/sprout.py @@ -0,0 +1,52 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .adobepass import AdobePassIE +from ..utils import ( + extract_attributes, + update_url_query, + smuggle_url, +) + + +class SproutIE(AdobePassIE): + _VALID_URL = r'https?://(?:www\.)?sproutonline\.com/watch/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'http://www.sproutonline.com/watch/cowboy-adventure', + 'md5': '74bf14128578d1e040c3ebc82088f45f', + 'info_dict': { + 'id': '9dexnwtmh8_X', + 'ext': 'mp4', + 'title': 'A Cowboy Adventure', + 'description': 'Ruff-Ruff, Tweet and Dave get to be cowboys for the day at Six Cow Corral.', + 'timestamp': 1437758640, + 'upload_date': '20150724', + 'uploader': 'NBCU-SPROUT-NEW', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + video_component = self._search_regex( + r'(?s)(<div[^>]+data-component="video"[^>]*?>)', + webpage, 'video component', default=None) + if video_component: + options = self._parse_json(extract_attributes( + video_component)['data-options'], video_id) + theplatform_url = options['video'] + query = { + 'mbr': 'true', + 'manifest': 'm3u', + } + if options.get('protected'): + query['auth'] = self._extract_mvpd_auth(url, options['pid'], 'sprout', 'sprout') + theplatform_url = smuggle_url(update_url_query( + theplatform_url, query), {'force_smil_url': True}) + else: + iframe = self._search_regex( + r'(<iframe[^>]+id="sproutVideoIframe"[^>]*?>)', + webpage, 'iframe') + theplatform_url = extract_attributes(iframe)['src'] + + return self.url_result(theplatform_url, 'ThePlatform') -- cgit 1.4.1 From 4d32b6385160e8cf9117839022ed795ce02b107d Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 9 Feb 2017 23:07:43 +0100 Subject: [tvplayer] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tvplayer.py | 75 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 76 insertions(+) create mode 100644 youtube_dl/extractor/tvplayer.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 3445e7d40..5115e1a0c 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1019,6 +1019,7 @@ from .tvplay import ( TVPlayIE, ViafreeIE, ) +from .tvplayer import TVPlayerIE from .tweakers import TweakersIE from .twentyfourvideo import TwentyFourVideoIE from .twentymin import TwentyMinutenIE diff --git a/youtube_dl/extractor/tvplayer.py b/youtube_dl/extractor/tvplayer.py new file mode 100644 index 000000000..b6537141a --- /dev/null +++ b/youtube_dl/extractor/tvplayer.py @@ -0,0 +1,75 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_HTTPError +from ..utils import ( + extract_attributes, + urlencode_postdata, + ExtractorError, +) + + +class 
TVPlayerIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?tvplayer\.com/watch/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'http://tvplayer.com/watch/bbcone', + 'info_dict': { + 'id': '89', + 'ext': 'mp4', + 'title': r're:^BBC One [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + current_channel = extract_attributes(self._search_regex( + r'(<div[^>]+class="[^"]*current-channel[^"]*"[^>]*>)', + webpage, 'channel element')) + title = current_channel['data-name'] + + resource_id = self._search_regex( + r'resourceId\s*=\s*"(\d+)"', webpage, 'resource id') + platform = self._search_regex( + r'platform\s*=\s*"([^"]+)"', webpage, 'platform') + token = self._search_regex( + r'token\s*=\s*"([^"]+)"', webpage, 'token', default='null') + validate = self._search_regex( + r'validate\s*=\s*"([^"]+)"', webpage, 'validate', default='null') + + try: + response = self._download_json( + 'http://api.tvplayer.com/api/v2/stream/live', + resource_id, headers={ + 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8', + }, data=urlencode_postdata({ + 'service': 1, + 'platform': platform, + 'id': resource_id, + 'token': token, + 'validate': validate, + }))['tvplayer']['response'] + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError): + response = self._parse_json( + e.cause.read().decode(), resource_id)['tvplayer']['response'] + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, response['error']), expected=True) + raise + + formats = self._extract_m3u8_formats(response['stream'], resource_id, 'mp4') + self._sort_formats(formats) + + return { + 'id': resource_id, + 'display_id': display_id, + 'title': self._live_title(title), + 'formats': formats, + 'is_live': True, + } -- cgit 1.4.1 From c7d6f614f35c81cf64941affdf683a478078274b Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 10 Feb 2017 16:59:49 +0100 Subject: [corus] Add new extractor(closes #12060)(#9164) --- youtube_dl/extractor/corus.py | 72 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 6 ++-- youtube_dl/extractor/hgtv.py | 44 ----------------------- youtube_dl/extractor/theplatform.py | 5 +-- 4 files changed, 77 insertions(+), 50 deletions(-) create mode 100644 youtube_dl/extractor/corus.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/corus.py b/youtube_dl/extractor/corus.py new file mode 100644 index 000000000..7b2f5008b --- /dev/null +++ b/youtube_dl/extractor/corus.py @@ -0,0 +1,72 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .theplatform import ThePlatformFeedIE +from ..utils import int_or_none + + +class CorusIE(ThePlatformFeedIE): + _VALID_URL = r'https?://(?:www\.)?(?P<domain>(?:globaltv|etcanada)\.com|(?:hgtv|foodnetwork|slice)\.ca)/(?:video/|(?:[^/]+/)+(?:videos/[a-z0-9-]+-|video\.html\?.*?\bv=))(?P<id>\d+)' + _TESTS = [{ + 'url': 'http://www.hgtv.ca/shows/bryan-inc/videos/movie-night-popcorn-with-bryan-870923331648/', + 'md5': '05dcbca777bf1e58c2acbb57168ad3a6', + 'info_dict': { + 'id': '870923331648', + 'ext': 'mp4', + 'title': 'Movie Night Popcorn with Bryan', + 'description': 'Bryan whips up homemade popcorn, the old fashion way for Jojo and Lincoln.', + 'uploader': 'SHWM-NEW', + 'upload_date': '20170206', + 'timestamp': 1486392197, + }, + }, { + 'url': 
'http://www.foodnetwork.ca/shows/chopped/video/episode/chocolate-obsession/video.html?v=872683587753', + 'only_matching': True, + }, { + 'url': 'http://etcanada.com/video/873675331955/meet-the-survivor-game-changers-castaways-part-2/', + 'only_matching': True, + }] + + _TP_FEEDS = { + 'globaltv': { + 'feed_id': 'ChQqrem0lNUp', + 'account_id': 2269680845, + }, + 'etcanada': { + 'feed_id': 'ChQqrem0lNUp', + 'account_id': 2269680845, + }, + 'hgtv': { + 'feed_id': 'L0BMHXi2no43', + 'account_id': 2414428465, + }, + 'foodnetwork': { + 'feed_id': 'ukK8o58zbRmJ', + 'account_id': 2414429569, + }, + 'slice': { + 'feed_id': '5tUJLgV2YNJ5', + 'account_id': 2414427935, + }, + } + + def _real_extract(self, url): + domain, video_id = re.match(self._VALID_URL, url).groups() + feed_info = self._TP_FEEDS[domain.split('.')[0]] + return self._extract_feed_info('dtjsEC', feed_info['feed_id'], 'byId=' + video_id, video_id, lambda e: { + 'episode_number': int_or_none(e.get('pl1$episode')), + 'season_number': int_or_none(e.get('pl1$season')), + 'series': e.get('pl1$show'), + }, { + 'HLS': { + 'manifest': 'm3u', + }, + 'DesktopHLS Default': { + 'manifest': 'm3u', + }, + 'MP4 MBR': { + 'manifest': 'm3u', + }, + }, feed_info['account_id']) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 5115e1a0c..76ad7c40b 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -202,6 +202,7 @@ from .commonprotocols import ( RtmpIE, ) from .condenast import CondeNastIE +from .corus import CorusIE from .cracked import CrackedIE from .crackle import CrackleIE from .criterion import CriterionIE @@ -381,10 +382,7 @@ from .heise import HeiseIE from .hellporno import HellPornoIE from .helsinki import HelsinkiIE from .hentaistigma import HentaiStigmaIE -from .hgtv import ( - HGTVIE, - HGTVComShowIE, -) +from .hgtv import HGTVComShowIE from .historicfilms import HistoricFilmsIE from .hitbox import HitboxIE, HitboxLiveIE from .hitrecord import HitRecordIE diff --git a/youtube_dl/extractor/hgtv.py b/youtube_dl/extractor/hgtv.py index 69543bff2..e854300c7 100644 --- a/youtube_dl/extractor/hgtv.py +++ b/youtube_dl/extractor/hgtv.py @@ -2,50 +2,6 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..utils import ( - int_or_none, - js_to_json, - smuggle_url, -) - - -class HGTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?hgtv\.ca/[^/]+/video/(?P<id>[^/]+)/video.html' - _TEST = { - 'url': 'http://www.hgtv.ca/homefree/video/overnight-success/video.html?v=738081859718&p=1&s=da#video', - 'md5': '', - 'info_dict': { - 'id': 'aFH__I_5FBOX', - 'ext': 'mp4', - 'title': 'Overnight Success', - 'description': 'After weeks of hard work, high stakes, breakdowns and pep talks, the final 2 contestants compete to win the ultimate dream.', - 'uploader': 'SHWM-NEW', - 'timestamp': 1470320034, - 'upload_date': '20160804', - }, - 'params': { - # m3u8 download - 'skip_download': True, - }, - } - - def _real_extract(self, url): - display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - embed_vars = self._parse_json(self._search_regex( - r'(?s)embed_vars\s*=\s*({.*?});', - webpage, 'embed vars'), display_id, js_to_json) - return { - '_type': 'url_transparent', - 'url': smuggle_url( - 'http://link.theplatform.com/s/dtjsEC/%s?mbr=true&manifest=m3u' % embed_vars['pid'], { - 'force_smil_url': True - }), - 'series': embed_vars.get('show'), - 'season_number': int_or_none(embed_vars.get('season')), - 'episode_number': 
int_or_none(embed_vars.get('episode')), - 'ie_key': 'ThePlatform', - } class HGTVComShowIE(InfoExtractor): diff --git a/youtube_dl/extractor/theplatform.py b/youtube_dl/extractor/theplatform.py index 192d8fa29..5c5987c6a 100644 --- a/youtube_dl/extractor/theplatform.py +++ b/youtube_dl/extractor/theplatform.py @@ -306,9 +306,10 @@ class ThePlatformFeedIE(ThePlatformBaseIE): }, }] - def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}): + def _extract_feed_info(self, provider_id, feed_id, filter_query, video_id, custom_fields=None, asset_types_query={}, account_id=None): real_url = self._URL_TEMPLATE % (self.http_scheme(), provider_id, feed_id, filter_query) entry = self._download_json(real_url, video_id)['entries'][0] + main_smil_url = 'http://link.theplatform.com/s/%s/media/guid/%d/%s' % (provider_id, account_id, entry['guid']) if account_id else None formats = [] subtitles = {} @@ -333,7 +334,7 @@ class ThePlatformFeedIE(ThePlatformBaseIE): if asset_type in asset_types_query: query.update(asset_types_query[asset_type]) cur_formats, cur_subtitles = self._extract_theplatform_smil(update_url_query( - smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type) + main_smil_url or smil_url, query), video_id, 'Downloading SMIL data for %s' % asset_type) formats.extend(cur_formats) subtitles = self._merge_subtitles(subtitles, cur_subtitles) -- cgit 1.4.1 From 22ce9ad2bdad2bf79b22f82cfff7f58156c9d349 Mon Sep 17 00:00:00 2001 From: Marek Rusinowski <marekrusinowski@gmail.com> Date: Mon, 13 Feb 2017 21:42:26 +0100 Subject: [vod.pl] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vodpl.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 youtube_dl/extractor/vodpl.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 76ad7c40b..657e45e6f 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1147,6 +1147,7 @@ from .vlive import ( VLiveChannelIE ) from .vodlocker import VodlockerIE +from .vodpl import VODPlIE from .vodplatform import VODPlatformIE from .voicerepublic import VoiceRepublicIE from .voxmedia import VoxMediaIE diff --git a/youtube_dl/extractor/vodpl.py b/youtube_dl/extractor/vodpl.py new file mode 100644 index 000000000..f612347ce --- /dev/null +++ b/youtube_dl/extractor/vodpl.py @@ -0,0 +1,36 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .onet import OnetBaseIE +from ..utils import clean_html + + +class VODPlIE(OnetBaseIE): + _VALID_URL = r'https?://vod\.pl/(?:.*/)?(?P<id>[0-9a-zA-Z]+)' + + _TEST = { + 'url': 'https://vod.pl/filmy/chlopaki-nie-placza/3ep3jns', + 'md5': 'a7dc3b2f7faa2421aefb0ecaabf7ec74', + 'info_dict': { + 'id': '3ep3jns', + 'ext': 'mp4', + 'title': 'Chłopaki nie płaczą', + 'description': 'Kuba Brenner aby pomóc swojemu nieśmiałemu przyjacielowi Oskarowi wynajmuje w agencji towarzyskiej dwie panie. Po upojnej nocy okazuje się, że chłopcy nie byli przygotowani finansowo. "Opiekun artystyczny" dziewczyn zabiera w ramach rekompensaty drogocenną rzeźbę należącą do wujka Oskara. Kłopoty chłopców zaczynają się, gdy Kuba udaje się do agencji aby wykupić figurkę i trafia w sam środek mafijnej transakcji... Idiotyczny przypadek sprawia, że w klubie dochodzi do strzelaniny podczas której Grucha i Bolec zostają ranni, ginie również walizka z pieniędzmi... 
Podejrzenie pada na Kubę.', + 'timestamp': 1463415154, + 'duration': 5765, + 'upload_date': '20160516', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + mvp_id = self._search_mvp_id(webpage) + + info_dict = self._extract_from_id(mvp_id, webpage) + info_dict.update({ + 'id': video_id, + 'description': clean_html(info_dict['description']).strip().replace('\r', '\n') + }) + + return info_dict -- cgit 1.4.1 From d31aa74fdb3f69071ba869feba03525f67e974f0 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Wed, 15 Feb 2017 00:58:18 +0700 Subject: [onetmvp] Add shortcut extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/onet.py | 20 +++++++++++++++++--- 2 files changed, 18 insertions(+), 3 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 657e45e6f..b2ee0c1b0 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -694,6 +694,7 @@ from .ondemandkorea import OnDemandKoreaIE from .onet import ( OnetIE, OnetChannelIE, + OnetMVPIE, ) from .onionstudios import OnionStudiosIE from .ooyala import ( diff --git a/youtube_dl/extractor/onet.py b/youtube_dl/extractor/onet.py index 0a501b3e5..46bad492a 100644 --- a/youtube_dl/extractor/onet.py +++ b/youtube_dl/extractor/onet.py @@ -23,7 +23,7 @@ class OnetBaseIE(InfoExtractor): return self._search_regex( r'id=(["\'])mvp:(?P<id>.+?)\1', webpage, 'mvp id', group='id') - def _extract_from_id(self, video_id, webpage): + def _extract_from_id(self, video_id, webpage=None): response = self._download_json( 'http://qi.ckm.onetapi.pl/', video_id, query={ @@ -74,8 +74,10 @@ class OnetBaseIE(InfoExtractor): meta = video.get('meta', {}) - title = self._og_search_title(webpage, default=None) or meta['title'] - description = self._og_search_description(webpage, default=None) or meta.get('description') + title = (self._og_search_title( + webpage, default=None) if webpage else None) or meta['title'] + description = (self._og_search_description( + webpage, default=None) if webpage else None) or meta.get('description') duration = meta.get('length') or meta.get('lenght') timestamp = parse_iso8601(meta.get('addDate'), ' ') @@ -89,6 +91,18 @@ class OnetBaseIE(InfoExtractor): } +class OnetMVPIE(OnetBaseIE): + _VALID_URL = r'onetmvp:(?P<id>\d+\.\d+)' + + _TEST = { + 'url': 'onetmvp:381027.1509591944', + 'only_matching': True, + } + + def _real_extract(self, url): + return self._extract_from_id(self._match_id(url)) + + class OnetIE(OnetBaseIE): _VALID_URL = r'https?://(?:www\.)?onet\.tv/[a-z]/[a-z]+/(?P<display_id>[0-9a-z-]+)/(?P<id>[0-9a-z]+)' IE_NAME = 'onet.tv' -- cgit 1.4.1 From 43a3d9edfcdad8eb33758c4a7f4f912322001b8a Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Wed, 15 Feb 2017 01:14:06 +0700 Subject: [onetpl] Add support for onet.pl (closes #10507) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/onet.py | 32 ++++++++++++++++++++++++++++++++ 2 files changed, 33 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b2ee0c1b0..be3688d5a 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -695,6 +695,7 @@ from .onet import ( OnetIE, OnetChannelIE, OnetMVPIE, + OnetPlIE, ) from .onionstudios import OnionStudiosIE from .ooyala import ( diff --git 
a/youtube_dl/extractor/onet.py b/youtube_dl/extractor/onet.py index 46bad492a..801aadbff 100644 --- a/youtube_dl/extractor/onet.py +++ b/youtube_dl/extractor/onet.py @@ -181,3 +181,35 @@ class OnetChannelIE(OnetBaseIE): channel_title = strip_or_none(get_element_by_class('o_channelName', webpage)) channel_description = strip_or_none(get_element_by_class('o_channelDesc', webpage)) return self.playlist_result(entries, channel_id, channel_title, channel_description) + + +class OnetPlIE(InfoExtractor): + _VALID_URL = r'https?://(?:[^/]+\.)?onet\.pl/(?:[^/]+/)+(?P<id>[0-9a-z]+)' + IE_NAME = 'onet.pl' + + _TESTS = [{ + 'url': 'http://eurosport.onet.pl/zimowe/skoki-narciarskie/ziobro-wygral-kwalifikacje-w-pjongczangu/9ckrly', + 'md5': 'b94021eb56214c3969380388b6e73cb0', + 'info_dict': { + 'id': '1561707.1685479', + 'ext': 'mp4', + 'title': 'Ziobro wygrał kwalifikacje w Pjongczangu', + 'description': 'md5:61fb0740084d2d702ea96512a03585b4', + 'upload_date': '20170214', + 'timestamp': 1487078046, + }, + }, { + 'url': 'http://film.onet.pl/zwiastuny/ghost-in-the-shell-drugi-zwiastun-pl/5q6yl3', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + mvp_id = self._search_regex( + r'data-params-mvp=["\'](\d+\.\d+)', webpage, 'mvp id') + + return self.url_result( + 'onetmvp:%s' % mvp_id, OnetMVPIE.ie_key(), video_id=mvp_id) -- cgit 1.4.1 From 02d9b82a233abcb778f3f8601b229f996fd7df94 Mon Sep 17 00:00:00 2001 From: Jakub Wilk <jwilk@jwilk.net> Date: Wed, 11 Jan 2017 18:49:40 +0100 Subject: [tvn24] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tvn24.py | 47 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) create mode 100644 youtube_dl/extractor/tvn24.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index be3688d5a..55b4782d3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1009,6 +1009,7 @@ from .tvc import ( ) from .tvigle import TvigleIE from .tvland import TVLandIE +from .tvn24 import TVN24IE from .tvnoe import TVNoeIE from .tvp import ( TVPEmbedIE, diff --git a/youtube_dl/extractor/tvn24.py b/youtube_dl/extractor/tvn24.py new file mode 100644 index 000000000..225ee4a6a --- /dev/null +++ b/youtube_dl/extractor/tvn24.py @@ -0,0 +1,47 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class TVN24IE(InfoExtractor): + _VALID_URL = r'http://(?:tvn24bis|(?:www|fakty)\.tvn24)\.pl/.+/(?P<id>[^/]+)\.html' + _TEST = { + 'url': 'http://www.tvn24.pl/wiadomosci-z-kraju,3/oredzie-artura-andrusa,702428.html', + 'md5': 'fbdec753d7bc29d96036808275f2130c', + 'info_dict': { + 'id': '1584444', + 'ext': 'mp4', + 'title': '"Święta mają być wesołe, dlatego, ludziska, wszyscy pod jemiołę"', + 'description': 'Wyjątkowe orędzie Artura Andrusa, jednego z gości "Szkła kontaktowego".', + 'thumbnail': 're:http://.*[.]jpeg', + } + } + + def _real_extract(self, url): + page_id = self._match_id(url) + webpage = self._download_webpage(url, page_id) + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + thumbnail = self._html_search_regex(r'\bdata-poster="(.+?)"', webpage, 'data-poster') + share_params = self._html_search_regex(r'\bdata-share-params="(.+?)"', webpage, 'data-share-params') + share_params = self._parse_json(share_params, page_id) + video_id = 
share_params['id'] + quality_data = self._html_search_regex(r'\bdata-quality="(.+?)"', webpage, 'data-quality') + quality_data = self._parse_json(quality_data, page_id) + formats = [] + for format_id, url in quality_data.items(): + formats.append({ + 'format_id': format_id, + 'height': int(format_id.rstrip('p')), + 'url': url, + 'ext': 'mp4', + }) + self._sort_formats(formats) + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'formats': formats, + } -- cgit 1.4.1 From d5fd9a3be305aa8fead8fb70aae64703afe49e43 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Thu, 23 Feb 2017 18:44:04 +0700 Subject: [skylinewebcams] Add extractor (closes #12221) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/skylinewebcams.py | 42 ++++++++++++++++++++++++++++++++++ 2 files changed, 43 insertions(+) create mode 100644 youtube_dl/extractor/skylinewebcams.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 55b4782d3..83a170fa7 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -852,6 +852,7 @@ from .shared import ( from .showroomlive import ShowRoomLiveIE from .sina import SinaIE from .sixplay import SixPlayIE +from .skylinewebcams import SkylineWebcamsIE from .skynewsarabia import ( SkyNewsArabiaIE, SkyNewsArabiaArticleIE, diff --git a/youtube_dl/extractor/skylinewebcams.py b/youtube_dl/extractor/skylinewebcams.py new file mode 100644 index 000000000..5b4aaac6f --- /dev/null +++ b/youtube_dl/extractor/skylinewebcams.py @@ -0,0 +1,42 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class SkylineWebcamsIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?skylinewebcams\.com/[^/]+/webcam/(?:[^/]+/)+(?P<id>[^/]+)\.html' + _TEST = { + 'url': 'https://www.skylinewebcams.com/it/webcam/italia/lazio/roma/scalinata-piazza-di-spagna-barcaccia.html', + 'info_dict': { + 'id': 'scalinata-piazza-di-spagna-barcaccia', + 'ext': 'mp4', + 'title': 're:^Live Webcam Scalinata di Piazza di Spagna - La Barcaccia [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$', + 'description': 'Roma, veduta sulla Scalinata di Piazza di Spagna e sulla Barcaccia', + 'is_live': True, + }, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + stream_url = self._search_regex( + r'url\s*:\s*(["\'])(?P<url>(?:https?:)?//.+?\.m3u8.*?)\1', webpage, + 'stream url', group='url') + + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + + return { + 'id': video_id, + 'url': stream_url, + 'ext': 'mp4', + 'title': self._live_title(title), + 'description': description, + 'is_live': True, + } -- cgit 1.4.1 From db182c63fb4a58974e425a56d235131fd9efc531 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sat, 25 Feb 2017 18:44:39 +0800 Subject: [njpwworld] Add new extractor (closes #11561) --- ChangeLog | 6 +++ youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/njpwworld.py | 83 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 90 insertions(+) create mode 100644 youtube_dl/extractor/njpwworld.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index add8a6758..e0e1f52d0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractors ++ 
[njpwworld] Add new extractor (#11561) + + version 2017.02.24.1 Extractors diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 83a170fa7..703bc5d34 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -637,6 +637,7 @@ from .ninecninemedia import ( from .ninegag import NineGagIE from .ninenow import NineNowIE from .nintendo import NintendoIE +from .njpwworld import NJPWWorldIE from .nobelprize import NobelPrizeIE from .noco import NocoIE from .normalboots import NormalbootsIE diff --git a/youtube_dl/extractor/njpwworld.py b/youtube_dl/extractor/njpwworld.py new file mode 100644 index 000000000..f5e3f6815 --- /dev/null +++ b/youtube_dl/extractor/njpwworld.py @@ -0,0 +1,83 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_urlparse +from ..utils import ( + get_element_by_class, + urlencode_postdata, +) + + +class NJPWWorldIE(InfoExtractor): + _VALID_URL = r'https?://njpwworld\.com/p/(?P<id>[a-z0-9_]+)' + IE_DESC = '新日本プロレスワールド' + _NETRC_MACHINE = 'njpwworld' + + _TEST = { + 'url': 'http://njpwworld.com/p/s_series_00155_1_9/', + 'info_dict': { + 'id': 's_series_00155_1_9', + 'ext': 'mp4', + 'title': '第9試合 ランディ・サベージ vs リック・スタイナー', + 'tags': list, + }, + 'params': { + 'skip_download': True, # AES-encrypted m3u8 + }, + 'skip': 'Requires login', + } + + def _real_initialize(self): + self._login() + + def _login(self): + username, password = self._get_login_info() + # No authentication to be performed + if not username: + return True + + webpage, urlh = self._download_webpage_handle( + 'https://njpwworld.com/auth/login', None, + note='Logging in', errnote='Unable to login', + data=urlencode_postdata({'login_id': username, 'pw': password})) + # /auth/login will return 302 for successful logins + if urlh.geturl() == 'https://njpwworld.com/auth/login': + self.report_warning('unable to login') + return False + + return True + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + formats = [] + for player_url, kind in re.findall(r'<a[^>]+href="(/player[^"]+)".+?<img[^>]+src="[^"]+qf_btn_([^".]+)', webpage): + player_url = compat_urlparse.urljoin(url, player_url) + + player_page = self._download_webpage( + player_url, video_id, note='Downloading player page') + + entries = self._parse_html5_media_entries( + player_url, player_page, video_id, m3u8_id='hls-%s' % kind, + m3u8_entry_protocol='m3u8_native', + preference=2 if 'hq' in kind else 1) + formats.extend(entries[0]['formats']) + + self._sort_formats(formats) + + post_content = get_element_by_class('post-content', webpage) + tags = re.findall( + r'<li[^>]+class="tag-[^"]+"><a[^>]*>([^<]+)</a></li>', post_content + ) if post_content else None + + return { + 'id': video_id, + 'title': self._og_search_title(webpage), + 'formats': formats, + 'tags': tags, + } -- cgit 1.4.1 From 922ab7840b01bd108887849063572cffa855cdc1 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 25 Feb 2017 20:16:40 +0700 Subject: [etonline] Add extractor (closes #12236) --- youtube_dl/extractor/etonline.py | 39 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 40 insertions(+) create mode 100644 youtube_dl/extractor/etonline.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/etonline.py b/youtube_dl/extractor/etonline.py new file mode 100644 index 
000000000..17d7cfec6 --- /dev/null +++ b/youtube_dl/extractor/etonline.py @@ -0,0 +1,39 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class ETOnlineIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?etonline\.com/(?:[^/]+/)*(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'http://www.etonline.com/tv/211130_dove_cameron_liv_and_maddie_emotional_episode_series_finale/', + 'info_dict': { + 'id': '211130_dove_cameron_liv_and_maddie_emotional_episode_series_finale', + 'title': 'md5:a21ec7d3872ed98335cbd2a046f34ee6', + 'description': 'md5:8b94484063f463cca709617c79618ccd', + }, + 'playlist_count': 2, + }, { + 'url': 'http://www.etonline.com/media/video/here_are_the_stars_who_love_bringing_their_moms_as_dates_to_the_oscars-211359/', + 'only_matching': True, + }] + BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1242911076001/default_default/index.html?videoId=ref:%s' + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + entries = [ + self.url_result( + self.BRIGHTCOVE_URL_TEMPLATE % video_id, 'BrightcoveNew', video_id) + for video_id in re.findall( + r'site\.brightcove\s*\([^,]+,\s*["\'](title_\d+)', webpage)] + + return self.playlist_result( + entries, playlist_id, + self._og_search_title(webpage, fatal=False), + self._og_search_description(webpage)) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 703bc5d34..9f5aaf1d8 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -288,6 +288,7 @@ from .espn import ( ESPNArticleIE, ) from .esri import EsriVideoIE +from .etonline import ETOnlineIE from .europa import EuropaIE from .everyonesmixtape import EveryonesMixtapeIE from .expotv import ExpoTVIE -- cgit 1.4.1 From 5fc8d893612285ddba082f381006337573b36afa Mon Sep 17 00:00:00 2001 From: Ricardo Constantino <wiiaboo@gmail.com> Date: Sat, 18 Feb 2017 17:42:31 +0000 Subject: [freshlive] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/freshlive.py | 64 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 65 insertions(+) create mode 100644 youtube_dl/extractor/freshlive.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 9f5aaf1d8..860b8f422 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -339,6 +339,7 @@ from .francetv import ( ) from .freesound import FreesoundIE from .freespeech import FreespeechIE +from .freshlive import FreshliveIE from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE from .fusion import FusionIE diff --git a/youtube_dl/extractor/freshlive.py b/youtube_dl/extractor/freshlive.py new file mode 100644 index 000000000..113f8f4fa --- /dev/null +++ b/youtube_dl/extractor/freshlive.py @@ -0,0 +1,64 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + +from ..utils import ( + int_or_none, + parse_iso8601 +) + +class FreshliveIE(InfoExtractor): + _VALID_URL = r'https?://freshlive\.tv/(?P<streamer>[^/]+)/(?P<id>[0-9]+)' + _TEST = { + 'url': 'https://freshlive.tv/satotv/74712', + 'md5': '224f50d268b6b9f94e4198deccd55d6d', + 'info_dict': { + 'description': 'テスト', + 'duration': 1511, + 'id': '74712', + 'ext': 'mp4', + 'timestamp': 1483621764, + 'title': 'テスト', + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20170105', + 
'uploader': 'サトTV', + 'uploader_id': 'satotv', + 'view_count': int, + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + options = self._parse_json( + self._search_regex( + r'window\.__CONTEXT__\s*=\s*({.+?});\s*</script>', + webpage, 'initial context'), + video_id) + + programs = options['context']['dispatcher']['stores']['ProgramStore']['programs'] + info = programs.get(video_id, {}) + + video_url = info.get('liveStreamUrl') or info.get('archiveStreamUrl') + if not video_url: + raise ExtractorError('%s not a valid broadcast ID' % video_id, expected=True) + + formats = self._extract_m3u8_formats( + video_url, video_id, ext='mp4', m3u8_id='hls') + + return { + 'id': video_id, + 'formats': formats, + 'title': info.get('title'), + 'description': info.get('description'), + 'duration': int_or_none(info.get('airTime')), + 'is_live': int_or_none(info.get('airTime')) == None, + 'thumbnail': info.get('thumbnailUrl'), + 'uploader': info.get('channel', {}).get('title'), + 'uploader_id': info.get('channel', {}).get('code'), + 'uploader_url': info.get('channel', {}).get('permalink'), + 'timestamp': parse_iso8601(info.get('startAt')), + 'view_count': int_or_none(info.get('viewCount')), + } \ No newline at end of file -- cgit 1.4.1 From e498758b9cfa983d05b10a185ecdef480a93cf30 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 25 Feb 2017 22:56:42 +0700 Subject: [freshlive] Fix issues and improve (closes #12175) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/freshlive.py | 64 +++++++++++++++++++++++++------------- 2 files changed, 43 insertions(+), 23 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 860b8f422..58139ee4e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -339,7 +339,7 @@ from .francetv import ( ) from .freesound import FreesoundIE from .freespeech import FreespeechIE -from .freshlive import FreshliveIE +from .freshlive import FreshLiveIE from .funimation import FunimationIE from .funnyordie import FunnyOrDieIE from .fusion import FusionIE diff --git a/youtube_dl/extractor/freshlive.py b/youtube_dl/extractor/freshlive.py index 113f8f4fa..a90f9156c 100644 --- a/youtube_dl/extractor/freshlive.py +++ b/youtube_dl/extractor/freshlive.py @@ -2,34 +2,40 @@ from __future__ import unicode_literals from .common import InfoExtractor - +from ..compat import compat_str from ..utils import ( + ExtractorError, int_or_none, - parse_iso8601 + try_get, + unified_timestamp, ) -class FreshliveIE(InfoExtractor): - _VALID_URL = r'https?://freshlive\.tv/(?P<streamer>[^/]+)/(?P<id>[0-9]+)' + +class FreshLiveIE(InfoExtractor): + _VALID_URL = r'https?://freshlive\.tv/[^/]+/(?P<id>\d+)' _TEST = { 'url': 'https://freshlive.tv/satotv/74712', - 'md5': '224f50d268b6b9f94e4198deccd55d6d', + 'md5': '9f0cf5516979c4454ce982df3d97f352', 'info_dict': { - 'description': 'テスト', - 'duration': 1511, 'id': '74712', 'ext': 'mp4', - 'timestamp': 1483621764, 'title': 'テスト', + 'description': 'テスト', 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 1511, + 'timestamp': 1483619655, 'upload_date': '20170105', 'uploader': 'サトTV', 'uploader_id': 'satotv', 'view_count': int, + 'comment_count': int, + 'is_live': False, } } def _real_extract(self, url): video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) options = self._parse_json( @@ -38,27 +44,41 @@ 
class FreshliveIE(InfoExtractor): webpage, 'initial context'), video_id) - programs = options['context']['dispatcher']['stores']['ProgramStore']['programs'] - info = programs.get(video_id, {}) + info = options['context']['dispatcher']['stores']['ProgramStore']['programs'][video_id] - video_url = info.get('liveStreamUrl') or info.get('archiveStreamUrl') - if not video_url: - raise ExtractorError('%s not a valid broadcast ID' % video_id, expected=True) + title = info['title'] + + if info.get('status') == 'upcoming': + raise ExtractorError('Stream %s is upcoming' % video_id, expected=True) + + stream_url = info.get('liveStreamUrl') or info['archiveStreamUrl'] + + is_live = info.get('liveStreamUrl') is not None formats = self._extract_m3u8_formats( - video_url, video_id, ext='mp4', m3u8_id='hls') + stream_url, video_id, ext='mp4', + entry_protocol='m3u8' if is_live else 'm3u8_native', + m3u8_id='hls') + + if is_live: + title = self._live_title(title) return { 'id': video_id, 'formats': formats, - 'title': info.get('title'), + 'title': title, 'description': info.get('description'), - 'duration': int_or_none(info.get('airTime')), - 'is_live': int_or_none(info.get('airTime')) == None, 'thumbnail': info.get('thumbnailUrl'), - 'uploader': info.get('channel', {}).get('title'), - 'uploader_id': info.get('channel', {}).get('code'), - 'uploader_url': info.get('channel', {}).get('permalink'), - 'timestamp': parse_iso8601(info.get('startAt')), + 'duration': int_or_none(info.get('airTime')), + 'timestamp': unified_timestamp(info.get('createdAt')), + 'uploader': try_get( + info, lambda x: x['channel']['title'], compat_str), + 'uploader_id': try_get( + info, lambda x: x['channel']['code'], compat_str), + 'uploader_url': try_get( + info, lambda x: x['channel']['permalink'], compat_str), 'view_count': int_or_none(info.get('viewCount')), - } \ No newline at end of file + 'comment_count': int_or_none(info.get('commentCount')), + 'tags': info.get('tags', []), + 'is_live': is_live, + } -- cgit 1.4.1 From 892b47ab6c9147e785c562229e6dab305fffba61 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Mon, 27 Feb 2017 21:34:33 +0800 Subject: [scivee] Remove extractor (#9315) The Wikipedia page is changed from active to down: https://en.wikipedia.org/w/index.php?title=SciVee&diff=prev&oldid=723161154 Some other interesting bits: $ nslookup www.scivee.tv Server: 8.8.8.8 Address: 8.8.8.8#53 Non-authoritative answer: www.scivee.tv canonical name = scivee.rcsb.org. Name: scivee.rcsb.org Address: 132.249.231.211 $ nslookup rcsb.org Server: 8.8.8.8 Address: 8.8.8.8#53 Non-authoritative answer: Name: rcsb.org Address: 132.249.231.77 Both IPs are from UCSD. I guess it's maintained by a lab and they don't maintain it anymore. 
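The DNS observations above can be reproduced without nslookup. Below is a minimal Python sketch (standard library only) that resolves the same two hostnames quoted in the message, www.scivee.tv and rcsb.org, and prints their canonical names and addresses; the addresses returned today may differ from the values recorded above, since they depend on the current DNS state.

# Minimal sketch reproducing the DNS check quoted above (standard library only).
# The hostnames come from the commit message; live results may differ from the
# values recorded there.
import socket

for host in ('www.scivee.tv', 'rcsb.org'):
    try:
        canonical, aliases, addresses = socket.gethostbyname_ex(host)
    except socket.gaierror as error:
        # A failed lookup would equally support dropping the extractor.
        print('%s: lookup failed (%s)' % (host, error))
        continue
    print('%s -> canonical name: %s, addresses: %s'
          % (host, canonical, ', '.join(addresses)))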
--- ChangeLog | 1 + youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/scivee.py | 57 -------------------------------------- 3 files changed, 1 insertion(+), 58 deletions(-) delete mode 100644 youtube_dl/extractor/scivee.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 4009acf37..4ed9cb4e0 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors +- [scivee] Remove extractor (#9315) + [MDR] Recognize more URL patterns (#12169) * [vevo] Fix extraction for videos with the new streams/streamsV3 format (#11719) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 58139ee4e..d09104096 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -838,7 +838,6 @@ from .safari import ( from .sapo import SapoIE from .savefrom import SaveFromIE from .sbs import SBSIE -from .scivee import SciVeeIE from .screencast import ScreencastIE from .screencastomatic import ScreencastOMaticIE from .scrippsnetworks import ScrippsNetworksWatchIE diff --git a/youtube_dl/extractor/scivee.py b/youtube_dl/extractor/scivee.py deleted file mode 100644 index b1ca12fde..000000000 --- a/youtube_dl/extractor/scivee.py +++ /dev/null @@ -1,57 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..utils import int_or_none - - -class SciVeeIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?scivee\.tv/node/(?P<id>\d+)' - - _TEST = { - 'url': 'http://www.scivee.tv/node/62352', - 'md5': 'b16699b74c9e6a120f6772a44960304f', - 'info_dict': { - 'id': '62352', - 'ext': 'mp4', - 'title': 'Adam Arkin at the 2014 DOE JGI Genomics of Energy & Environment Meeting', - 'description': 'md5:81f1710638e11a481358fab1b11059d7', - }, - 'skip': 'Not accessible from Travis CI server', - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - # annotations XML is malformed - annotations = self._download_webpage( - 'http://www.scivee.tv/assets/annotations/%s' % video_id, video_id, 'Downloading annotations') - - title = self._html_search_regex(r'<title>([^<]+)', annotations, 'title') - description = self._html_search_regex(r'([^<]+)', annotations, 'abstract', fatal=False) - filesize = int_or_none(self._html_search_regex( - r'([^<]+)', annotations, 'filesize', fatal=False)) - - formats = [ - { - 'url': 'http://www.scivee.tv/assets/audio/%s' % video_id, - 'ext': 'mp3', - 'format_id': 'audio', - }, - { - 'url': 'http://www.scivee.tv/assets/video/%s' % video_id, - 'ext': 'mp4', - 'format_id': 'video', - 'filesize': filesize, - }, - ] - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': 'http://www.scivee.tv/assets/videothumb/%s' % video_id, - 'formats': formats, - } -- cgit 1.4.1 From 0dc5a86a329314f551f86c2ef3202342b7506667 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Mon, 27 Feb 2017 22:43:19 +0700 Subject: [npo] Add support for hetklokhuis.nl (closes #12293) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/npo.py | 44 ++++++++++++++++++++++++++++---------- 2 files changed, 34 insertions(+), 11 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d09104096..0910b7b05 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -669,6 +669,7 @@ from .npo import ( NPORadioIE, NPORadioFragmentIE, SchoolTVIE, + 
HetKlokhuisIE, VPROIE, WNLIE, ) diff --git a/youtube_dl/extractor/npo.py b/youtube_dl/extractor/npo.py index 962437145..09e8d9987 100644 --- a/youtube_dl/extractor/npo.py +++ b/youtube_dl/extractor/npo.py @@ -416,7 +416,21 @@ class NPORadioFragmentIE(InfoExtractor): } -class SchoolTVIE(InfoExtractor): +class NPODataMidEmbedIE(InfoExtractor): + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_id = self._search_regex( + r'data-mid=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video_id', group='id') + return { + '_type': 'url_transparent', + 'ie_key': 'NPO', + 'url': 'npo:%s' % video_id, + 'display_id': display_id + } + + +class SchoolTVIE(NPODataMidEmbedIE): IE_NAME = 'schooltv' _VALID_URL = r'https?://(?:www\.)?schooltv\.nl/video/(?P[^/?#&]+)' @@ -435,17 +449,25 @@ class SchoolTVIE(InfoExtractor): } } - def _real_extract(self, url): - display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - video_id = self._search_regex( - r'data-mid=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video_id', group='id') - return { - '_type': 'url_transparent', - 'ie_key': 'NPO', - 'url': 'npo:%s' % video_id, - 'display_id': display_id + +class HetKlokhuisIE(NPODataMidEmbedIE): + IE_NAME = 'schooltv' + _VALID_URL = r'https?://(?:www\.)?hetklokhuis.nl/[^/]+/\d+/(?P[^/?#&]+)' + + _TEST = { + 'url': 'http://hetklokhuis.nl/tv-uitzending/3471/Zwaartekrachtsgolven', + 'info_dict': { + 'id': 'VPWON_1260528', + 'display_id': 'Zwaartekrachtsgolven', + 'ext': 'm4v', + 'title': 'Het Klokhuis: Zwaartekrachtsgolven', + 'description': 'md5:c94f31fb930d76c2efa4a4a71651dd48', + 'upload_date': '20170223', + }, + 'params': { + 'skip_download': True } + } class NPOPlaylistBaseIE(NPOIE): -- cgit 1.4.1 From 9bd05b5a18c535f5517158d505af5dced498d23e Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Mon, 27 Feb 2017 18:56:45 +0800 Subject: [daisuki] Add new extractor (closes #4738) --- ChangeLog | 1 + youtube_dl/extractor/daisuki.py | 144 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 4 ++ 3 files changed, 149 insertions(+) create mode 100644 youtube_dl/extractor/daisuki.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index f9986ae61..401c5885e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version Extractors ++ [daisuki] Add new extractor (#2486, #3186, #4738, #6175, #7776, #10060) * [douyu] Fix extraction (#12301) diff --git a/youtube_dl/extractor/daisuki.py b/youtube_dl/extractor/daisuki.py new file mode 100644 index 000000000..6fd09faf1 --- /dev/null +++ b/youtube_dl/extractor/daisuki.py @@ -0,0 +1,144 @@ +from __future__ import unicode_literals + +import base64 +import json +import random +import re + +from .common import InfoExtractor +from ..aes import ( + aes_cbc_decrypt, + aes_cbc_encrypt, +) +from ..utils import ( + bytes_to_intlist, + bytes_to_long, + clean_html, + ExtractorError, + intlist_to_bytes, + get_element_by_id, + js_to_json, + int_or_none, + long_to_bytes, + pkcs1pad, + remove_end, +) + + +class DaisukiIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?daisuki\.net/[^/]+/[^/]+/[^/]+/watch\.[^.]+\.(?P\d+)\.html' + + _TEST = { + 'url': 'http://www.daisuki.net/tw/en/anime/watch.TheIdolMasterCG.11213.html', + 'info_dict': { + 'id': '11213', + 'ext': 'mp4', + 'title': '#01 Who is in the pumpkin carriage? 
- THE IDOLM@STER CINDERELLA GIRLS', + 'creator': 'BANDAI NAMCO Entertainment', + }, + 'params': { + 'skip_download': True, # AES-encrypted HLS stream + }, + } + + # The public key in PEM format can be found in clientlibs_anime_watch.min.js + _RSA_KEY = (0xc5524c25e8e14b366b3754940beeb6f96cb7e2feef0b932c7659a0c5c3bf173d602464c2df73d693b513ae06ff1be8f367529ab30bf969c5640522181f2a0c51ea546ae120d3d8d908595e4eff765b389cde080a1ef7f1bbfb07411cc568db73b7f521cedf270cbfbe0ddbc29b1ac9d0f2d8f4359098caffee6d07915020077d, 65537) + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + flashvars = self._parse_json(self._search_regex( + r'(?s)var\s+flashvars\s*=\s*({.+?});', webpage, 'flashvars'), + video_id, transform_source=js_to_json) + + iv = [0] * 16 + + data = {} + for key in ('device_cd', 'mv_id', 'ss1_prm', 'ss2_prm', 'ss3_prm', 'ss_id'): + data[key] = flashvars.get(key, '') + + encrypted_rtn = None + + # Some AES keys are rejected. Try it with different AES keys + for idx in range(5): + aes_key = [random.randint(0, 254) for _ in range(32)] + padded_aeskey = intlist_to_bytes(pkcs1pad(aes_key, 128)) + + n, e = self._RSA_KEY + encrypted_aeskey = long_to_bytes(pow(bytes_to_long(padded_aeskey), e, n)) + init_data = self._download_json('http://www.daisuki.net/bin/bgn/init', video_id, query={ + 's': flashvars.get('s', ''), + 'c': flashvars.get('ss3_prm', ''), + 'e': url, + 'd': base64.b64encode(intlist_to_bytes(aes_cbc_encrypt( + bytes_to_intlist(json.dumps(data)), + aes_key, iv))).decode('ascii'), + 'a': base64.b64encode(encrypted_aeskey).decode('ascii'), + }, note='Downloading JSON metadata' + (' (try #%d)' % (idx + 1) if idx > 0 else '')) + + if 'rtn' in init_data: + encrypted_rtn = init_data['rtn'] + break + + self._sleep(5, video_id) + + if encrypted_rtn is None: + raise ExtractorError('Failed to fetch init data') + + rtn = self._parse_json( + intlist_to_bytes(aes_cbc_decrypt(bytes_to_intlist( + base64.b64decode(encrypted_rtn)), + aes_key, iv)).decode('utf-8').rstrip('\0'), + video_id) + + formats = self._extract_m3u8_formats( + rtn['play_url'], video_id, ext='mp4', entry_protocol='m3u8_native') + + title = remove_end(self._og_search_title(webpage), ' - DAISUKI') + + creator = self._html_search_regex( + r'Creator\s*:\s*([^<]+)', webpage, 'creator', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'creator': creator, + } + + +class DaisukiPlaylistIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)daisuki\.net/[^/]+/[^/]+/[^/]+/detail\.(?P[a-zA-Z0-9]+)\.html' + + _TEST = { + 'url': 'http://www.daisuki.net/tw/en/anime/detail.TheIdolMasterCG.html', + 'info_dict': { + 'id': 'TheIdolMasterCG', + 'title': 'THE IDOLM@STER CINDERELLA GIRLS', + 'description': 'md5:0f2c028a9339f7a2c7fbf839edc5c5d8', + }, + 'playlist_count': 26, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + episode_pattern = r'''(?sx) + ]+delay="[^"]+/(\d+)/movie\.jpg".+? + ]+class=".*?\bepisodeNumber\b.*?">(?:]+>)?([^<]+)''' + entries = [{ + '_type': 'url_transparent', + 'url': url.replace('detail', 'watch').replace('.html', '.' 
+ movie_id + '.html'), + 'episode_id': episode_id, + 'episode_number': int_or_none(episode_id), + } for movie_id, episode_id in re.findall(episode_pattern, webpage)] + + playlist_title = remove_end( + self._og_search_title(webpage, fatal=False), ' - Anime - DAISUKI') + playlist_description = clean_html(get_element_by_id('synopsisTxt', webpage)) + + return self.playlist_result(entries, playlist_id, playlist_title, playlist_description) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 0910b7b05..e251d8478 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -227,6 +227,10 @@ from .dailymotion import ( DailymotionUserIE, DailymotionCloudIE, ) +from .daisuki import ( + DaisukiIE, + DaisukiPlaylistIE, +) from .daum import ( DaumIE, DaumClipIE, -- cgit 1.4.1 From 948519b35dec420c3b3ca5369bcba1dead31fcc6 Mon Sep 17 00:00:00 2001 From: Alex Seiler Date: Fri, 17 Feb 2017 04:49:25 +0100 Subject: [azmedien:showplaylist] Add support for all episodes playlists --- youtube_dl/extractor/azmedien.py | 49 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 50 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/azmedien.py b/youtube_dl/extractor/azmedien.py index cbc3ed564..817fc6d57 100644 --- a/youtube_dl/extractor/azmedien.py +++ b/youtube_dl/extractor/azmedien.py @@ -5,6 +5,7 @@ import re from .common import InfoExtractor from .kaltura import KalturaIE from ..utils import ( + get_element_by_class, get_element_by_id, strip_or_none, urljoin, @@ -170,3 +171,51 @@ class AZMedienPlaylistIE(AZMedienBaseIE): 'video-title', webpage)), group='title') return self.playlist_result(entries, show_id, title) + + +class AZMedienShowPlaylistIE(AZMedienBaseIE): + IE_DESC = 'AZ Medien Show playlists' + _VALID_URL = r'''(?x) + https?:// + (?:www\.)? 
+ (?P + (?: + telezueri\.ch| + telebaern\.tv| + telem1\.ch + )/ + (?: + all-episodes| + alle-episoden + ) + /[^/]+ + ) + ''' + + _TEST = { + 'url': 'http://www.telezueri.ch/all-episodes/astrotalk', + 'info_dict': { + 'id': 'telezueri.ch/all-episodes/astrotalk', + 'title': 'TeleZüri: AstroTalk - alle episoden', + 'description': 'md5:4c0f7e7d741d906004266e295ceb4a26', + }, + 'playlist_mincount': 13, + 'params': { + 'skip_download': True, + } + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + webpage = self._download_webpage(url, playlist_id) + episodes = get_element_by_class('search-mobile-box', webpage) + entries = [self.url_result( + urljoin(url, m.group('url'))) for m in re.finditer( + r']+href=(["\'])(?P.+?)\1', episodes)] + title = self._og_search_title(webpage) + description = self._og_search_description(webpage) + return self.playlist_result( + entries, + playlist_id=playlist_id, + playlist_title=title, + playlist_description=description) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e251d8478..b1613a9d3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -83,6 +83,7 @@ from .awaan import ( from .azmedien import ( AZMedienIE, AZMedienPlaylistIE, + AZMedienShowPlaylistIE, ) from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE -- cgit 1.4.1 From cbb127568a6182df2c5a2d65426de523f1f7b43f Mon Sep 17 00:00:00 2001 From: Olivier Bilodeau Date: Thu, 15 Dec 2016 20:14:04 -0500 Subject: [vrak] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vrak.py | 68 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 69 insertions(+) create mode 100644 youtube_dl/extractor/vrak.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b1613a9d3..0ac42138a 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1165,6 +1165,7 @@ from .voicerepublic import VoiceRepublicIE from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE +from .vrak import VrakIE from .vube import VubeIE from .vuclip import VuClipIE from .vvvvid import VVVVIDIE diff --git a/youtube_dl/extractor/vrak.py b/youtube_dl/extractor/vrak.py new file mode 100644 index 000000000..692e2fcfc --- /dev/null +++ b/youtube_dl/extractor/vrak.py @@ -0,0 +1,68 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +from .brightcove import BrightcoveNewIE + + +class VrakIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?vrak\.tv/videos\?.*?target=(?P[0-9\.]+).*' + _TEST = { + 'url': 'http://www.vrak.tv/videos?target=1.2240923&filtre=emission&id=1.1806721', + 'md5': 'c5d5ce237bca3b1e990ce1b48d1f0948', + 'info_dict': { + 'id': '5231040869001', + 'ext': 'mp4', + 'title': 'Référendums américains, animés japonais et hooligans russes', + 'upload_date': '20161201', + 'description': 'This video file has been uploaded automatically using Oprah. It should be updated with real description soon.', + 'timestamp': 1480628425, + 'uploader_id': '2890187628001', + } + } + + def _real_extract(self, url): + url_id = self._match_id(url) + webpage = self._download_webpage(url, url_id) + + result = {} + result['title'] = self._html_search_regex( + r'

(.+?)

', webpage, 'title') + + # Inspired from BrightcoveNewIE._extract_url() + entries = [] + for account_id, player_id, _, video_id in re.findall( + # account_id, player_id and embed from: + #
]+ + data-publisher-id=["\'](\d+)["\'] + [^>]* + data-player-id=["\']([^"\']+)["\'] + [^>]* + refId":"([^&]+)" + [^>]* + >.*? +
.*? + RW\ java\.lang\.String\ value\ =\ \'brightcove\.article\.\d+\.\3\' + [^>]* + RW\ java\.lang\.String\ value\ =\ \'(\d+)\' + ''', webpage): + + entries.append( + 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' + % (account_id, player_id, 'default', video_id)) + + if entries: + result = self.url_result(entries[0], BrightcoveNewIE.ie_key()) + + return result -- cgit 1.4.1 From 64b7ccef3e3144a50f2cc01772a5ea5e81d4494d Mon Sep 17 00:00:00 2001 From: Juanjo Benages Date: Thu, 2 Feb 2017 17:33:09 +0100 Subject: [redbulltv] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/redbull.py | 50 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 51 insertions(+) create mode 100644 youtube_dl/extractor/redbull.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 0ac42138a..e42a069b5 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -793,6 +793,7 @@ from .rai import ( ) from .rbmaradio import RBMARadioIE from .rds import RDSIE +from .redbull import RedBullIE from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv import ( diff --git a/youtube_dl/extractor/redbull.py b/youtube_dl/extractor/redbull.py new file mode 100644 index 000000000..e3d978a53 --- /dev/null +++ b/youtube_dl/extractor/redbull.py @@ -0,0 +1,50 @@ +# coding: utf-8 +from __future__ import unicode_literals +import re + +from .common import InfoExtractor + + +class RedBullIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?redbull\.tv/video/(?PAP-\w+)' + _TEST = { + 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', + 'md5': '78e860f631d7a846e712fab8c5fe2c38', + 'info_dict': { + 'id': 'AP-1Q756YYX51W11', + 'ext': 'mp4', + 'title': 'ABC of...WRC', + 'description': 'Buckle up for a crash course in the terminology, rules, drivers, and courses of the World Rally Championship.' 
+ } + } + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id = mobj.group('id') + + access_token = self._download_json( + 'http://api-v2.redbull.tv/start?build=4.0.9&category=smartphone&os_version=23&os_family=android', + video_id, note='Downloading access token', + )['auth']['access_token'] + + info = self._download_json( + 'https://api-v2.redbull.tv/views/%s' % video_id, + video_id, note='Downloading video information', + headers={'Authorization': 'Bearer ' + access_token} + )['blocks'][0]['top'][0] + + m3u8_url = info['video_product']['url'] + title = info['title'] + + formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', + m3u8_id='hls') + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': info.get('short_description'), + 'genre': info.get('genre'), + 'duration': info.get('duration') + } -- cgit 1.4.1 From 054a587de8fe2860bcb93a19f8c628b4ddd9ad56 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sat, 4 Mar 2017 23:25:09 +0700 Subject: [redbulltv] Improve extraction (closes #11948, closes #3919) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/redbull.py | 50 ----------------- youtube_dl/extractor/redbulltv.py | 106 +++++++++++++++++++++++++++++++++++++ 3 files changed, 107 insertions(+), 51 deletions(-) delete mode 100644 youtube_dl/extractor/redbull.py create mode 100644 youtube_dl/extractor/redbulltv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e42a069b5..caf1dc766 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -793,7 +793,7 @@ from .rai import ( ) from .rbmaradio import RBMARadioIE from .rds import RDSIE -from .redbull import RedBullIE +from .redbulltv import RedBullTVIE from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv import ( diff --git a/youtube_dl/extractor/redbull.py b/youtube_dl/extractor/redbull.py deleted file mode 100644 index e3d978a53..000000000 --- a/youtube_dl/extractor/redbull.py +++ /dev/null @@ -1,50 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals -import re - -from .common import InfoExtractor - - -class RedBullIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?redbull\.tv/video/(?PAP-\w+)' - _TEST = { - 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', - 'md5': '78e860f631d7a846e712fab8c5fe2c38', - 'info_dict': { - 'id': 'AP-1Q756YYX51W11', - 'ext': 'mp4', - 'title': 'ABC of...WRC', - 'description': 'Buckle up for a crash course in the terminology, rules, drivers, and courses of the World Rally Championship.' 
- } - } - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - video_id = mobj.group('id') - - access_token = self._download_json( - 'http://api-v2.redbull.tv/start?build=4.0.9&category=smartphone&os_version=23&os_family=android', - video_id, note='Downloading access token', - )['auth']['access_token'] - - info = self._download_json( - 'https://api-v2.redbull.tv/views/%s' % video_id, - video_id, note='Downloading video information', - headers={'Authorization': 'Bearer ' + access_token} - )['blocks'][0]['top'][0] - - m3u8_url = info['video_product']['url'] - title = info['title'] - - formats = self._extract_m3u8_formats( - m3u8_url, video_id, 'mp4', 'm3u8_native', - m3u8_id='hls') - - return { - 'id': video_id, - 'title': title, - 'formats': formats, - 'description': info.get('short_description'), - 'genre': info.get('genre'), - 'duration': info.get('duration') - } diff --git a/youtube_dl/extractor/redbulltv.py b/youtube_dl/extractor/redbulltv.py new file mode 100644 index 000000000..5c73d5bca --- /dev/null +++ b/youtube_dl/extractor/redbulltv.py @@ -0,0 +1,106 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + float_or_none, + int_or_none, + try_get, + unified_timestamp, +) + + +class RedBullTVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?redbull\.tv/(?:video|film)/(?PAP-\w+)' + _TESTS = [{ + # film + 'url': 'https://www.redbull.tv/video/AP-1Q756YYX51W11/abc-of-wrc', + 'md5': '78e860f631d7a846e712fab8c5fe2c38', + 'info_dict': { + 'id': 'AP-1Q756YYX51W11', + 'ext': 'mp4', + 'title': 'ABC of...WRC', + 'description': 'md5:5c7ed8f4015c8492ecf64b6ab31e7d31', + 'duration': 1582.04, + 'timestamp': 1488405786, + 'upload_date': '20170301', + }, + }, { + # episode + 'url': 'https://www.redbull.tv/video/AP-1PMT5JCWH1W11/grime?playlist=shows:shows-playall:web', + 'info_dict': { + 'id': 'AP-1PMT5JCWH1W11', + 'ext': 'mp4', + 'title': 'Grime - Hashtags S2 E4', + 'description': 'md5:334b741c8c1ce65be057eab6773c1cf5', + 'duration': 904.6, + 'timestamp': 1487290093, + 'upload_date': '20170217', + 'series': 'Hashtags', + 'season_number': 2, + 'episode_number': 4, + }, + }, { + 'url': 'https://www.redbull.tv/film/AP-1MSKKF5T92111/in-motion', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + access_token = self._download_json( + 'https://api-v2.redbull.tv/start', video_id, + note='Downloading access token', query={ + 'build': '4.0.9', + 'category': 'smartphone', + 'os_version': 23, + 'os_family': 'android', + })['auth']['access_token'] + + info = self._download_json( + 'https://api-v2.redbull.tv/views/%s' % video_id, + video_id, note='Downloading video information', + headers={'Authorization': 'Bearer ' + access_token} + )['blocks'][0]['top'][0] + + video = info['video_product'] + + title = info['title'].strip() + m3u8_url = video['url'] + + formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls') + + subtitles = {} + for _, captions in (try_get( + video, lambda x: x['attachments']['captions'], + dict) or {}).items(): + if not captions or not isinstance(captions, list): + continue + for caption in captions: + caption_url = caption.get('url') + if not caption_url: + continue + subtitles.setdefault(caption.get('lang') or 'en', []).append({ + 'url': caption_url, + 'ext': caption.get('format'), + }) + + subheading = info.get('subheading') + if subheading: + title += ' - %s' % subheading + + return { + 'id': 
video_id, + 'title': title, + 'description': info.get('long_description') or info.get( + 'short_description'), + 'duration': float_or_none(video.get('duration'), scale=1000), + 'timestamp': unified_timestamp(info.get('published')), + 'series': info.get('show_title'), + 'season_number': int_or_none(info.get('season_number')), + 'episode_number': int_or_none(info.get('episode_number')), + 'formats': formats, + 'subtitles': subtitles, + } -- cgit 1.4.1 From 5dd376345b8c87e8c2130b80e73e690c5e721a28 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sun, 5 Mar 2017 23:31:38 +0700 Subject: [tunepk] Add extractor (closes #12197, closes #12243) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tunepk.py | 90 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 91 insertions(+) create mode 100644 youtube_dl/extractor/tunepk.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index caf1dc766..b056dff53 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1000,6 +1000,7 @@ from .tunein import ( TuneInTopicIE, TuneInShortenerIE, ) +from .tunepk import TunePkIE from .turbo import TurboIE from .tutv import TutvIE from .tv2 import ( diff --git a/youtube_dl/extractor/tunepk.py b/youtube_dl/extractor/tunepk.py new file mode 100644 index 000000000..9d42651ce --- /dev/null +++ b/youtube_dl/extractor/tunepk.py @@ -0,0 +1,90 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + int_or_none, + try_get, + unified_timestamp, +) + + +class TunePkIE(InfoExtractor): + _VALID_URL = r'''(?x) + https?:// + (?: + (?:www\.)?tune\.pk/(?:video/|player/embed_player.php?.*?\bvid=)| + embed\.tune\.pk/play/ + ) + (?P\d+) + ''' + _TESTS = [{ + 'url': 'https://tune.pk/video/6919541/maudie-2017-international-trailer-1-ft-ethan-hawke-sally-hawkins', + 'md5': '0c537163b7f6f97da3c5dd1e3ef6dd55', + 'info_dict': { + 'id': '6919541', + 'ext': 'mp4', + 'title': 'Maudie (2017) | International Trailer # 1 ft Ethan Hawke, Sally Hawkins', + 'description': 'md5:eb5a04114fafef5cec90799a93a2d09c', + 'thumbnail': r're:^https?://.*\.jpg$', + 'timestamp': 1487327564, + 'upload_date': '20170217', + 'uploader': 'Movie Trailers', + 'duration': 107, + 'view_count': int, + } + }, { + 'url': 'https://tune.pk/player/embed_player.php?vid=6919541&folder=2017/02/17/&width=600&height=350&autoplay=no', + 'only_matching': True, + }, { + 'url': 'https://embed.tune.pk/play/6919541?autoplay=no&ssl=yes&inline=true', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 'https://tune.pk/video/%s' % video_id, video_id) + + details = self._parse_json( + self._search_regex( + r'new\s+TunePlayer\(({.+?})\)\s*;\s*\n', webpage, 'tune player'), + video_id)['details'] + + video = details['video'] + title = video.get('title') or self._og_search_title( + webpage, default=None) or self._html_search_meta( + 'title', webpage, 'title', fatal=True) + + formats = self._parse_jwplayer_formats( + details['player']['sources'], video_id) + self._sort_formats(formats) + + description = self._og_search_description( + webpage, default=None) or self._html_search_meta( + 'description', webpage, 'description') + + thumbnail = video.get('thumb') or self._og_search_thumbnail( + webpage, default=None) or self._html_search_meta( + 'thumbnail', webpage, 'thumbnail') + + timestamp = 
unified_timestamp(video.get('date_added')) + uploader = try_get( + video, lambda x: x['uploader']['name'], + compat_str) or self._html_search_meta('author', webpage, 'author') + + duration = int_or_none(video.get('duration')) + view_count = int_or_none(video.get('views')) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'uploader': uploader, + 'duration': duration, + 'view_count': view_count, + 'formats': formats, + } -- cgit 1.4.1 From 0cf2352e858982ed811122cf867fb5e25694d97a Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Wed, 8 Mar 2017 21:19:08 +0700 Subject: [dplayit] Separate and rewrite extractor and bypass geo restriction (closes #12393) --- youtube_dl/extractor/dplay.py | 117 ++++++++++++++++++++++++++++++------- youtube_dl/extractor/extractors.py | 5 +- 2 files changed, 100 insertions(+), 22 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/dplay.py b/youtube_dl/extractor/dplay.py index 32028bc3b..62e676389 100644 --- a/youtube_dl/extractor/dplay.py +++ b/youtube_dl/extractor/dplay.py @@ -6,37 +6,24 @@ import re import time from .common import InfoExtractor -from ..compat import compat_urlparse +from ..compat import ( + compat_urlparse, + compat_HTTPError, +) from ..utils import ( USER_AGENTS, + ExtractorError, int_or_none, + unified_strdate, + remove_end, update_url_query, ) class DPlayIE(InfoExtractor): - _VALID_URL = r'https?://(?Pit\.dplay\.com|www\.dplay\.(?:dk|se|no))/[^/]+/(?P[^/?#]+)' + _VALID_URL = r'https?://(?Pwww\.dplay\.(?:dk|se|no))/[^/]+/(?P[^/?#]+)' _TESTS = [{ - # geo restricted, via direct unsigned hls URL - 'url': 'http://it.dplay.com/take-me-out/stagione-1-episodio-25/', - 'info_dict': { - 'id': '1255600', - 'display_id': 'stagione-1-episodio-25', - 'ext': 'mp4', - 'title': 'Episodio 25', - 'description': 'md5:cae5f40ad988811b197d2d27a53227eb', - 'duration': 2761, - 'timestamp': 1454701800, - 'upload_date': '20160205', - 'creator': 'RTIT', - 'series': 'Take me out', - 'season_number': 1, - 'episode_number': 25, - 'age_limit': 0, - }, - 'expected_warnings': ['Unable to download f4m manifest'], - }, { # non geo restricted, via secure api, unsigned download hls URL 'url': 'http://www.dplay.se/nugammalt-77-handelser-som-format-sverige/season-1-svensken-lar-sig-njuta-av-livet/', 'info_dict': { @@ -168,3 +155,91 @@ class DPlayIE(InfoExtractor): 'formats': formats, 'subtitles': subtitles, } + + +class DPlayItIE(InfoExtractor): + _VALID_URL = r'https?://it\.dplay\.com/[^/]+/[^/]+/(?P[^/?#]+)' + _GEO_COUNTRIES = ['IT'] + _TEST = { + 'url': 'http://it.dplay.com/nove/biografie-imbarazzanti/luigi-di-maio-la-psicosi-di-stanislawskij/', + 'md5': '2b808ffb00fc47b884a172ca5d13053c', + 'info_dict': { + 'id': '6918', + 'display_id': 'luigi-di-maio-la-psicosi-di-stanislawskij', + 'ext': 'mp4', + 'title': 'Biografie imbarazzanti: Luigi Di Maio: la psicosi di Stanislawskij', + 'description': 'md5:3c7a4303aef85868f867a26f5cc14813', + 'thumbnail': r're:^https?://.*\.jpe?g', + 'upload_date': '20160524', + 'series': 'Biografie imbarazzanti', + 'season_number': 1, + 'episode': 'Luigi Di Maio: la psicosi di Stanislawskij', + 'episode_number': 1, + }, + } + + def _real_extract(self, url): + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + video_id = self._search_regex( + r'url\s*:\s*["\']https://dplay-south-prod\.disco-api\.com/playback/videoPlaybackInfo/(\d+)', + webpage, 'video id') + + title = 
remove_end(self._og_search_title(webpage), ' | Dplay') + + try: + info = self._download_json( + 'https://dplay-south-prod.disco-api.com/playback/videoPlaybackInfo/%s' % video_id, + display_id, headers={ + 'Authorization': 'Bearer %s' % self._get_cookies(url).get( + 'dplayit_token').value, + 'Referer': url, + }) + except ExtractorError as e: + if isinstance(e.cause, compat_HTTPError) and e.cause.code in (400, 403): + info = self._parse_json(e.cause.read().decode('utf-8'), display_id) + error = info['errors'][0] + if error.get('code') == 'access.denied.geoblocked': + self.raise_geo_restricted( + msg=error.get('detail'), countries=self._GEO_COUNTRIES) + raise ExtractorError(info['errors'][0]['detail'], expected=True) + raise + + hls_url = info['data']['attributes']['streaming']['hls']['url'] + + formats = self._extract_m3u8_formats( + hls_url, display_id, ext='mp4', entry_protocol='m3u8_native', + m3u8_id='hls') + + series = self._html_search_regex( + r'(?s)]+class=["\'].*?\bshow_title\b.*?["\'][^>]*>(.+?)', + webpage, 'series', fatal=False) + episode = self._search_regex( + r']+class=["\'].*?\bdesc_ep\b.*?["\'][^>]*>\s*
\s*([^<]+)', + webpage, 'episode', fatal=False) + + mobj = re.search( + r'(?s)]+class=["\']dates["\'][^>]*>.+?\bS\.(?P\d+)\s+E\.(?P\d+)\s*-\s*(?P\d{2}/\d{2}/\d{4})', + webpage) + if mobj: + season_number = int(mobj.group('season_number')) + episode_number = int(mobj.group('episode_number')) + upload_date = unified_strdate(mobj.group('upload_date')) + else: + season_number = episode_number = upload_date = None + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': self._og_search_description(webpage), + 'thumbnail': self._og_search_thumbnail(webpage), + 'series': series, + 'season_number': season_number, + 'episode': episode, + 'episode_number': episode_number, + 'upload_date': upload_date, + 'formats': formats, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b056dff53..bd2762e47 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -246,7 +246,10 @@ from .dfb import DFBIE from .dhm import DHMIE from .dotsub import DotsubIE from .douyutv import DouyuTVIE -from .dplay import DPlayIE +from .dplay import ( + DPlayIE, + DPlayItIE, +) from .dramafever import ( DramaFeverIE, DramaFeverSeriesIE, -- cgit 1.4.1 From 70497994707b6bcbd6c0ecc3fb1073be4d75d970 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sat, 11 Mar 2017 23:16:51 +0700 Subject: [discoverygo:playlist] Add extractor (closes #12424) --- youtube_dl/extractor/discoverygo.py | 59 ++++++++++++++++++++++++++++++++++--- youtube_dl/extractor/extractors.py | 5 +++- 2 files changed, 59 insertions(+), 5 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/discoverygo.py b/youtube_dl/extractor/discoverygo.py index 2042493a8..d14de8f00 100644 --- a/youtube_dl/extractor/discoverygo.py +++ b/youtube_dl/extractor/discoverygo.py @@ -1,17 +1,21 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor from ..compat import compat_str from ..utils import ( extract_attributes, + ExtractorError, int_or_none, parse_age_limit, - ExtractorError, + remove_end, + unescapeHTML, ) -class DiscoveryGoIE(InfoExtractor): - _VALID_URL = r'''(?x)https?://(?:www\.)?(?: +class DiscoveryGoBaseIE(InfoExtractor): + _VALID_URL_TEMPLATE = r'''(?x)https?://(?:www\.)?(?: discovery| investigationdiscovery| discoverylife| @@ -21,7 +25,11 @@ class DiscoveryGoIE(InfoExtractor): sciencechannel| tlc| velocitychannel - )go\.com/(?:[^/]+/)*(?P[^/?#&]+)''' + )go\.com/%s(?P[^/?#&]+)''' + + +class DiscoveryGoIE(DiscoveryGoBaseIE): + _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % r'(?:[^/]+/)+' _TEST = { 'url': 'https://www.discoverygo.com/love-at-first-kiss/kiss-first-ask-questions-later/', 'info_dict': { @@ -113,3 +121,46 @@ class DiscoveryGoIE(InfoExtractor): 'formats': formats, 'subtitles': subtitles, } + + +class DiscoveryGoPlaylistIE(DiscoveryGoBaseIE): + _VALID_URL = DiscoveryGoBaseIE._VALID_URL_TEMPLATE % '' + _TEST = { + 'url': 'https://www.discoverygo.com/bering-sea-gold/', + 'info_dict': { + 'id': 'bering-sea-gold', + 'title': 'Bering Sea Gold', + 'description': 'md5:cc5c6489835949043c0cc3ad66c2fa0e', + }, + 'playlist_mincount': 6, + } + + @classmethod + def suitable(cls, url): + return False if DiscoveryGoIE.suitable(url) else super( + DiscoveryGoPlaylistIE, cls).suitable(url) + + def _real_extract(self, url): + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + entries = [] + for mobj in re.finditer(r'data-json=(["\'])(?P{.+?})\1', 
webpage): + data = self._parse_json( + mobj.group('json'), display_id, + transform_source=unescapeHTML, fatal=False) + if not isinstance(data, dict) or data.get('type') != 'episode': + continue + episode_url = data.get('socialUrl') + if not episode_url: + continue + entries.append(self.url_result( + episode_url, ie=DiscoveryGoIE.ie_key(), + video_id=data.get('id'))) + + return self.playlist_result( + entries, display_id, + remove_end(self._og_search_title( + webpage, fatal=False), ' | Discovery GO'), + self._og_search_description(webpage)) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index bd2762e47..24c478932 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -265,7 +265,10 @@ from .dvtv import DVTVIE from .dumpert import DumpertIE from .defense import DefenseGouvFrIE from .discovery import DiscoveryIE -from .discoverygo import DiscoveryGoIE +from .discoverygo import ( + DiscoveryGoIE, + DiscoveryGoPlaylistIE, +) from .disney import DisneyIE from .dispeak import DigitallySpeakingIE from .dropbox import DropboxIE -- cgit 1.4.1 From 3e5856d860bb94b4dbe2fa38d9c50a6a92bb7401 Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Fri, 17 Mar 2017 09:53:44 +0100 Subject: [discoverynetworks] add support for more domains and bypass geo restiction --- youtube_dl/extractor/discoverynetworks.py | 52 +++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/tlc.py | 43 ------------------------- 3 files changed, 53 insertions(+), 44 deletions(-) create mode 100644 youtube_dl/extractor/discoverynetworks.py delete mode 100644 youtube_dl/extractor/tlc.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/discoverynetworks.py b/youtube_dl/extractor/discoverynetworks.py new file mode 100644 index 000000000..b6653784c --- /dev/null +++ b/youtube_dl/extractor/discoverynetworks.py @@ -0,0 +1,52 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .brightcove import BrightcoveLegacyIE +from ..compat import ( + compat_parse_qs, + compat_urlparse, +) +from ..utils import smuggle_url + + +class DiscoveryNetworksDeIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?(?:discovery|tlc|animalplanet|dmax)\.de/(?:.*#(?P\d+)|(?:[^/]+/)*videos/(?P[^/?#]+))' + + _TESTS = [{ + 'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001', + 'info_dict': { + 'id': '3235167922001', + 'ext': 'mp4', + 'title': 'Breaking Amish: Die Welt da draußen', + 'description': ( + 'Vier Amische und eine Mennonitin wagen in New York' + ' den Sprung in ein komplett anderes Leben. 
Begleitet sie auf' + ' ihrem spannenden Weg.'), + 'timestamp': 1396598084, + 'upload_date': '20140404', + 'uploader_id': '1659832546', + }, + }, { + 'url': 'http://www.dmax.de/programme/storage-hunters-uk/videos/storage-hunters-uk-episode-6/', + 'only_matching': True, + }, { + 'url': 'http://www.discovery.de/#5332316765001', + 'only_matching': True, + }] + BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1659832546/default_default/index.html?videoId=%s' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + brightcove_id = mobj.group('id') + if not brightcove_id: + title = mobj.group('title') + webpage = self._download_webpage(url, title) + brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) + brightcove_id = compat_parse_qs(compat_urlparse.urlparse( + brightcove_legacy_url).query)['@videoPlayer'][0] + return self.url_result(smuggle_url( + self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, {'geo_countries': ['DE']}), + 'BrightcoveNew', brightcove_id) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 24c478932..79405b468 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -269,6 +269,7 @@ from .discoverygo import ( DiscoveryGoIE, DiscoveryGoPlaylistIE, ) +from .discoverynetworks import DiscoveryNetworksDeIE from .disney import DisneyIE from .dispeak import DigitallySpeakingIE from .dropbox import DropboxIE @@ -973,7 +974,6 @@ from .thisav import ThisAVIE from .thisoldhouse import ThisOldHouseIE from .threeqsdn import ThreeQSDNIE from .tinypic import TinyPicIE -from .tlc import TlcDeIE from .tmz import ( TMZIE, TMZArticleIE, diff --git a/youtube_dl/extractor/tlc.py b/youtube_dl/extractor/tlc.py deleted file mode 100644 index fd145ba42..000000000 --- a/youtube_dl/extractor/tlc.py +++ /dev/null @@ -1,43 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from .brightcove import BrightcoveLegacyIE -from ..compat import ( - compat_parse_qs, - compat_urlparse, -) - - -class TlcDeIE(InfoExtractor): - IE_NAME = 'tlc.de' - _VALID_URL = r'https?://(?:www\.)?tlc\.de/(?:[^/]+/)*videos/(?P<title>[^/?#]+)?(?:.*#(?P<id>\d+))?' - - _TEST = { - 'url': 'http://www.tlc.de/sendungen/breaking-amish/videos/#3235167922001', - 'info_dict': { - 'id': '3235167922001', - 'ext': 'mp4', - 'title': 'Breaking Amish: Die Welt da draußen', - 'description': ( - 'Vier Amische und eine Mennonitin wagen in New York' - ' den Sprung in ein komplett anderes Leben. 
Begleitet sie auf' - ' ihrem spannenden Weg.'), - 'timestamp': 1396598084, - 'upload_date': '20140404', - 'uploader_id': '1659832546', - }, - } - BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/1659832546/default_default/index.html?videoId=%s' - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - brightcove_id = mobj.group('id') - if not brightcove_id: - title = mobj.group('title') - webpage = self._download_webpage(url, title) - brightcove_legacy_url = BrightcoveLegacyIE._extract_brightcove_url(webpage) - brightcove_id = compat_parse_qs(compat_urlparse.urlparse(brightcove_legacy_url).query)['@videoPlayer'][0] - return self.url_result(self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id) -- cgit 1.4.1 From e7a51a4c0235fafefc672d753017c770a306677a Mon Sep 17 00:00:00 2001 From: mrBliss <dewinant@gmail.com> Date: Tue, 31 Jan 2017 13:59:18 +0100 Subject: [vtm] Add extractor (closes #9974) Implementation of the approach described in #9974. --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vtm.py | 136 +++++++++++++++++++++++++++++++++++++ 2 files changed, 137 insertions(+) create mode 100644 youtube_dl/extractor/vtm.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 79405b468..64316d4a8 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1174,6 +1174,7 @@ from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE from .vrak import VrakIE +from .vtm import VTMIE from .vube import VubeIE from .vuclip import VuClipIE from .vvvvid import VVVVIDIE diff --git a/youtube_dl/extractor/vtm.py b/youtube_dl/extractor/vtm.py new file mode 100644 index 000000000..f0a70040b --- /dev/null +++ b/youtube_dl/extractor/vtm.py @@ -0,0 +1,136 @@ +from __future__ import unicode_literals + +import re + +from .generic import GenericIE +from .common import InfoExtractor +from ..utils import ( + urlencode_postdata, + compat_urllib_parse_urlencode, + ExtractorError, + remove_end, +) + + +class VTMIE(InfoExtractor): + """Download full episodes that require an account from vtm.be or q2.be. + + The generic extractor can be used to download clips that do no require an + account. + """ + _VALID_URL = r'https?://(?:www\.)?(?P<site_id>vtm|q2)\.be/video[/?].+?' + _NETRC_MACHINE = 'vtm' + _APIKEY = '3_HZ0FtkMW_gOyKlqQzW5_0FHRC7Nd5XpXJZcDdXY4pk5eES2ZWmejRW5egwVm4ug-' + _TESTS = [ + { + 'url': 'http://vtm.be/video/volledige-afleveringen/id/vtm_20170219_VM0678361_vtmwatch', + 'info_dict': { + 'id': 'vtm_20170219_VM0678361_vtmwatch', + 'ext': 'mp4', + 'title': 'Allemaal Chris afl. 
6', + 'description': 'md5:4be86427521e7b07e0adb0c9c554ddb2', + }, + 'skip_download': True, + }, + { + 'url': 'http://vtm.be/video/volledige-afleveringen/id/257107153551000', + 'only_matching': True, + }, + { + 'url': 'http://vtm.be/video?aid=163157', + 'only_matching': True, + }, + { + 'url': 'http://www.q2.be/video/volledige-afleveringen/id/2be_20170301_VM0684442_q2', + 'only_matching': True, + }, + { + 'url': 'http://vtm.be/video?aid=168332', + 'info_dict': { + 'id': 'video?aid=168332', + 'ext': 'mp4', + 'title': 'Videozone', + }, + }, + ] + + def _real_initialize(self): + self._logged_in = False + + def _login(self): + (username, password) = self._get_login_info() + if username is None or password is None: + self.raise_login_required() + + auth_data = { + 'APIKey': self._APIKEY, + 'sdk': 'js_6.1', + 'format': 'json', + 'loginID': username, + 'password': password, + } + + auth_info = self._download_json( + 'https://accounts.eu1.gigya.com/accounts.login', None, + note='Logging in', errnote='Unable to log in', + data=urlencode_postdata(auth_data), fatal=True) + + error_message = auth_info.get('errorDetails') + if error_message: + raise ExtractorError( + 'Unable to login: %s' % error_message, expected=True) + + self._uid = auth_info['UID'] + self._uid_signature = auth_info['UIDSignature'] + self._signature_timestamp = auth_info['signatureTimestamp'] + + self._logged_in = True + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + site_id = mobj.group('site_id') + + webpage = self._download_webpage(url, None, "Downloading webpage") + + # The URL sometimes contains the video id, but not always, e.g., test + # case 3. Fortunately, all webpages of videos requiring authentication + # contain the video id. + video_id = self._search_regex( + r'\\"vodId\\":\\"(.+?)\\"', webpage, 'video_id', default=None) + + # It was most likely a video not requiring authentication. 
+ if not video_id: + return self.url_result(url, 'Generic') + + if not self._logged_in: + self._login() + + title = self._html_search_regex( + r'\\"title\\":\\"(.+?)\\"', webpage, 'title', default=None) + + description = self._html_search_regex( + r'<div[^>]+class="field-item\s+even">\s*<p>(.+?)</p>', + webpage, 'description', default=None) + + data_url = 'http://vod.medialaan.io/api/1.0/item/%s/video' % video_id + m3u8_data = { + 'app_id': 'vtm_watch' if site_id == 'vtm' else 'q2', + 'user_network': 'vtm-sso', + 'UID': self._uid, + 'UIDSignature': self._uid_signature, + 'signatureTimestamp': self._signature_timestamp, + } + data = self._download_json(data_url, video_id, query=m3u8_data) + + formats = self._extract_m3u8_formats( + data['response']['uri'], video_id, entry_protocol='m3u8_native', + ext='mp4', m3u8_id='hls') + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'formats': formats, + } -- cgit 1.4.1 From 2a721cdff2da0a9267c96ff2f4c19cda4ce0ab83 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 18 Mar 2017 05:58:54 +0700 Subject: [medialaan] Fix and improve extraction (closes #11912) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/medialaan.py | 263 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/vtm.py | 136 ------------------- 3 files changed, 264 insertions(+), 137 deletions(-) create mode 100644 youtube_dl/extractor/medialaan.py delete mode 100644 youtube_dl/extractor/vtm.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 64316d4a8..6b4742ed8 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1174,7 +1174,7 @@ from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE from .vrak import VrakIE -from .vtm import VTMIE +from .medialaan import MedialaanIE from .vube import VubeIE from .vuclip import VuClipIE from .vvvvid import VVVVIDIE diff --git a/youtube_dl/extractor/medialaan.py b/youtube_dl/extractor/medialaan.py new file mode 100644 index 000000000..e70d4679d --- /dev/null +++ b/youtube_dl/extractor/medialaan.py @@ -0,0 +1,263 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + ExtractorError, + int_or_none, + parse_duration, + try_get, + unified_timestamp, + urlencode_postdata, +) + + +class MedialaanIE(InfoExtractor): + _VALID_URL = r'''(?x) + https?:// + (?:www\.)? + (?: + (?P<site_id>vtm|q2|vtmkzoom)\.be/ + (?: + video(?:/[^/]+/id/|/?\?.*?\baid=)| + (?:[^/]+/)* + ) + ) + (?P<id>[^/?#&]+) + ''' + _NETRC_MACHINE = 'medialaan' + _APIKEY = '3_HZ0FtkMW_gOyKlqQzW5_0FHRC7Nd5XpXJZcDdXY4pk5eES2ZWmejRW5egwVm4ug-' + _SITE_TO_APP_ID = { + 'vtm': 'vtm_watch', + 'q2': 'q2', + 'vtmkzoom': 'vtmkzoom', + } + _TESTS = [{ + # vod + 'url': 'http://vtm.be/video/volledige-afleveringen/id/vtm_20170219_VM0678361_vtmwatch', + 'info_dict': { + 'id': 'vtm_20170219_VM0678361_vtmwatch', + 'ext': 'mp4', + 'title': 'Allemaal Chris afl. 6', + 'description': 'md5:4be86427521e7b07e0adb0c9c554ddb2', + 'timestamp': 1487533280, + 'upload_date': '20170219', + 'duration': 2562, + 'series': 'Allemaal Chris', + 'season': 'Allemaal Chris', + 'season_number': 1, + 'season_id': '256936078124527', + 'episode': 'Allemaal Chris afl. 
6', + 'episode_number': 6, + 'episode_id': '256936078591527', + }, + 'params': { + 'skip_download': True, + }, + 'skip': 'Requires account credentials', + }, { + # clip + 'url': 'http://vtm.be/video?aid=168332', + 'info_dict': { + 'id': '168332', + 'ext': 'mp4', + 'title': '"Veronique liegt!"', + 'description': 'md5:1385e2b743923afe54ba4adc38476155', + 'timestamp': 1489002029, + 'upload_date': '20170308', + 'duration': 96, + }, + }, { + # vod + 'url': 'http://vtm.be/video/volledige-afleveringen/id/257107153551000', + 'only_matching': True, + }, { + # vod + 'url': 'http://vtm.be/video?aid=163157', + 'only_matching': True, + }, { + # vod + 'url': 'http://www.q2.be/video/volledige-afleveringen/id/2be_20170301_VM0684442_q2', + 'only_matching': True, + }, { + # clip + 'url': 'http://vitaya.be/de-jurk/precies-je-hebt-geen-borsten', + 'only_matching': True, + }, { + # clip + 'url': 'http://vtmkzoom.be/k3-dansstudio/een-nieuw-seizoen-van-k3-dansstudio', + 'only_matching': True, + }] + + def _real_initialize(self): + self._logged_in = False + + def _login(self): + username, password = self._get_login_info() + if username is None: + self.raise_login_required() + + auth_data = { + 'APIKey': self._APIKEY, + 'sdk': 'js_6.1', + 'format': 'json', + 'loginID': username, + 'password': password, + } + + auth_info = self._download_json( + 'https://accounts.eu1.gigya.com/accounts.login', None, + note='Logging in', errnote='Unable to log in', + data=urlencode_postdata(auth_data)) + + error_message = auth_info.get('errorDetails') or auth_info.get('errorMessage') + if error_message: + raise ExtractorError( + 'Unable to login: %s' % error_message, expected=True) + + self._uid = auth_info['UID'] + self._uid_signature = auth_info['UIDSignature'] + self._signature_timestamp = auth_info['signatureTimestamp'] + + self._logged_in = True + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + video_id, site_id = mobj.group('id', 'site_id') + + webpage = self._download_webpage(url, video_id) + + config = self._parse_json( + self._search_regex( + r'videoJSConfig\s*=\s*JSON\.parse\(\'({.+?})\'\);', + webpage, 'config', default='{}'), video_id, + transform_source=lambda s: s.replace( + '\\\\', '\\').replace(r'\"', '"').replace(r"\'", "'")) + + vod_id = config.get('vodId') or self._search_regex( + (r'\\"vodId\\"\s*:\s*\\"(.+?)\\"', + r'<[^>]+id=["\']vod-(\d+)'), + webpage, 'video_id', default=None) + + # clip, no authentication required + if not vod_id: + player = self._parse_json( + self._search_regex( + r'vmmaplayer\(({.+?})\);', webpage, 'vmma player', + default=''), + video_id, transform_source=lambda s: '[%s]' % s, fatal=False) + if player: + video = player[-1] + info = { + 'id': video_id, + 'url': video['videoUrl'], + 'title': video['title'], + 'thumbnail': video.get('imageUrl'), + 'timestamp': int_or_none(video.get('createdDate')), + 'duration': int_or_none(video.get('duration')), + } + else: + info = self._parse_html5_media_entries( + url, webpage, video_id, m3u8_id='hls')[0] + info.update({ + 'id': video_id, + 'title': self._html_search_meta('description', webpage), + 'duration': parse_duration(self._html_search_meta('duration', webpage)), + }) + # vod, authentication required + else: + if not self._logged_in: + self._login() + + settings = self._parse_json( + self._search_regex( + r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', + webpage, 'drupal settings', default='{}'), + video_id) + + def get(container, item): + return try_get( + settings, lambda x: x[container][item], + compat_str) or 
self._search_regex( + r'"%s"\s*:\s*"([^"]+)' % item, webpage, item, + default=None) + + app_id = get('vod', 'app_id') or self._SITE_TO_APP_ID.get(site_id, 'vtm_watch') + sso = get('vod', 'gigyaDatabase') or 'vtm-sso' + + data = self._download_json( + 'http://vod.medialaan.io/api/1.0/item/%s/video' % vod_id, + video_id, query={ + 'app_id': app_id, + 'user_network': sso, + 'UID': self._uid, + 'UIDSignature': self._uid_signature, + 'signatureTimestamp': self._signature_timestamp, + }) + + formats = self._extract_m3u8_formats( + data['response']['uri'], video_id, entry_protocol='m3u8_native', + ext='mp4', m3u8_id='hls') + + self._sort_formats(formats) + + info = { + 'id': vod_id, + 'formats': formats, + } + + api_key = get('vod', 'apiKey') + channel = get('medialaanGigya', 'channel') + + if api_key: + videos = self._download_json( + 'http://vod.medialaan.io/vod/v2/videos', video_id, fatal=False, + query={ + 'channels': channel, + 'ids': vod_id, + 'limit': 1, + 'apikey': api_key, + }) + if videos: + video = try_get( + videos, lambda x: x['response']['videos'][0], dict) + if video: + def get(container, item, expected_type=None): + return try_get( + video, lambda x: x[container][item], expected_type) + + def get_string(container, item): + return get(container, item, compat_str) + + info.update({ + 'series': get_string('program', 'title'), + 'season': get_string('season', 'title'), + 'season_number': int_or_none(get('season', 'number')), + 'season_id': get_string('season', 'id'), + 'episode': get_string('episode', 'title'), + 'episode_number': int_or_none(get('episode', 'number')), + 'episode_id': get_string('episode', 'id'), + 'duration': int_or_none( + video.get('duration')) or int_or_none( + video.get('durationMillis'), scale=1000), + 'title': get_string('episode', 'title'), + 'description': get_string('episode', 'text'), + 'timestamp': unified_timestamp(get_string( + 'publication', 'begin')), + }) + + if not info.get('title'): + info['title'] = try_get( + config, lambda x: x['videoConfig']['title'], + compat_str) or self._html_search_regex( + r'\\"title\\"\s*:\s*\\"(.+?)\\"', webpage, 'title', + default=None) or self._og_search_title(webpage) + + if not info.get('description'): + info['description'] = self._html_search_regex( + r'<div[^>]+class="field-item\s+even">\s*<p>(.+?)</p>', + webpage, 'description', default=None) + + return info diff --git a/youtube_dl/extractor/vtm.py b/youtube_dl/extractor/vtm.py deleted file mode 100644 index f0a70040b..000000000 --- a/youtube_dl/extractor/vtm.py +++ /dev/null @@ -1,136 +0,0 @@ -from __future__ import unicode_literals - -import re - -from .generic import GenericIE -from .common import InfoExtractor -from ..utils import ( - urlencode_postdata, - compat_urllib_parse_urlencode, - ExtractorError, - remove_end, -) - - -class VTMIE(InfoExtractor): - """Download full episodes that require an account from vtm.be or q2.be. - - The generic extractor can be used to download clips that do no require an - account. - """ - _VALID_URL = r'https?://(?:www\.)?(?P<site_id>vtm|q2)\.be/video[/?].+?' - _NETRC_MACHINE = 'vtm' - _APIKEY = '3_HZ0FtkMW_gOyKlqQzW5_0FHRC7Nd5XpXJZcDdXY4pk5eES2ZWmejRW5egwVm4ug-' - _TESTS = [ - { - 'url': 'http://vtm.be/video/volledige-afleveringen/id/vtm_20170219_VM0678361_vtmwatch', - 'info_dict': { - 'id': 'vtm_20170219_VM0678361_vtmwatch', - 'ext': 'mp4', - 'title': 'Allemaal Chris afl. 
6', - 'description': 'md5:4be86427521e7b07e0adb0c9c554ddb2', - }, - 'skip_download': True, - }, - { - 'url': 'http://vtm.be/video/volledige-afleveringen/id/257107153551000', - 'only_matching': True, - }, - { - 'url': 'http://vtm.be/video?aid=163157', - 'only_matching': True, - }, - { - 'url': 'http://www.q2.be/video/volledige-afleveringen/id/2be_20170301_VM0684442_q2', - 'only_matching': True, - }, - { - 'url': 'http://vtm.be/video?aid=168332', - 'info_dict': { - 'id': 'video?aid=168332', - 'ext': 'mp4', - 'title': 'Videozone', - }, - }, - ] - - def _real_initialize(self): - self._logged_in = False - - def _login(self): - (username, password) = self._get_login_info() - if username is None or password is None: - self.raise_login_required() - - auth_data = { - 'APIKey': self._APIKEY, - 'sdk': 'js_6.1', - 'format': 'json', - 'loginID': username, - 'password': password, - } - - auth_info = self._download_json( - 'https://accounts.eu1.gigya.com/accounts.login', None, - note='Logging in', errnote='Unable to log in', - data=urlencode_postdata(auth_data), fatal=True) - - error_message = auth_info.get('errorDetails') - if error_message: - raise ExtractorError( - 'Unable to login: %s' % error_message, expected=True) - - self._uid = auth_info['UID'] - self._uid_signature = auth_info['UIDSignature'] - self._signature_timestamp = auth_info['signatureTimestamp'] - - self._logged_in = True - - def _real_extract(self, url): - mobj = re.match(self._VALID_URL, url) - site_id = mobj.group('site_id') - - webpage = self._download_webpage(url, None, "Downloading webpage") - - # The URL sometimes contains the video id, but not always, e.g., test - # case 3. Fortunately, all webpages of videos requiring authentication - # contain the video id. - video_id = self._search_regex( - r'\\"vodId\\":\\"(.+?)\\"', webpage, 'video_id', default=None) - - # It was most likely a video not requiring authentication. 
- if not video_id: - return self.url_result(url, 'Generic') - - if not self._logged_in: - self._login() - - title = self._html_search_regex( - r'\\"title\\":\\"(.+?)\\"', webpage, 'title', default=None) - - description = self._html_search_regex( - r'<div[^>]+class="field-item\s+even">\s*<p>(.+?)</p>', - webpage, 'description', default=None) - - data_url = 'http://vod.medialaan.io/api/1.0/item/%s/video' % video_id - m3u8_data = { - 'app_id': 'vtm_watch' if site_id == 'vtm' else 'q2', - 'user_network': 'vtm-sso', - 'UID': self._uid, - 'UIDSignature': self._uid_signature, - 'signatureTimestamp': self._signature_timestamp, - } - data = self._download_json(data_url, video_id, query=m3u8_data) - - formats = self._extract_m3u8_formats( - data['response']['uri'], video_id, entry_protocol='m3u8_native', - ext='mp4', m3u8_id='hls') - - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'formats': formats, - } -- cgit 1.4.1 From 772b5ff57f702dd76986d1db17068da2116a2800 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sun, 19 Mar 2017 00:45:04 +0100 Subject: [toongoggles] Add new extractor(closes #12171) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/toongoggles.py | 81 +++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+) create mode 100644 youtube_dl/extractor/toongoggles.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 6b4742ed8..97d68d9ca 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -986,6 +986,7 @@ from .tnaflix import ( ) from .toggle import ToggleIE from .tonline import TOnlineIE +from .toongoggles import ToonGogglesIE from .toutv import TouTvIE from .toypics import ToypicsUserIE, ToypicsIE from .traileraddict import TrailerAddictIE diff --git a/youtube_dl/extractor/toongoggles.py b/youtube_dl/extractor/toongoggles.py new file mode 100644 index 000000000..b5ba1c01d --- /dev/null +++ b/youtube_dl/extractor/toongoggles.py @@ -0,0 +1,81 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_duration, +) + + +class ToonGogglesIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?toongoggles\.com/shows/(?P<show_id>\d+)(?:/[^/]+/episodes/(?P<episode_id>\d+))?' 
+ _TESTS = [{ + 'url': 'http://www.toongoggles.com/shows/217143/bernard-season-2/episodes/217147/football', + 'md5': '18289fc2b951eff6b953a9d8f01e6831', + 'info_dict': { + 'id': '217147', + 'ext': 'mp4', + 'title': 'Football', + 'uploader_id': '1', + 'description': 'Bernard decides to play football in order to be better than Lloyd and tries to beat him no matter how, he even cheats.', + 'upload_date': '20160718', + 'timestamp': 1468879330, + } + }, { + 'url': 'http://www.toongoggles.com/shows/227759/om-nom-stories-around-the-world', + 'info_dict': { + 'id': '227759', + 'title': 'Om Nom Stories Around The World', + }, + 'playlist_mincount': 11, + }] + + def _call_api(self, action, page_id, query): + query.update({ + 'for_ng': 1, + 'for_web': 1, + 'show_meta': 1, + 'version': 7.0, + }) + return self._download_json('http://api.toongoggles.com/' + action, page_id, query=query) + + def _parse_episode_data(self, episode_data): + title = episode_data['episode_name'] + + return { + '_type': 'url_transparent', + 'id': episode_data['episode_id'], + 'title': title, + 'url': 'kaltura:513551:' + episode_data['entry_id'], + 'thumbnail': episode_data.get('thumbnail_url'), + 'description': episode_data.get('description'), + 'duration': parse_duration(episode_data.get('hms')), + 'series': episode_data.get('show_name'), + 'season_number': int_or_none(episode_data.get('season_num')), + 'episode_id': episode_data.get('episode_id'), + 'episode': title, + 'episode_number': int_or_none(episode_data.get('episode_num')), + 'categories': episode_data.get('categories'), + 'ie_key': 'Kaltura', + } + + def _real_extract(self, url): + show_id, episode_id = re.match(self._VALID_URL, url).groups() + if episode_id: + episode_data = self._call_api('search', episode_id, { + 'filter': 'episode', + 'id': episode_id, + })['objects'][0] + return self._parse_episode_data(episode_data) + else: + show_data = self._call_api('getepisodesbyshow', show_id, { + 'max': 1000000000, + 'showid': show_id, + }) + entries = [] + for episode_data in show_data.get('objects', []): + entries.append(self._parse_episode_data(episode_data)) + return self.playlist_result(entries, show_id, show_data.get('show_name')) -- cgit 1.4.1 From 46b18f2349670d395b9d84a57ee3d9b5d221ff4b Mon Sep 17 00:00:00 2001 From: John Hawkinson <jhawk@mit.edu> Date: Wed, 8 Mar 2017 18:13:54 -0500 Subject: [BostonGlobe] New. Nonstandard version of Brightcove. Has a "data-brightcove-video-id" instead of a "data-video-id," otherwise pretty much just Brightcove. Except the Globe isn't all Brightcove videos, so fallback to Generic, too. Also, abstract playlist_from_matches() from generic.py to common.py, and use it here. History of these changes can be found in 51170427d4b1143572a498dedaee61863a5b2c5b. 
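For reference, a minimal, self-contained sketch of the detection logic described above — the real implementation is the BostonGlobeIE._real_extract in the diff below; `sample_html` is an invented example, and `extract_attributes` is the same helper from youtube_dl.utils that the patch imports:

    import re
    from youtube_dl.utils import extract_attributes

    def brightcove_urls_from_page(webpage):
        # Scan every <video> tag; Boston Globe pages use the nonstandard
        # data-brightcove-video-id attribute instead of data-video-id.
        urls = []
        for video_tag in re.findall(r'(?i)(<video[^>]+>)', webpage):
            attrs = extract_attributes(video_tag)
            video_id = attrs.get('data-brightcove-video-id')
            account_id = attrs.get('data-account')
            player_id = attrs.get('data-player')
            embed = attrs.get('data-embed')
            if video_id and account_id and player_id and embed:
                # Assemble a standard Brightcove player URL and let the
                # BrightcoveNew extractor take it from there.
                urls.append(
                    'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s'
                    % (account_id, player_id, embed, video_id))
        return urls

    # Invented sample input, roughly matching what the Globe serves:
    sample_html = ('<video data-brightcove-video-id="5320421710001" '
                   'data-account="245991542" data-player="SJWAiyYWg" '
                   'data-embed="default"></video>')
    print(brightcove_urls_from_page(sample_html))

Pages that yield no such tag fall through to the Generic extractor, which is why the fallback mentioned above is kept.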
--- youtube_dl/extractor/bostonglobe.py | 72 ++++++++++++++++++++++++++++++++++ youtube_dl/extractor/common.py | 28 +++++++++----- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/generic.py | 77 +++++++++++++++++-------------------- 4 files changed, 126 insertions(+), 52 deletions(-) create mode 100644 youtube_dl/extractor/bostonglobe.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/bostonglobe.py b/youtube_dl/extractor/bostonglobe.py new file mode 100644 index 000000000..57882fbee --- /dev/null +++ b/youtube_dl/extractor/bostonglobe.py @@ -0,0 +1,72 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +from ..utils import ( + extract_attributes, +) + + +class BostonGlobeIE(InfoExtractor): + _VALID_URL = r'(?i)https?://(?:www\.)?bostonglobe\.com/.*/(?P<id>[^/]+)/\w+(?:\.html)?' + _TESTS = [ + { + 'url': 'http://www.bostonglobe.com/metro/2017/02/11/tree-finally-succumbs-disease-leaving-hole-neighborhood/h1b4lviqzMTIn9sVy8F3gP/story.html', + 'md5': '0a62181079c85c2d2b618c9a738aedaf', + 'info_dict': { + 'title': 'A tree finally succumbs to disease, leaving a hole in a neighborhood', + 'id': '5320421710001', + 'ext': 'mp4', + 'description': 'It arrived as a sapling when the Back Bay was in its infancy, a spindly American elm tamped down into a square of dirt cut into the brick sidewalk of 1880s Marlborough Street, no higher than the first bay window of the new brownstone behind it.', + 'timestamp': 1486877593, + 'upload_date': '20170212', + 'uploader_id': '245991542', + }, + }, + { + # Embedded youtube video; we hand it off to the Generic extractor. + 'url': 'https://www.bostonglobe.com/lifestyle/names/2017/02/17/does-ben-affleck-play-matt-damon-favorite-version-batman/ruqkc9VxKBYmh5txn1XhSI/story.html', + 'md5': '582b40327089d5c0c949b3c54b13c24b', + 'info_dict': { + 'title': "Who Is Matt Damon's Favorite Batman?", + 'id': 'ZW1QCnlA6Qc', + 'ext': 'mp4', + 'upload_date': '20170217', + 'description': 'md5:3b3dccb9375867e0b4d527ed87d307cb', + 'uploader': 'The Late Late Show with James Corden', + 'uploader_id': 'TheLateLateShow', + }, + 'expected_warnings': ['404'], + }, + ] + + def _real_extract(self, url): + page_id = self._match_id(url) + webpage = self._download_webpage(url, page_id) + + page_title = self._og_search_title(webpage, default=None) + + # <video data-brightcove-video-id="5320421710001" data-account="245991542" data-player="SJWAiyYWg" data-embed="default" class="video-js" controls itemscope itemtype="http://schema.org/VideoObject"> + entries = [] + for video in re.findall(r'(?i)(<video[^>]+>)', webpage): + attrs = extract_attributes(video) + + video_id = attrs.get('data-brightcove-video-id') + account_id = attrs.get('data-account') + player_id = attrs.get('data-player') + embed = attrs.get('data-embed') + + if video_id and account_id and player_id and embed: + entries.append( + 'http://players.brightcove.net/%s/%s_%s/index.html?videoId=%s' + % (account_id, player_id, embed, video_id)) + + if len(entries) == 0: + return self.url_result(url, 'Generic') + elif len(entries) == 1: + return self.url_result(entries[0], 'BrightcoveNew') + else: + return self.playlist_from_matches(entries, page_id, page_title, ie='BrightcoveNew') diff --git a/youtube_dl/extractor/common.py b/youtube_dl/extractor/common.py index b51799bfa..0852b8e8c 100644 --- a/youtube_dl/extractor/common.py +++ b/youtube_dl/extractor/common.py @@ -36,34 +36,35 @@ from ..utils import ( clean_html, 
compiled_regex_type, determine_ext, + determine_protocol, error_to_compat_str, ExtractorError, + extract_attributes, fix_xml_ampersands, float_or_none, GeoRestrictedError, GeoUtils, int_or_none, js_to_json, + mimetype2ext, + orderedSet, + parse_codecs, + parse_duration, parse_iso8601, + parse_m3u8_attributes, RegexNotFoundError, - sanitize_filename, sanitized_Request, + sanitize_filename, unescapeHTML, unified_strdate, unified_timestamp, + update_Request, + update_url_query, + urljoin, url_basename, xpath_element, xpath_text, xpath_with_ns, - determine_protocol, - parse_duration, - mimetype2ext, - update_Request, - update_url_query, - parse_m3u8_attributes, - extract_attributes, - parse_codecs, - urljoin, ) @@ -714,6 +715,13 @@ class InfoExtractor(object): video_info['title'] = video_title return video_info + def playlist_from_matches(self, matches, video_id, video_title, getter=None, ie=None): + urlrs = orderedSet( + self.url_result(self._proto_relative_url(getter(m) if getter else m), ie) + for m in matches) + return self.playlist_result( + urlrs, playlist_id=video_id, playlist_title=video_title) + @staticmethod def playlist_result(entries, playlist_id=None, playlist_title=None, playlist_description=None): """Returns a playlist""" diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 97d68d9ca..40a5c9842 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -117,6 +117,7 @@ from .bleacherreport import ( from .blinkx import BlinkxIE from .bloomberg import BloombergIE from .bokecc import BokeCCIE +from .bostonglobe import BostonGlobeIE from .bpb import BpbIE from .br import BRIE from .bravotv import BravoTVIE diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 0fcb3fdac..a71d6bac0 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -1841,14 +1841,6 @@ class GenericIE(InfoExtractor): video_description = self._og_search_description(webpage, default=None) video_thumbnail = self._og_search_thumbnail(webpage, default=None) - # Helper method - def _playlist_from_matches(matches, getter=None, ie=None): - urlrs = orderedSet( - self.url_result(self._proto_relative_url(getter(m) if getter else m), ie) - for m in matches) - return self.playlist_result( - urlrs, playlist_id=video_id, playlist_title=video_title) - # Look for Brightcove Legacy Studio embeds bc_urls = BrightcoveLegacyIE._extract_brightcove_urls(webpage) if bc_urls: @@ -1869,28 +1861,28 @@ class GenericIE(InfoExtractor): # Look for Brightcove New Studio embeds bc_urls = BrightcoveNewIE._extract_urls(webpage) if bc_urls: - return _playlist_from_matches(bc_urls, ie='BrightcoveNew') + return self.playlist_from_matches(bc_urls, video_id, video_title, ie='BrightcoveNew') # Look for ThePlatform embeds tp_urls = ThePlatformIE._extract_urls(webpage) if tp_urls: - return _playlist_from_matches(tp_urls, ie='ThePlatform') + return self.playlist_from_matches(tp_urls, video_id, video_title, ie='ThePlatform') # Look for Vessel embeds vessel_urls = VesselIE._extract_urls(webpage) if vessel_urls: - return _playlist_from_matches(vessel_urls, ie=VesselIE.ie_key()) + return self.playlist_from_matches(vessel_urls, video_id, video_title, ie=VesselIE.ie_key()) # Look for embedded rtl.nl player matches = re.findall( r'<iframe[^>]+?src="((?:https?:)?//(?:www\.)?rtl\.nl/system/videoplayer/[^"]+(?:video_)?embed[^"]+)"', webpage) if matches: - return _playlist_from_matches(matches, ie='RtlNl') + return 
self.playlist_from_matches(matches, video_id, video_title, ie='RtlNl') vimeo_urls = VimeoIE._extract_urls(url, webpage) if vimeo_urls: - return _playlist_from_matches(vimeo_urls, ie=VimeoIE.ie_key()) + return self.playlist_from_matches(vimeo_urls, video_id, video_title, ie=VimeoIE.ie_key()) vid_me_embed_url = self._search_regex( r'src=[\'"](https?://vid\.me/[^\'"]+)[\'"]', @@ -1912,25 +1904,25 @@ class GenericIE(InfoExtractor): (?:embed|v|p)/.+?) \1''', webpage) if matches: - return _playlist_from_matches( - matches, lambda m: unescapeHTML(m[1])) + return self.playlist_from_matches( + matches, video_id, video_title, lambda m: unescapeHTML(m[1])) # Look for lazyYT YouTube embed matches = re.findall( r'class="lazyYT" data-youtube-id="([^"]+)"', webpage) if matches: - return _playlist_from_matches(matches, lambda m: unescapeHTML(m)) + return self.playlist_from_matches(matches, video_id, video_title, lambda m: unescapeHTML(m)) # Look for Wordpress "YouTube Video Importer" plugin matches = re.findall(r'''(?x)<div[^>]+ class=(?P<q1>[\'"])[^\'"]*\byvii_single_video_player\b[^\'"]*(?P=q1)[^>]+ data-video_id=(?P<q2>[\'"])([^\'"]+)(?P=q2)''', webpage) if matches: - return _playlist_from_matches(matches, lambda m: m[-1]) + return self.playlist_from_matches(matches, video_id, video_title, lambda m: m[-1]) matches = DailymotionIE._extract_urls(webpage) if matches: - return _playlist_from_matches(matches) + return self.playlist_from_matches(matches, video_id, video_title) # Look for embedded Dailymotion playlist player (#3822) m = re.search( @@ -1939,8 +1931,8 @@ class GenericIE(InfoExtractor): playlists = re.findall( r'list\[\]=/playlist/([^/]+)/', unescapeHTML(m.group('url'))) if playlists: - return _playlist_from_matches( - playlists, lambda p: '//dailymotion.com/playlist/%s' % p) + return self.playlist_from_matches( + playlists, video_id, video_title, lambda p: '//dailymotion.com/playlist/%s' % p) # Look for embedded Wistia player match = re.search( @@ -2047,8 +2039,9 @@ class GenericIE(InfoExtractor): if mobj is not None: embeds = self._parse_json(mobj.group(1), video_id, fatal=False) if embeds: - return _playlist_from_matches( - embeds, getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala') + return self.playlist_from_matches( + embeds, video_id, video_title, + getter=lambda v: OoyalaIE._url_for_embed_code(smuggle_url(v['provider_video_id'], {'domain': url})), ie='Ooyala') # Look for Aparat videos mobj = re.search(r'<iframe .*?src="(http://www\.aparat\.com/video/[^"]+)"', webpage) @@ -2110,13 +2103,13 @@ class GenericIE(InfoExtractor): # Look for funnyordie embed matches = re.findall(r'<iframe[^>]+?src="(https?://(?:www\.)?funnyordie\.com/embed/[^"]+)"', webpage) if matches: - return _playlist_from_matches( - matches, getter=unescapeHTML, ie='FunnyOrDie') + return self.playlist_from_matches( + matches, video_id, video_title, getter=unescapeHTML, ie='FunnyOrDie') # Look for BBC iPlayer embed matches = re.findall(r'setPlaylist\("(https?://www\.bbc\.co\.uk/iplayer/[^/]+/[\da-z]{8})"\)', webpage) if matches: - return _playlist_from_matches(matches, ie='BBCCoUk') + return self.playlist_from_matches(matches, video_id, video_title, ie='BBCCoUk') # Look for embedded RUTV player rutv_url = RUTVIE._extract_url(webpage) @@ -2131,32 +2124,32 @@ class GenericIE(InfoExtractor): # Look for embedded SportBox player sportbox_urls = SportBoxEmbedIE._extract_urls(webpage) if sportbox_urls: - return _playlist_from_matches(sportbox_urls, ie='SportBoxEmbed') + 
return self.playlist_from_matches(sportbox_urls, video_id, video_title, ie='SportBoxEmbed') # Look for embedded XHamster player xhamster_urls = XHamsterEmbedIE._extract_urls(webpage) if xhamster_urls: - return _playlist_from_matches(xhamster_urls, ie='XHamsterEmbed') + return self.playlist_from_matches(xhamster_urls, video_id, video_title, ie='XHamsterEmbed') # Look for embedded TNAFlixNetwork player tnaflix_urls = TNAFlixNetworkEmbedIE._extract_urls(webpage) if tnaflix_urls: - return _playlist_from_matches(tnaflix_urls, ie=TNAFlixNetworkEmbedIE.ie_key()) + return self.playlist_from_matches(tnaflix_urls, video_id, video_title, ie=TNAFlixNetworkEmbedIE.ie_key()) # Look for embedded PornHub player pornhub_urls = PornHubIE._extract_urls(webpage) if pornhub_urls: - return _playlist_from_matches(pornhub_urls, ie=PornHubIE.ie_key()) + return self.playlist_from_matches(pornhub_urls, video_id, video_title, ie=PornHubIE.ie_key()) # Look for embedded DrTuber player drtuber_urls = DrTuberIE._extract_urls(webpage) if drtuber_urls: - return _playlist_from_matches(drtuber_urls, ie=DrTuberIE.ie_key()) + return self.playlist_from_matches(drtuber_urls, video_id, video_title, ie=DrTuberIE.ie_key()) # Look for embedded RedTube player redtube_urls = RedTubeIE._extract_urls(webpage) if redtube_urls: - return _playlist_from_matches(redtube_urls, ie=RedTubeIE.ie_key()) + return self.playlist_from_matches(redtube_urls, video_id, video_title, ie=RedTubeIE.ie_key()) # Look for embedded Tvigle player mobj = re.search( @@ -2202,12 +2195,12 @@ class GenericIE(InfoExtractor): # Look for embedded soundcloud player soundcloud_urls = SoundcloudIE._extract_urls(webpage) if soundcloud_urls: - return _playlist_from_matches(soundcloud_urls, getter=unescapeHTML, ie=SoundcloudIE.ie_key()) + return self.playlist_from_matches(soundcloud_urls, video_id, video_title, getter=unescapeHTML, ie=SoundcloudIE.ie_key()) # Look for tunein player tunein_urls = TuneInBaseIE._extract_urls(webpage) if tunein_urls: - return _playlist_from_matches(tunein_urls) + return self.playlist_from_matches(tunein_urls, video_id, video_title) # Look for embedded mtvservices player mtvservices_url = MTVServicesEmbeddedIE._extract_url(webpage) @@ -2490,35 +2483,35 @@ class GenericIE(InfoExtractor): # Look for DBTV embeds dbtv_urls = DBTVIE._extract_urls(webpage) if dbtv_urls: - return _playlist_from_matches(dbtv_urls, ie=DBTVIE.ie_key()) + return self.playlist_from_matches(dbtv_urls, video_id, video_title, ie=DBTVIE.ie_key()) # Look for Videa embeds videa_urls = VideaIE._extract_urls(webpage) if videa_urls: - return _playlist_from_matches(videa_urls, ie=VideaIE.ie_key()) + return self.playlist_from_matches(videa_urls, video_id, video_title, ie=VideaIE.ie_key()) # Look for 20 minuten embeds twentymin_urls = TwentyMinutenIE._extract_urls(webpage) if twentymin_urls: - return _playlist_from_matches( - twentymin_urls, ie=TwentyMinutenIE.ie_key()) + return self.playlist_from_matches( + twentymin_urls, video_id, video_title, ie=TwentyMinutenIE.ie_key()) # Look for Openload embeds openload_urls = OpenloadIE._extract_urls(webpage) if openload_urls: - return _playlist_from_matches( - openload_urls, ie=OpenloadIE.ie_key()) + return self.playlist_from_matches( + openload_urls, video_id, video_title, ie=OpenloadIE.ie_key()) # Look for VideoPress embeds videopress_urls = VideoPressIE._extract_urls(webpage) if videopress_urls: - return _playlist_from_matches( - videopress_urls, ie=VideoPressIE.ie_key()) + return self.playlist_from_matches( + videopress_urls, video_id, 
video_title, ie=VideoPressIE.ie_key()) # Look for Rutube embeds rutube_urls = RutubeIE._extract_urls(webpage) if rutube_urls: - return _playlist_from_matches( + return self.playlist_from_matches( rutube_urls, ie=RutubeIE.ie_key()) # Looking for http://schema.org/VideoObject -- cgit 1.4.1 From d66d43c5547daf4fc1a269824a8432477fbb099d Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 25 Mar 2017 18:13:13 +0100 Subject: [atvat] Add new extractor(closes #5325) --- youtube_dl/extractor/atvat.py | 73 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 74 insertions(+) create mode 100644 youtube_dl/extractor/atvat.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/atvat.py b/youtube_dl/extractor/atvat.py new file mode 100644 index 000000000..1584d53fc --- /dev/null +++ b/youtube_dl/extractor/atvat.py @@ -0,0 +1,73 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + int_or_none, + unescapeHTML, +) + + +class ATVAtIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?atv\.at/(?:[^/]+/){2}(?P<id>[dv]\d+)' + _TESTS = [{ + 'url': 'http://atv.at/aktuell/di-210317-2005-uhr/v1698449/', + 'md5': 'c3b6b975fb3150fc628572939df205f2', + 'info_dict': { + 'id': '1698447', + 'ext': 'mp4', + 'title': 'DI, 21.03.17 | 20:05 Uhr 1/1', + } + }, { + 'url': 'http://atv.at/aktuell/meinrad-knapp/d8416/', + 'only_matching': True, + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + video_data = self._parse_json(unescapeHTML(self._search_regex( + r'class="[^"]*jsb_video/FlashPlayer[^"]*"[^>]+data-jsb="([^"]+)"', + webpage, 'player data')), display_id)['config']['initial_video'] + + video_id = video_data['id'] + video_title = video_data['title'] + + parts = [] + for part in video_data.get('parts', []): + part_id = part['id'] + part_title = part['title'] + + formats = [] + for source in part.get('sources', []): + source_url = source.get('src') + if not source_url: + continue + ext = determine_ext(source_url) + if ext == 'm3u8': + formats.extend(self._extract_m3u8_formats( + source_url, part_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + else: + formats.append({ + 'format_id': source.get('delivery'), + 'url': source_url, + }) + self._sort_formats(formats) + + parts.append({ + 'id': part_id, + 'title': part_title, + 'thumbnail': part.get('preview_image_url'), + 'duration': int_or_none(part.get('duration')), + 'is_live': part.get('is_livestream'), + 'formats': formats, + }) + + return { + '_type': 'multi_video', + 'id': video_id, + 'title': video_title, + 'entries': parts, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 40a5c9842..6a7028a4d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -71,6 +71,7 @@ from .arte import ( ) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE +from .atvat import ATVAtIE from .audimedia import AudiMediaIE from .audioboom import AudioBoomIE from .audiomack import AudiomackIE, AudiomackAlbumIE -- cgit 1.4.1 From 82be732b174ea8e9984e7b0582c69e41b266d1da Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 31 Mar 2017 12:24:23 +0100 Subject: [adn] Add new extractor --- youtube_dl/extractor/adn.py | 136 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 
1 + 2 files changed, 137 insertions(+) create mode 100644 youtube_dl/extractor/adn.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/adn.py b/youtube_dl/extractor/adn.py new file mode 100644 index 000000000..e44caa00b --- /dev/null +++ b/youtube_dl/extractor/adn.py @@ -0,0 +1,136 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import base64 +import json +import os + +from .common import InfoExtractor +from ..aes import aes_cbc_decrypt +from ..compat import compat_ord +from ..utils import ( + bytes_to_intlist, + ExtractorError, + float_or_none, + intlist_to_bytes, + srt_subtitles_timecode, + strip_or_none, +) + + +class ADNIE(InfoExtractor): + IE_DESC = 'Anime Digital Network' + _VALID_URL = r'https?://(?:www\.)?animedigitalnetwork\.fr/video/[^/]+/(?P<id>\d+)' + _TEST = { + 'url': 'http://animedigitalnetwork.fr/video/blue-exorcist-kyoto-saga/7778-episode-1-debut-des-hostilites', + 'md5': 'e497370d847fd79d9d4c74be55575c7a', + 'info_dict': { + 'id': '7778', + 'ext': 'mp4', + 'title': 'Blue Exorcist - Kyôto Saga - Épisode 1', + 'description': 'md5:2f7b5aa76edbc1a7a92cedcda8a528d5', + } + } + + def _get_subtitles(self, sub_path, video_id): + if not sub_path: + return None + + enc_subtitles = self._download_webpage( + 'http://animedigitalnetwork.fr/' + sub_path, + video_id, fatal=False) + if not enc_subtitles: + return None + + # http://animedigitalnetwork.fr/components/com_vodvideo/videojs/adn-vjs.min.js + dec_subtitles = intlist_to_bytes(aes_cbc_decrypt( + bytes_to_intlist(base64.b64decode(enc_subtitles[24:])), + bytes_to_intlist(b'\xb5@\xcfq\xa3\x98"N\xe4\xf3\x12\x98}}\x16\xd8'), + bytes_to_intlist(base64.b64decode(enc_subtitles[:24])) + )) + subtitles_json = self._parse_json( + dec_subtitles[:-compat_ord(dec_subtitles[-1])], + None, fatal=False) + if not subtitles_json: + return None + + subtitles = {} + for sub_lang, sub in subtitles_json.items(): + srt = '' + for num, current in enumerate(sub): + start, end, text = ( + float_or_none(current.get('startTime')), + float_or_none(current.get('endTime')), + current.get('text')) + if start is None or end is None or text is None: + continue + srt += os.linesep.join( + ( + '%d' % num, + '%s --> %s' % ( + srt_subtitles_timecode(start), + srt_subtitles_timecode(end)), + text, + os.linesep, + )) + + if sub_lang == 'vostf': + sub_lang = 'fr' + subtitles.setdefault(sub_lang, []).extend([{ + 'ext': 'json', + 'data': json.dumps(sub), + }, { + 'ext': 'srt', + 'data': srt, + }]) + return subtitles + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + player_config = self._parse_json(self._search_regex( + r'playerConfig\s*=\s*({.+});', webpage, 'player config'), video_id) + + video_info = {} + video_info_str = self._search_regex( + r'videoInfo\s*=\s*({.+});', webpage, + 'video info', fatal=False) + if video_info_str: + video_info = self._parse_json( + video_info_str, video_id, fatal=False) or {} + + options = player_config.get('options') or {} + metas = options.get('metas') or {} + title = metas.get('title') or video_info['title'] + links = player_config.get('links') or {} + + formats = [] + for format_id, qualities in links.items(): + for load_balancer_url in qualities.values(): + load_balancer_data = self._download_json( + load_balancer_url, video_id, fatal=False) or {} + m3u8_url = load_balancer_data.get('location') + if not m3u8_url: + continue + m3u8_formats = self._extract_m3u8_formats( + m3u8_url, video_id, 'mp4', 'm3u8_native', + 
m3u8_id=format_id, fatal=False) + if format_id == 'vf': + for f in m3u8_formats: + f['language'] = 'fr' + formats.extend(m3u8_formats) + error = options.get('error') + if not formats and error: + raise ExtractorError('%s said: %s' % (self.IE_NAME, error), expected=True) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'description': strip_or_none(metas.get('summary') or video_info.get('resume')), + 'thumbnail': video_info.get('image'), + 'formats': formats, + 'subtitles': self.extract_subtitles(player_config.get('subtitles'), video_id), + 'episode': metas.get('subtitle') or video_info.get('videoTitle'), + 'series': video_info.get('playlistTitle'), + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 6a7028a4d..43933ad5b 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -19,6 +19,7 @@ from .acast import ( ACastChannelIE, ) from .addanime import AddAnimeIE +from .adn import ADNIE from .adobetv import ( AdobeTVIE, AdobeTVShowIE, -- cgit 1.4.1 From 7453999580f2809153a84420d3ca72b24186c02b Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 1 Apr 2017 00:25:27 +0700 Subject: [packtpub] Add extractor (closes #12610) --- youtube_dl/extractor/extractors.py | 4 ++ youtube_dl/extractor/packtpub.py | 138 +++++++++++++++++++++++++++++++++++++ 2 files changed, 142 insertions(+) create mode 100644 youtube_dl/extractor/packtpub.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 43933ad5b..6ad7444fe 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -729,6 +729,10 @@ from .orf import ( ORFFM4IE, ORFIPTVIE, ) +from .packtpub import ( + PacktPubIE, + PacktPubCourseIE, +) from .pandatv import PandaTVIE from .pandoratv import PandoraTVIE from .parliamentliveuk import ParliamentLiveUKIE diff --git a/youtube_dl/extractor/packtpub.py b/youtube_dl/extractor/packtpub.py new file mode 100644 index 000000000..881f3bcc7 --- /dev/null +++ b/youtube_dl/extractor/packtpub.py @@ -0,0 +1,138 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + clean_html, + ExtractorError, + remove_end, + strip_or_none, + unified_timestamp, + urljoin, +) + + +class PacktPubBaseIE(InfoExtractor): + _PACKT_BASE = 'https://www.packtpub.com' + _MAPT_REST = '%s/mapt-rest' % _PACKT_BASE + + +class PacktPubIE(PacktPubBaseIE): + _VALID_URL = r'https?://(?:www\.)?packtpub\.com/mapt/video/[^/]+/(?P<course_id>\d+)/(?P<chapter_id>\d+)/(?P<id>\d+)' + + _TEST = { + 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215/20528/20530/Project+Intro', + 'md5': '1e74bd6cfd45d7d07666f4684ef58f70', + 'info_dict': { + 'id': '20530', + 'ext': 'mp4', + 'title': 'Project Intro', + 'thumbnail': r're:(?i)^https?://.*\.jpg', + 'timestamp': 1490918400, + 'upload_date': '20170331', + }, + } + + def _handle_error(self, response): + if response.get('status') != 'success': + raise ExtractorError( + '% said: %s' % (self.IE_NAME, response['message']), + expected=True) + + def _download_json(self, *args, **kwargs): + response = super(PacktPubIE, self)._download_json(*args, **kwargs) + self._handle_error(response) + return response + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + course_id, chapter_id, video_id = mobj.group( + 'course_id', 'chapter_id', 'id') + + video 
= self._download_json( + '%s/users/me/products/%s/chapters/%s/sections/%s' + % (self._MAPT_REST, course_id, chapter_id, video_id), video_id, + 'Downloading JSON video')['data'] + + content = video.get('content') + if not content: + raise ExtractorError('This video is locked', expected=True) + + video_url = content['file'] + + metadata = self._download_json( + '%s/products/%s/chapters/%s/sections/%s/metadata' + % (self._MAPT_REST, course_id, chapter_id, video_id), + video_id)['data'] + + title = metadata['pageTitle'] + course_title = metadata.get('title') + if course_title: + title = remove_end(title, ' - %s' % course_title) + timestamp = unified_timestamp(metadata.get('publicationDate')) + thumbnail = urljoin(self._PACKT_BASE, metadata.get('filepath')) + + return { + 'id': video_id, + 'url': video_url, + 'title': title, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + } + + +class PacktPubCourseIE(PacktPubBaseIE): + _VALID_URL = r'(?P<url>https?://(?:www\.)?packtpub\.com/mapt/video/[^/]+/(?P<id>\d+))' + _TEST = { + 'url': 'https://www.packtpub.com/mapt/video/web-development/9781787122215', + 'info_dict': { + 'id': '9781787122215', + 'title': 'Learn Nodejs by building 12 projects [Video]', + }, + 'playlist_count': 90, + } + + @classmethod + def suitable(cls, url): + return False if PacktPubIE.suitable(url) else super( + PacktPubCourseIE, cls).suitable(url) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + url, course_id = mobj.group('url', 'id') + + course = self._download_json( + '%s/products/%s/metadata' % (self._MAPT_REST, course_id), + course_id)['data'] + + entries = [] + for chapter_num, chapter in enumerate(course['tableOfContents'], 1): + if chapter.get('type') != 'chapter': + continue + children = chapter.get('children') + if not isinstance(children, list): + continue + chapter_info = { + 'chapter': chapter.get('title'), + 'chapter_number': chapter_num, + 'chapter_id': chapter.get('id'), + } + for section in children: + if section.get('type') != 'section': + continue + section_url = section.get('seoUrl') + if not isinstance(section_url, compat_str): + continue + entry = { + '_type': 'url_transparent', + 'url': urljoin(url + '/', section_url), + 'title': strip_or_none(section.get('title')), + 'description': clean_html(section.get('summary')), + 'ie_key': PacktPubIE.ie_key(), + } + entry.update(chapter_info) + entries.append(entry) + + return self.playlist_result(entries, course_id, course.get('title')) -- cgit 1.4.1 From 77c8ebe6318055cc34eaedca63f4866c4c47437a Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Fri, 31 Mar 2017 23:28:24 +0100 Subject: [vrv] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vrv.py | 151 +++++++++++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 youtube_dl/extractor/vrv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 6ad7444fe..1b427e256 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1182,6 +1182,7 @@ from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE from .vrak import VrakIE +from .vrv import VRVIE from .medialaan import MedialaanIE from .vube import VubeIE from .vuclip import VuClipIE diff --git a/youtube_dl/extractor/vrv.py b/youtube_dl/extractor/vrv.py new file mode 100644 index 000000000..33618c951 --- /dev/null +++ b/youtube_dl/extractor/vrv.py @@ -0,0 
+1,151 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import base64 +import json +import hashlib +import hmac +import random +import string +import time + +from .common import InfoExtractor +from ..compat import ( + compat_urllib_parse_urlencode, + compat_urlparse, +) +from ..utils import ( + float_or_none, + int_or_none, +) + + +class VRVIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)' + _TEST = { + 'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT', + 'info_dict': { + 'id': 'GR9PNZ396', + 'ext': 'mp4', + 'title': 'BOSTON: WHERE THE PAST IS THE PRESENT', + 'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f', + 'uploader_id': 'seeso', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } + _API_DOMAIN = None + _API_PARAMS = {} + _CMS_SIGNING = {} + + def _call_api(self, path, video_id, note, data=None): + base_url = self._API_DOMAIN + '/core/' + path + encoded_query = compat_urllib_parse_urlencode({ + 'oauth_consumer_key': self._API_PARAMS['oAuthKey'], + 'oauth_nonce': ''.join([random.choice(string.ascii_letters) for _ in range(32)]), + 'oauth_signature_method': 'HMAC-SHA1', + 'oauth_timestamp': int(time.time()), + 'oauth_version': '1.0', + }) + headers = self.geo_verification_headers() + if data: + data = json.dumps(data).encode() + headers['Content-Type'] = 'application/json' + method = 'POST' if data else 'GET' + base_string = '&'.join([method, compat_urlparse.quote(base_url, ''), compat_urlparse.quote(encoded_query, '')]) + oauth_signature = base64.b64encode(hmac.new( + (self._API_PARAMS['oAuthSecret'] + '&').encode('ascii'), + base_string.encode(), hashlib.sha1).digest()).decode() + encoded_query += '&oauth_signature=' + compat_urlparse.quote(oauth_signature, '') + return self._download_json( + '?'.join([base_url, encoded_query]), video_id, + note='Downloading %s JSON metadata' % note, headers=headers, data=data) + + def _call_cms(self, path, video_id, note): + return self._download_json( + self._API_DOMAIN + path, video_id, query=self._CMS_SIGNING, + note='Downloading %s JSON metadata' % note, headers=self.geo_verification_headers()) + + def _set_api_params(self, webpage, video_id): + if not self._API_PARAMS: + self._API_PARAMS = self._parse_json(self._search_regex( + r'window\.__APP_CONFIG__\s*=\s*({.+?})</script>', + webpage, 'api config'), video_id)['cxApiParams'] + self._API_DOMAIN = self._API_PARAMS.get('apiDomain', 'https://api.vrv.co') + + def _set_cms_signing(self, video_id): + if not self._CMS_SIGNING: + self._CMS_SIGNING = self._call_api('index', video_id, 'CMS Signing')['cms_signing'] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage( + url, video_id, + headers=self.geo_verification_headers()) + media_resource = self._parse_json(self._search_regex( + r'window\.__INITIAL_STATE__\s*=\s*({.+?})</script>', + webpage, 'inital state'), video_id).get('watch', {}).get('mediaResource') or {} + + video_data = media_resource.get('json') + if not video_data: + self._set_api_params(webpage, video_id) + episode_path = self._call_api('cms_resource', video_id, 'episode resource path', data={ + 'resource_key': 'cms:/episodes/' + video_id, + })['__links__']['cms_resource']['href'] + self._set_cms_signing(video_id) + video_data = self._call_cms(episode_path, video_id, 'video') + title = video_data['title'] + + streams_json = media_resource.get('streams', {}).get('json', {}) + if not streams_json: + 
self._set_api_params(webpage, video_id) + streams_path = video_data['__links__']['streams']['href'] + self._set_cms_signing(video_id) + streams_json = self._call_cms(streams_path, video_id, 'streams') + + audio_locale = streams_json.get('audio_locale') + formats = [] + for stream_id, stream in streams_json.get('streams', {}).get('adaptive_hls', {}).items(): + stream_url = stream.get('url') + if not stream_url: + continue + stream_id = stream_id or audio_locale + m3u8_formats = self._extract_m3u8_formats( + stream_url, video_id, 'mp4', m3u8_id=stream_id, + note='Downloading %s m3u8 information' % stream_id, + fatal=False) + if audio_locale: + for f in m3u8_formats: + f['language'] = audio_locale + formats.extend(m3u8_formats) + self._sort_formats(formats) + + thumbnails = [] + for thumbnail in video_data.get('images', {}).get('thumbnails', []): + thumbnail_url = thumbnail.get('source') + if not thumbnail_url: + continue + thumbnails.append({ + 'url': thumbnail_url, + 'width': int_or_none(thumbnail.get('width')), + 'height': int_or_none(thumbnail.get('height')), + }) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnails': thumbnails, + 'description': video_data.get('description'), + 'duration': float_or_none(video_data.get('duration_ms'), 1000), + 'uploader_id': video_data.get('channel_id'), + 'series': video_data.get('series_title'), + 'season': video_data.get('season_title'), + 'season_number': int_or_none(video_data.get('season_number')), + 'season_id': video_data.get('season_id'), + 'episode': title, + 'episode_number': int_or_none(video_data.get('episode_number')), + 'episode_id': video_data.get('production_episode_id'), + } -- cgit 1.4.1 From 48ab554feb9c6d3e0f13e1357e04f4c89089e2d3 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 1 Apr 2017 18:09:36 +0100 Subject: [vrv] add support for series pages --- youtube_dl/extractor/extractors.py | 5 ++- youtube_dl/extractor/vrv.py | 88 +++++++++++++++++++++++++++----------- 2 files changed, 68 insertions(+), 25 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 1b427e256..980333a11 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1182,7 +1182,10 @@ from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE from .vrak import VrakIE -from .vrv import VRVIE +from .vrv import ( + VRVIE, + VRVSeriesIE, +) from .medialaan import MedialaanIE from .vube import VubeIE from .vuclip import VuClipIE diff --git a/youtube_dl/extractor/vrv.py b/youtube_dl/extractor/vrv.py index 33618c951..487047fd7 100644 --- a/youtube_dl/extractor/vrv.py +++ b/youtube_dl/extractor/vrv.py @@ -20,22 +20,7 @@ from ..utils import ( ) -class VRVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)' - _TEST = { - 'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT', - 'info_dict': { - 'id': 'GR9PNZ396', - 'ext': 'mp4', - 'title': 'BOSTON: WHERE THE PAST IS THE PRESENT', - 'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f', - 'uploader_id': 'seeso', - }, - 'params': { - # m3u8 download - 'skip_download': True, - }, - } +class VRVBaseIE(InfoExtractor): _API_DOMAIN = None _API_PARAMS = {} _CMS_SIGNING = {} @@ -64,6 +49,8 @@ class VRVIE(InfoExtractor): note='Downloading %s JSON metadata' % note, headers=headers, data=data) def _call_cms(self, path, video_id, note): + 
if not self._CMS_SIGNING: + self._CMS_SIGNING = self._call_api('index', video_id, 'CMS Signing')['cms_signing'] return self._download_json( self._API_DOMAIN + path, video_id, query=self._CMS_SIGNING, note='Downloading %s JSON metadata' % note, headers=self.geo_verification_headers()) @@ -75,9 +62,30 @@ class VRVIE(InfoExtractor): webpage, 'api config'), video_id)['cxApiParams'] self._API_DOMAIN = self._API_PARAMS.get('apiDomain', 'https://api.vrv.co') - def _set_cms_signing(self, video_id): - if not self._CMS_SIGNING: - self._CMS_SIGNING = self._call_api('index', video_id, 'CMS Signing')['cms_signing'] + def _get_cms_resource(self, resource_key, video_id): + return self._call_api( + 'cms_resource', video_id, 'resource path', data={ + 'resource_key': resource_key, + })['__links__']['cms_resource']['href'] + + +class VRVIE(VRVBaseIE): + IE_NAME = 'vrv' + _VALID_URL = r'https?://(?:www\.)?vrv\.co/watch/(?P<id>[A-Z0-9]+)' + _TEST = { + 'url': 'https://vrv.co/watch/GR9PNZ396/Hidden-America-with-Jonah-Ray:BOSTON-WHERE-THE-PAST-IS-THE-PRESENT', + 'info_dict': { + 'id': 'GR9PNZ396', + 'ext': 'mp4', + 'title': 'BOSTON: WHERE THE PAST IS THE PRESENT', + 'description': 'md5:4ec8844ac262ca2df9e67c0983c6b83f', + 'uploader_id': 'seeso', + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + } def _real_extract(self, url): video_id = self._match_id(url) @@ -91,10 +99,8 @@ class VRVIE(InfoExtractor): video_data = media_resource.get('json') if not video_data: self._set_api_params(webpage, video_id) - episode_path = self._call_api('cms_resource', video_id, 'episode resource path', data={ - 'resource_key': 'cms:/episodes/' + video_id, - })['__links__']['cms_resource']['href'] - self._set_cms_signing(video_id) + episode_path = self._get_cms_resource( + 'cms:/episodes/' + video_id, video_id) video_data = self._call_cms(episode_path, video_id, 'video') title = video_data['title'] @@ -102,7 +108,6 @@ class VRVIE(InfoExtractor): if not streams_json: self._set_api_params(webpage, video_id) streams_path = video_data['__links__']['streams']['href'] - self._set_cms_signing(video_id) streams_json = self._call_cms(streams_path, video_id, 'streams') audio_locale = streams_json.get('audio_locale') @@ -149,3 +154,38 @@ class VRVIE(InfoExtractor): 'episode_number': int_or_none(video_data.get('episode_number')), 'episode_id': video_data.get('production_episode_id'), } + + +class VRVSeriesIE(VRVBaseIE): + IE_NAME = 'vrv:series' + _VALID_URL = r'https?://(?:www\.)?vrv\.co/series/(?P<id>[A-Z0-9]+)' + _TEST = { + 'url': 'https://vrv.co/series/G68VXG3G6/The-Perfect-Insider', + 'info_dict': { + 'id': 'G68VXG3G6', + }, + 'playlist_mincount': 11, + } + + def _real_extract(self, url): + series_id = self._match_id(url) + webpage = self._download_webpage( + url, series_id, + headers=self.geo_verification_headers()) + + self._set_api_params(webpage, series_id) + seasons_path = self._get_cms_resource( + 'cms:/seasons?series_id=' + series_id, series_id) + seasons_data = self._call_cms(seasons_path, series_id, 'seasons') + + entries = [] + for season in seasons_data.get('items', []): + episodes_path = season['__links__']['season/episodes']['href'] + episodes = self._call_cms(episodes_path, series_id, 'episodes') + for episode in episodes.get('items', []): + episode_id = episode['id'] + entries.append(self.url_result( + 'https://vrv.co/watch/' + episode_id, + 'VRV', episode_id, episode.get('title'))) + + return self.playlist_result(entries, series_id) -- cgit 1.4.1 From 51342717cddafde83dbf39f2212be40a196a577a Mon Sep 17 
00:00:00 2001 From: Timendum <timedum@gmail.com> Date: Tue, 14 Mar 2017 16:11:09 +0100 Subject: [rai] Fix extraction --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/rai.py | 355 ++++++++++++++++++++----------------- 2 files changed, 195 insertions(+), 162 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 980333a11..d9e8d53ac 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -802,7 +802,7 @@ from .radiojavan import RadioJavanIE from .radiobremen import RadioBremenIE from .radiofrance import RadioFranceIE from .rai import ( - RaiTVIE, + RaiPlayIE, RaiIE, ) from .rbmaradio import RBMARadioIE diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py index 41afbd9af..b67e94f88 100644 --- a/youtube_dl/extractor/rai.py +++ b/youtube_dl/extractor/rai.py @@ -3,8 +3,8 @@ from __future__ import unicode_literals from .common import InfoExtractor from ..compat import compat_urlparse from ..utils import ( - determine_ext, ExtractorError, + determine_ext, find_xpath_attr, fix_xml_ampersands, int_or_none, @@ -55,181 +55,157 @@ class RaiBaseIE(InfoExtractor): return formats - def _extract_from_content_id(self, content_id, base_url): - media = self._download_json( - 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % content_id, - content_id, 'Downloading video JSON') - thumbnails = [] - for image_type in ('image', 'image_medium', 'image_300'): - thumbnail_url = media.get(image_type) - if thumbnail_url: - thumbnails.append({ - 'url': compat_urlparse.urljoin(base_url, thumbnail_url), - }) +class RaiPlayIE(RaiBaseIE): + _VALID_URL = r'https?://(?:www\.)?raiplay\.it/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})\.html' + _TESTS = [{ + 'url': 'http://www.raiplay.it/video/2016/10/La-Casa-Bianca-e06118bb-59a9-4636-b914-498e4cfd2c66.html?source=twitter', + 'md5': '340aa3b7afb54bfd14a8c11786450d76', + 'info_dict': { + 'id': 'e06118bb-59a9-4636-b914-498e4cfd2c66', + 'ext': 'mp4', + 'title': 'La Casa Bianca', + 'thumbnail': r're:^https?://.*\.jpg$', + 'uploader': r're:^Rai.+', + 'description': 're:^[A-Za-z]+' + } + }, { + 'url': 'http://www.raiplay.it/video/2016/11/gazebotraindesi-efebe701-969c-4593-92f3-285f0d1ce750.html?', + 'md5': 'ed4da3d70ccf8129a33ab16b34d20ab8', + 'info_dict': { + 'id': 'efebe701-969c-4593-92f3-285f0d1ce750', + 'ext': 'mp4', + 'title': 'Gazebo - #gazebotraindesi', + 'thumbnail': r're:^https?://.*\.png$', + 'uploader': r're:^Rai.+', + 'description': r're:^[A-Za-z]+' + } + }, { + 'url': 'http://www.raiplay.it/video/2014/04/Report-del-07042014-cb27157f-9dd0-4aee-b788-b1f67643a391.html', + 'md5': '8970abf8caf8aef4696e7b1f2adfc696', + 'info_dict': { + 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391', + 'ext': 'mp4', + 'title': 'Report - Report del 07/04/2014', + 'thumbnail': r're:^https?://.*\.jpg$', + 'uploader': r're:^Rai.+', + 'description': r're:^[A-Za-z]+' + } + }] + _RESOLUTION = '600x400' - formats = [] - media_type = media['type'] - if 'Audio' in media_type: - formats.append({ - 'format_id': media.get('formatoAudio'), - 'url': media['audioUrl'], - 'ext': media.get('formatoAudio'), - }) - elif 'Video' in media_type: - formats.extend(self._extract_relinker_formats(media['mediaUri'], content_id)) - self._sort_formats(formats) - else: - raise ExtractorError('not a media file') + def _real_extract(self, url): + video_id = self._match_id(url) - subtitles = {} - captions = 
media.get('subtitlesUrl') - if captions: - STL_EXT = '.stl' - SRT_EXT = '.srt' - if captions.endswith(STL_EXT): - captions = captions[:-len(STL_EXT)] + SRT_EXT - subtitles['it'] = [{ - 'ext': 'srt', - 'url': captions, - }] + # remove query and fragment part from url + canonical_url = compat_urlparse.urljoin(url, compat_urlparse.urlparse(url).path) + webpage = self._download_webpage(canonical_url, video_id) - return { - 'id': content_id, - 'title': media['name'], - 'description': media.get('desc'), - 'thumbnails': thumbnails, - 'uploader': media.get('author'), - 'upload_date': unified_strdate(media.get('date')), - 'duration': parse_duration(media.get('length')), - 'formats': formats, - 'subtitles': subtitles, - } + media = self._download_json('%s?json' % canonical_url, + video_id, 'Downloading video JSON') + thumbnails = [] + if 'images' in media: + for _, value in media.get('images').items(): + if value: + thumbnails.append({ + 'url': value.replace('[RESOLUTION]', self._RESOLUTION) + }) -class RaiTVIE(RaiBaseIE): - _VALID_URL = r'https?://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/(?:[^/]+/)+(?:media|ondemand)/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html' - _TESTS = [ - { - 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-cb27157f-9dd0-4aee-b788-b1f67643a391.html', - 'md5': '8970abf8caf8aef4696e7b1f2adfc696', - 'info_dict': { - 'id': 'cb27157f-9dd0-4aee-b788-b1f67643a391', - 'ext': 'mp4', - 'title': 'Report del 07/04/2014', - 'description': 'md5:f27c544694cacb46a078db84ec35d2d9', - 'upload_date': '20140407', - 'duration': 6160, - 'thumbnail': r're:^https?://.*\.jpg$', - } - }, - { - # no m3u8 stream - 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html', - # HDS download, MD5 is unstable - 'info_dict': { - 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9', - 'ext': 'flv', - 'title': 'TG PRIMO TEMPO', - 'upload_date': '20140612', - 'duration': 1758, - 'thumbnail': r're:^https?://.*\.jpg$', - }, - 'skip': 'Geo-restricted to Italy', - }, - { - 'url': 'http://www.rainews.it/dl/rainews/media/state-of-the-net-Antonella-La-Carpia-regole-virali-7aafdea9-0e5d-49d5-88a6-7e65da67ae13.html', - 'md5': '35cf7c229f22eeef43e48b5cf923bef0', - 'info_dict': { - 'id': '7aafdea9-0e5d-49d5-88a6-7e65da67ae13', - 'ext': 'mp4', - 'title': 'State of the Net, Antonella La Carpia: regole virali', - 'description': 'md5:b0ba04a324126903e3da7763272ae63c', - 'upload_date': '20140613', - }, - 'skip': 'Error 404', - }, - { - 'url': 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-b4a49761-e0cc-4b14-8736-2729f6f73132-tg2.html', - 'info_dict': { - 'id': 'b4a49761-e0cc-4b14-8736-2729f6f73132', - 'ext': 'mp4', - 'title': 'Alluvione in Sardegna e dissesto idrogeologico', - 'description': 'Edizione delle ore 20:30 ', - }, - 'skip': 'invalid urls', - }, - { - 'url': 'http://www.ilcandidato.rai.it/dl/ray/media/Il-Candidato---Primo-episodio-Le-Primarie-28e5525a-b495-45e8-a7c3-bc48ba45d2b6.html', - 'md5': 'e57493e1cb8bc7c564663f363b171847', - 'info_dict': { - 'id': '28e5525a-b495-45e8-a7c3-bc48ba45d2b6', - 'ext': 'mp4', - 'title': 'Il Candidato - Primo episodio: "Le Primarie"', - 'description': 'md5:364b604f7db50594678f483353164fb8', - 'upload_date': '20140923', - 'duration': 386, - 'thumbnail': r're:^https?://.*\.jpg$', - } - }, - ] + if 'video' not in media: + raise ExtractorError('No video found') - def _real_extract(self, url): - video_id = self._match_id(url) + video = media.get('video') + duration = 
parse_duration(video.get('duration')), + formats = self._extract_relinker_formats(video.get('contentUrl'), video_id) + self._sort_formats(formats) - return self._extract_from_content_id(video_id, url) + return { + 'id': video_id, + 'title': self._og_search_title(webpage).replace(' - video - RaiPlay', ''), + 'description': self._og_search_description(webpage), + 'uploader': media.get('channel'), + 'duration': duration, + 'thumbnails': thumbnails, + 'formats': formats + } class RaiIE(RaiBaseIE): - _VALID_URL = r'https?://(?:.+?\.)?(?:rai\.it|rai\.tv|rainews\.it)/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html' - _TESTS = [ - { - 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', - 'md5': '2dd727e61114e1ee9c47f0da6914e178', - 'info_dict': { - 'id': '59d69d28-6bb6-409d-a4b5-ed44096560af', - 'ext': 'mp4', - 'title': 'Il pacco', - 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', - 'upload_date': '20141221', - }, + _VALID_URL = r'https?://.+\.(?:rai|rainews)\.it/dl/.+?-(?P<id>[\da-f]{8}-[\da-f]{4}-[\da-f]{4}-[\da-f]{4}-[\da-f]{12})(?:-.+?)?\.html' + _TESTS = [{ + # subdomain test case + 'url': 'http://www.raisport.rai.it/dl/raiSport/media/rassegna-stampa-04a9f4bd-b563-40cf-82a6-aad3529cb4a9.html', + 'info_dict': { + 'id': '04a9f4bd-b563-40cf-82a6-aad3529cb4a9', + 'ext': 'mp4', + 'title': 'TG PRIMO TEMPO', + 'upload_date': '20140612', + 'duration': 1758, + 'thumbnail': r're:^https?://.*\.jpg$' + } + }, { + # rainews test case + 'url': 'http://www.rainews.it/dl/rainews/media/Weekend-al-cinema-da-Hollywood-arriva-il-thriller-di-Tate-Taylor-La-ragazza-del-treno-1632c009-c843-4836-bb65-80c33084a64b.html', + 'info_dict': { + 'id': '1632c009-c843-4836-bb65-80c33084a64b', + 'ext': 'mp4', + 'title': 'Weekend al cinema, da Hollywood arriva il thriller di Tate Taylor \"La ragazza del treno\" ', + 'upload_date': '20161103', + 'thumbnail': r're:^https?://.*\.png$', + 'description': r're:^[A-Za-z]+' + } + }, { + # with media information + 'url': 'http://www.rai.it/dl/RaiTV/programmi/media/ContentItem-efb17665-691c-45d5-a60c-5301333cbb0c.html', + 'md5': '11959b4e44fa74de47011b5799490adf', + 'info_dict': { + 'id': 'efb17665-691c-45d5-a60c-5301333cbb0c', + 'ext': 'mp4', + 'title': 'TG1 ore 20:00 del 03/11/2016', + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20161103', + 'description': r're:^[A-Za-z]+' + } + }, { + # drawMediaRaiTV test case + 'url': 'http://www.report.rai.it/dl/Report/puntata/ContentItem-0c7a664b-d0f4-4b2c-8835-3f82e46f433e.html', + 'md5': '2dd727e61114e1ee9c47f0da6914e178', + 'info_dict': { + 'id': '59d69d28-6bb6-409d-a4b5-ed44096560af', + 'ext': 'mp4', + 'title': 'Il pacco', + 'description': 'md5:4b1afae1364115ce5d78ed83cd2e5b3a', + 'upload_date': '20141221', }, - { - # Direct relinker URL - 'url': 'http://www.rai.tv/dl/RaiTV/dirette/PublishingBlock-1912dbbf-3f96-44c3-b4cf-523681fbacbc.html?channel=EuroNews', - # HDS live stream, MD5 is unstable - 'info_dict': { - 'id': '1912dbbf-3f96-44c3-b4cf-523681fbacbc', - 'ext': 'flv', - 'title': 'EuroNews', - }, - 'skip': 'Geo-restricted to Italy', + }, { + # Direct relinker URL + 'url': 'http://www.rai.tv/dl/RaiTV/dirette/PublishingBlock-1912dbbf-3f96-44c3-b4cf-523681fbacbc.html?channel=EuroNews', + # HDS live stream, MD5 is unstable + 'info_dict': { + 'id': '1912dbbf-3f96-44c3-b4cf-523681fbacbc', + 'ext': 'flv', + 'title': 'EuroNews', }, - { - # Embedded content item ID - 'url': 
'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined', - 'md5': '84c1135ce960e8822ae63cec34441d63', - 'info_dict': { - 'id': '0960e765-62c8-474a-ac4b-7eb3e2be39c8', - 'ext': 'mp4', - 'title': 'TG1 ore 20:00 del 02/07/2016', - 'upload_date': '20160702', - }, + }, { + # Embedded content item ID + 'url': 'http://www.tg1.rai.it/dl/tg1/2010/edizioni/ContentSet-9b6e0cba-4bef-4aef-8cf0-9f7f665b7dfb-tg1.html?item=undefined', + 'info_dict': { + 'id': 'd80d4b70-3812-4501-a888-92edec729f00', + 'ext': 'mp4', + 'title': r're:TG1 ore \d{2}:\d{2} del \d{2}/\d{2}/\d{4}', + 'upload_date': r're:\d{8}', + 'description': r're:.+', }, - { - 'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html', - # HDS live stream, MD5 is unstable - 'info_dict': { - 'id': '3156f2f2-dc70-4953-8e2f-70d7489d4ce9', - 'ext': 'flv', - 'title': 'La diretta di Rainews24', - }, + }, { + 'url': 'http://www.rainews.it/dl/rainews/live/ContentItem-3156f2f2-dc70-4953-8e2f-70d7489d4ce9.html', + # HDS live stream, MD5 is unstable + 'info_dict': { + 'id': '3156f2f2-dc70-4953-8e2f-70d7489d4ce9', + 'ext': 'mp4', + 'title': 'La diretta di Rainews24', }, - ] - - @classmethod - def suitable(cls, url): - return False if RaiTVIE.suitable(url) else super(RaiIE, cls).suitable(url) + }] def _real_extract(self, url): video_id = self._match_id(url) @@ -250,6 +226,12 @@ class RaiIE(RaiBaseIE): if content_item_id: return self._extract_from_content_id(content_item_id, url) + try: + return self._extract_from_content_id(video_id, url) + except ExtractorError: + # no media data, only direct relinker + pass + relinker_url = compat_urlparse.urljoin(url, self._search_regex( r'(?:var\s+videoURL|mediaInfo\.mediaUri)\s*=\s*(?P<q1>[\'"])(?P<url>(https?:)?//mediapolis\.rai\.it/relinker/relinkerServlet\.htm\?cont=\d+)(?P=q1)', webpage, 'relinker URL', group='url')) @@ -265,3 +247,54 @@ class RaiIE(RaiBaseIE): 'title': title, 'formats': formats, } + + def _extract_from_content_id(self, content_id, url): + media = self._download_json( + 'http://www.rai.tv/dl/RaiTV/programmi/media/ContentItem-%s.html?json' % content_id, + content_id, 'Downloading video JSON') + + thumbnails = [] + for image_type in ('image', 'image_medium', 'image_300'): + thumbnail_url = media.get(image_type) + if thumbnail_url: + thumbnails.append({ + 'url': compat_urlparse.urljoin(url, thumbnail_url), + }) + + formats = [] + media_type = media['type'] + if 'Audio' in media_type: + formats.append({ + 'format_id': media.get('formatoAudio'), + 'url': media['audioUrl'], + 'ext': media.get('formatoAudio'), + }) + elif 'Video' in media_type: + formats.extend(self._extract_relinker_formats(media['mediaUri'], content_id)) + self._sort_formats(formats) + else: + raise ExtractorError('not a media file') + + subtitles = {} + captions = media.get('subtitlesUrl') + if captions: + STL_EXT = '.stl' + SRT_EXT = '.srt' + if captions.endswith(STL_EXT): + captions = captions[:-len(STL_EXT)] + SRT_EXT + subtitles['it'] = [{ + 'ext': 'srt', + 'url': captions, + }] + + return { + 'id': content_id, + 'title': media['name'], + 'description': media.get('desc'), + 'thumbnails': thumbnails, + 'uploader': media.get('author'), + 'upload_date': unified_strdate(media.get('date')), + 'duration': parse_duration(media.get('length')), + 'formats': formats, + 'subtitles': subtitles, + } -- cgit 1.4.1 From 61e2331ad83569b3256e32f415c22cc439c000ed Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Sat, 1 Apr 
2017 23:47:35 +0100 Subject: [tv5mondeplus] Add new extractor(closes #11386) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tv5mondeplus.py | 78 ++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 youtube_dl/extractor/tv5mondeplus.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d9e8d53ac..46e5e3a1d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1023,6 +1023,7 @@ from .tv2 import ( ) from .tv3 import TV3IE from .tv4 import TV4IE +from .tv5mondeplus import TV5MondePlusIE from .tva import TVAIE from .tvanouvelles import ( TVANouvellesIE, diff --git a/youtube_dl/extractor/tv5mondeplus.py b/youtube_dl/extractor/tv5mondeplus.py new file mode 100644 index 000000000..8b2d7812a --- /dev/null +++ b/youtube_dl/extractor/tv5mondeplus.py @@ -0,0 +1,78 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + determine_ext, + extract_attributes, + get_element_by_class, + int_or_none, + parse_duration, + parse_iso8601, +) + + +class TV5MondePlusIE(InfoExtractor): + IE_DESC = 'TV5MONDE+' + _VALID_URL = r'https?://(?:www\.)?tv5mondeplus\.com/toutes-les-videos/[^/]+/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'http://www.tv5mondeplus.com/toutes-les-videos/documentaire/tdah-mon-amour-tele-quebec-tdah-mon-amour-ep001-enfants', + 'md5': '12130fc199f020673138a83466542ec6', + 'info_dict': { + 'id': '0a774110-dc60-4037-f769-996439514f1f', + 'ext': 'mp4', + 'title': 'Tdah, mon amour - Enfants', + 'description': 'md5:b65f0cc50e46947e62e5d352e9916cc4', + 'upload_date': '20170401', + 'timestamp': 1491022860, + } + } + _GEO_BYPASS = False + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + if ">Ce programme n'est malheureusement pas disponible pour votre zone géographique.<" in webpage: + self.raise_geo_restricted(countries=['FR']) + + series = get_element_by_class('video-detail__title', webpage) + title = episode = get_element_by_class( + 'video-detail__subtitle', webpage) or series + if series and series != title: + title = '%s - %s' % (series, title) + vpl_data = extract_attributes(self._search_regex( + r'(<[^>]+class="video_player_loader"[^>]+>)', + webpage, 'video player loader')) + + video_files = self._parse_json( + vpl_data['data-broadcast'], display_id).get('files', []) + formats = [] + for video_file in video_files: + v_url = video_file.get('url') + if not v_url: + continue + video_format = video_file.get('format') or determine_ext(v_url) + if video_format == 'm3u8': + formats.extend(self._extract_m3u8_formats( + v_url, display_id, 'mp4', 'm3u8_native', + m3u8_id='hls', fatal=False)) + else: + formats.append({ + 'url': v_url, + 'format_id': video_format, + }) + self._sort_formats(formats) + + return { + 'id': vpl_data.get('data-guid') or display_id, + 'display_id': display_id, + 'title': title, + 'description': get_element_by_class('video-detail__description', webpage), + 'thumbnail': vpl_data.get('data-image'), + 'duration': int_or_none(vpl_data.get('data-duration')) or parse_duration(self._html_search_meta('duration', webpage)), + 'timestamp': parse_iso8601(self._html_search_meta('uploadDate', webpage)), + 'formats': formats, + 'episode': episode, + 'series': series, + } -- cgit 1.4.1 From a9bb61a4259972378dec5ef45d181fec150146b8 Mon Sep 17 00:00:00 2001 From: Remita Amine 
<remitamine@gmail.com> Date: Sun, 2 Apr 2017 09:22:09 +0100 Subject: [discoveryvr] Add new extractor(closes #12578) --- youtube_dl/extractor/discoveryvr.py | 59 +++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 60 insertions(+) create mode 100644 youtube_dl/extractor/discoveryvr.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/discoveryvr.py b/youtube_dl/extractor/discoveryvr.py new file mode 100644 index 000000000..cb63c2649 --- /dev/null +++ b/youtube_dl/extractor/discoveryvr.py @@ -0,0 +1,59 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import parse_duration + + +class DiscoveryVRIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?discoveryvr\.com/watch/(?P<id>[^/?#]+)' + _TEST = { + 'url': 'http://www.discoveryvr.com/watch/discovery-vr-an-introduction', + 'md5': '32b1929798c464a54356378b7912eca4', + 'info_dict': { + 'id': 'discovery-vr-an-introduction', + 'ext': 'mp4', + 'title': 'Discovery VR - An Introduction', + 'description': 'md5:80d418a10efb8899d9403e61d8790f06', + } + } + + def _real_extract(self, url): + display_id = self._match_id(url) + webpage = self._download_webpage(url, display_id) + + bootstrap_data = self._search_regex( + r'root\.DVR\.bootstrapData\s+=\s+"({.+?})";', + webpage, 'bootstrap data') + bootstrap_data = self._parse_json( + bootstrap_data.encode('utf-8').decode('unicode_escape'), + display_id) + videos = self._parse_json(bootstrap_data['videos'], display_id)['allVideos'] + video_data = next(video for video in videos if video.get('slug') == display_id) + + series = video_data.get('showTitle') + title = episode = video_data.get('title') or series + if series and series != title: + title = '%s - %s' % (series, title) + + formats = [] + for f, format_id in (('cdnUriM3U8', 'mobi'), ('webVideoUrlSd', 'sd'), ('webVideoUrlHd', 'hd')): + f_url = video_data.get(f) + if not f_url: + continue + formats.append({ + 'format_id': format_id, + 'url': f_url, + }) + + return { + 'id': display_id, + 'display_id': display_id, + 'title': title, + 'description': video_data.get('description'), + 'thumbnail': video_data.get('thumbnail'), + 'duration': parse_duration(video_data.get('runTime')), + 'formats': formats, + 'episode': episode, + 'series': series, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 46e5e3a1d..13ca1d2cd 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -273,6 +273,7 @@ from .discoverygo import ( DiscoveryGoPlaylistIE, ) from .discoverynetworks import DiscoveryNetworksDeIE +from .discoveryvr import DiscoveryVRIE from .disney import DisneyIE from .dispeak import DigitallySpeakingIE from .dropbox import DropboxIE -- cgit 1.4.1 From 2ab0bfcd810b521bcf4de6dd555c0a47e8a69f70 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Tue, 4 Apr 2017 03:05:18 +0700 Subject: [vshare] Add extractor (closes #12278) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vshare.py | 38 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 39 insertions(+) create mode 100644 youtube_dl/extractor/vshare.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 13ca1d2cd..7398dceef 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1188,6 +1188,7 @@ from .vrv import ( VRVIE, VRVSeriesIE, ) +from 
.vshare import VShareIE from .medialaan import MedialaanIE from .vube import VubeIE from .vuclip import VuClipIE diff --git a/youtube_dl/extractor/vshare.py b/youtube_dl/extractor/vshare.py new file mode 100644 index 000000000..5addbc280 --- /dev/null +++ b/youtube_dl/extractor/vshare.py @@ -0,0 +1,38 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class VShareIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?vshare\.io/[dv]/(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'https://vshare.io/d/0f64ce6', + 'md5': '16d7b8fef58846db47419199ff1ab3e7', + 'info_dict': { + 'id': '0f64ce6', + 'title': 'vl14062007715967', + 'ext': 'mp4', + } + }, { + 'url': 'https://vshare.io/v/0f64ce6/width-650/height-430/1', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage( + 'https://vshare.io/d/%s' % video_id, video_id) + + title = self._html_search_regex( + r'(?s)<div id="root-container">(.+?)<br/>', webpage, 'title') + video_url = self._search_regex( + r'<a[^>]+href=(["\'])(?P<url>(?:https?:)?//.+?)\1[^>]*>[Cc]lick\s+here', + webpage, 'video url', group='url') + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + } -- cgit 1.4.1 From b68e00b08ae60e60d9f5c2654884f3c58595075f Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Mon, 3 Apr 2017 21:35:54 +0100 Subject: [rtl2] add support for you.rtl2.de(closes #10257) --- youtube_dl/extractor/extractors.py | 6 +- youtube_dl/extractor/rtl2.py | 110 ++++++++++++++++++++++++++++++++++++- 2 files changed, 114 insertions(+), 2 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 7398dceef..2904dd4d1 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -834,7 +834,11 @@ from .rozhlas import RozhlasIE from .rtbf import RTBFIE from .rte import RteIE, RteRadioIE from .rtlnl import RtlNlIE -from .rtl2 import RTL2IE +from .rtl2 import ( + RTL2IE, + RTL2YouIE, + RTL2YouSeriesIE, +) from .rtp import RTPIE from .rts import RTSIE from .rtve import RTVEALaCartaIE, RTVELiveIE, RTVEInfantilIE, RTVELiveIE, RTVETelevisionIE diff --git a/youtube_dl/extractor/rtl2.py b/youtube_dl/extractor/rtl2.py index 721ee733c..666e90e90 100644 --- a/youtube_dl/extractor/rtl2.py +++ b/youtube_dl/extractor/rtl2.py @@ -1,13 +1,26 @@ # coding: utf-8 from __future__ import unicode_literals +import base64 import re from .common import InfoExtractor -from ..utils import int_or_none +from ..aes import aes_cbc_decrypt +from ..compat import ( + compat_ord, + compat_str, +) +from ..utils import ( + bytes_to_intlist, + ExtractorError, + intlist_to_bytes, + int_or_none, + strip_or_none, +) class RTL2IE(InfoExtractor): + IE_NAME = 'rtl2' _VALID_URL = r'http?://(?:www\.)?rtl2\.de/[^?#]*?/(?P<id>[^?#/]*?)(?:$|/(?:$|[?#]))' _TESTS = [{ 'url': 'http://www.rtl2.de/sendung/grip-das-motormagazin/folge/folge-203-0', @@ -98,3 +111,98 @@ class RTL2IE(InfoExtractor): 'duration': int_or_none(video_info.get('duration')), 'formats': formats, } + + +class RTL2YouBaseIE(InfoExtractor): + _BACKWERK_BASE_URL = 'https://p-you-backwerk.rtl2apps.de/' + + +class RTL2YouIE(RTL2YouBaseIE): + IE_NAME = 'rtl2:you' + _VALID_URL = r'http?://you\.rtl2\.de/(?:video/\d+/|youplayer/index\.html\?.*?\bvid=)(?P<id>\d+)' + _TESTS = [{ + 'url': 'http://you.rtl2.de/video/3002/15740/MJUNIK%20%E2%80%93%20Home%20of%20YOU/307-hirn-wo-bist-du', 
+ 'info_dict': { + 'id': '15740', + 'ext': 'mp4', + 'title': 'MJUNIK – Home of YOU - #307 Hirn, wo bist du?!', + 'description': 'md5:ddaa95c61b372b12b66e115b2772fe01', + 'age_limit': 12, + }, + }, { + 'url': 'http://you.rtl2.de/youplayer/index.html?vid=15712', + 'only_matching': True, + }] + _AES_KEY = b'\xe9W\xe4.<*\xb8\x1a\xd2\xb6\x92\xf3C\xd3\xefL\x1b\x03*\xbbbH\xc0\x03\xffo\xc2\xf2(\xaa\xaa!' + _GEO_COUNTRIES = ['DE'] + + def _real_extract(self, url): + video_id = self._match_id(url) + + stream_data = self._download_json( + self._BACKWERK_BASE_URL + 'stream/video/' + video_id, video_id) + + data, iv = base64.b64decode(stream_data['streamUrl']).decode().split(':') + stream_url = intlist_to_bytes(aes_cbc_decrypt( + bytes_to_intlist(base64.b64decode(data)), + bytes_to_intlist(self._AES_KEY), + bytes_to_intlist(base64.b64decode(iv)) + )) + if b'rtl2_you_video_not_found' in stream_url: + raise ExtractorError('video not found', expected=True) + + formats = self._extract_m3u8_formats( + stream_url[:-compat_ord(stream_url[-1])].decode(), + video_id, 'mp4', 'm3u8_native') + self._sort_formats(formats) + + video_data = self._download_json( + self._BACKWERK_BASE_URL + 'video/' + video_id, video_id) + + series = video_data.get('formatTitle') + title = episode = video_data.get('title') or series + if series and series != title: + title = '%s - %s' % (series, title) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'description': strip_or_none(video_data.get('description')), + 'thumbnail': video_data.get('image'), + 'duration': int_or_none(stream_data.get('duration') or video_data.get('duration'), 1000), + 'series': series, + 'episode': episode, + 'age_limit': int_or_none(video_data.get('minimumAge')), + } + + +class RTL2YouSeriesIE(RTL2YouBaseIE): + IE_NAME = 'rtl2:you:series' + _VALID_URL = r'http?://you\.rtl2\.de/videos/(?P<id>\d+)' + _TEST = { + 'url': 'http://you.rtl2.de/videos/115/dragon-ball', + 'info_dict': { + 'id': '115', + }, + 'playlist_mincount': 5, + } + + def _real_extract(self, url): + series_id = self._match_id(url) + stream_data = self._download_json( + self._BACKWERK_BASE_URL + 'videos', + series_id, query={ + 'formatId': series_id, + 'limit': 1000000000, + }) + + entries = [] + for video in stream_data.get('videos', []): + video_id = compat_str(video['videoId']) + if not video_id: + continue + entries.append(self.url_result( + 'http://you.rtl2.de/video/%s/%s' % (series_id, video_id), + 'RTL2You', video_id)) + return self.playlist_result(entries, series_id) -- cgit 1.4.1 From 28b674ca238fbae29f2ee7bc85202fa2f115a876 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 8 Apr 2017 19:42:09 +0700 Subject: [ceskateleveize:porady] Add extractor (closes #7411, closes #12645) --- youtube_dl/extractor/ceskatelevize.py | 45 +++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 5 +++- 2 files changed, 49 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/ceskatelevize.py b/youtube_dl/extractor/ceskatelevize.py index 0daee313f..e250de18c 100644 --- a/youtube_dl/extractor/ceskatelevize.py +++ b/youtube_dl/extractor/ceskatelevize.py @@ -12,6 +12,7 @@ from ..utils import ( ExtractorError, float_or_none, sanitized_Request, + unescapeHTML, urlencode_postdata, USER_AGENTS, ) @@ -232,3 +233,47 @@ class CeskaTelevizeIE(InfoExtractor): yield line return '\r\n'.join(_fix_subtitle(subtitles)) + + +class CeskaTelevizePoradyIE(InfoExtractor): + _VALID_URL = 
r'https?://(?:www\.)?ceskatelevize\.cz/porady/(?:[^/?#&]+/)*(?P<id>[^/#?]+)' + _TESTS = [{ + # video with 18+ caution trailer + 'url': 'http://www.ceskatelevize.cz/porady/10520528904-queer/215562210900007-bogotart/', + 'info_dict': { + 'id': '215562210900007-bogotart', + 'title': 'Queer: Bogotart', + 'description': 'Alternativní průvodce současným queer světem', + }, + 'playlist': [{ + 'info_dict': { + 'id': '61924494876844842', + 'ext': 'mp4', + 'title': 'Queer: Bogotart (Varování 18+)', + 'duration': 10.2, + }, + }, { + 'info_dict': { + 'id': '61924494877068022', + 'ext': 'mp4', + 'title': 'Queer: Bogotart (Queer)', + 'thumbnail': r're:^https?://.*\.jpg', + 'duration': 1558.3, + }, + }], + 'params': { + # m3u8 download + 'skip_download': True, + }, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + data_url = unescapeHTML(self._search_regex( + r'<span[^>]*\bdata-url=(["\'])(?P<url>(?:(?!\1).)+)\1', + webpage, 'iframe player url', group='url')) + + return self.url_result(data_url, ie=CeskaTelevizeIE.ie_key()) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 2904dd4d1..72728d919 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -165,7 +165,10 @@ from .ccc import CCCIE from .ccma import CCMAIE from .cctv import CCTVIE from .cda import CDAIE -from .ceskatelevize import CeskaTelevizeIE +from .ceskatelevize import ( + CeskaTelevizeIE, + CeskaTelevizePoradyIE, +) from .channel9 import Channel9IE from .charlierose import CharlieRoseIE from .chaturbate import ChaturbateIE -- cgit 1.4.1 From 5f3e0b69efa8ae80f536fb54ace4bb4d9c667d2e Mon Sep 17 00:00:00 2001 From: Entropy <0fiscalentropy@users.noreply.github.com> Date: Fri, 7 Apr 2017 21:52:48 +0200 Subject: [TheSun] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/thesun.py | 27 +++++++++++++++++++++++++++ 2 files changed, 28 insertions(+) create mode 100644 youtube_dl/extractor/thesun.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 72728d919..01865479c 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -983,6 +983,7 @@ from .theplatform import ( from .thescene import TheSceneIE from .thesixtyone import TheSixtyOneIE from .thestar import TheStarIE +from .thesun import TheSunIE from .theweatherchannel import TheWeatherChannelIE from .thisamericanlife import ThisAmericanLifeIE from .thisav import ThisAVIE diff --git a/youtube_dl/extractor/thesun.py b/youtube_dl/extractor/thesun.py new file mode 100644 index 000000000..7f96bf8c9 --- /dev/null +++ b/youtube_dl/extractor/thesun.py @@ -0,0 +1,27 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from .ooyala import OoyalaIE + + +class TheSunIE(InfoExtractor): + _VALID_URL = r'https://(?:www\.)?thesun\.co\.uk/\w+/(?P<id>\d+)/[\w-]' + _TEST = { + 'url': 'https://www.thesun.co.uk/tvandshowbiz/2261604/orlando-bloom-and-katy-perry-post-adorable-instagram-video-together-celebrating-thanksgiving-after-split-rumours/', + 'md5': '5667123b24f25f43f4c4f381ef34c5c2', + 'info_dict': { + 'id': 'h4OXN0NzE6rv6ObkEifKcNA-gYUw4xFf', + 'ext': 'mp4', + 'title': 'Katy Perry and Orlando Bloom shut down split rumours with cute Thanksgiving video', + 'description': 'Still going strong', + 'duration': 31.28, + } + } + + def _real_extract(self, url): + video_id = 
self._match_id(url) + + webpage = self._download_webpage(url, video_id) + ooyala_id = self._search_regex(r'id\s*=\s*"thesun-ooyala-player-([^"]+)"', webpage, 'ooyala id') + + return OoyalaIE._build_url_result(ooyala_id) -- cgit 1.4.1 From 2bd875edfe674e72ae935bacdbf0a055572d1d1b Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 8 Apr 2017 22:38:37 +0700 Subject: [medici] Add extractor (closes #3406) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/medici.py | 70 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 71 insertions(+) create mode 100644 youtube_dl/extractor/medici.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 01865479c..68e1a5cfc 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -541,6 +541,7 @@ from .mangomolo import ( ) from .matchtv import MatchTVIE from .mdr import MDRIE +from .medici import MediciIE from .meipai import MeipaiIE from .melonvod import MelonVODIE from .meta import METAIE diff --git a/youtube_dl/extractor/medici.py b/youtube_dl/extractor/medici.py new file mode 100644 index 000000000..cd910238e --- /dev/null +++ b/youtube_dl/extractor/medici.py @@ -0,0 +1,70 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, + update_url_query, + urlencode_postdata, +) + + +class MediciIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?medici\.tv/#!/(?P<id>[^?#&]+)' + _TEST = { + 'url': 'http://www.medici.tv/#!/daniel-harding-frans-helmerson-verbier-festival-music-camp', + 'md5': '004c21bb0a57248085b6ff3fec72719d', + 'info_dict': { + 'id': '3059', + 'ext': 'flv', + 'title': 'Daniel Harding conducts the Verbier Festival Music Camp \u2013 With Frans Helmerson', + 'description': 'md5:322a1e952bafb725174fd8c1a8212f58', + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20170408', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + # Sets csrftoken cookie + self._download_webpage(url, video_id) + + MEDICI_URL = 'http://www.medici.tv/' + + data = self._download_json( + MEDICI_URL, video_id, + data=urlencode_postdata({ + 'json': 'true', + 'page': '/%s' % video_id, + 'timezone_offset': -420, + }), headers={ + 'X-CSRFToken': self._get_cookies(url)['csrftoken'].value, + 'X-Requested-With': 'XMLHttpRequest', + 'Referer': MEDICI_URL, + 'Content-Type': 'application/x-www-form-urlencoded', + }) + + video = data['video']['videos']['video1'] + + title = video.get('nom') or data['title'] + + video_id = video.get('id') or video_id + formats = self._extract_f4m_formats( + update_url_query(video['url_akamai'], { + 'hdcore': '3.1.0', + 'plugin=aasp': '3.1.0.43.124', + }), video_id, f4m_id='hds') + + description = data.get('meta_description') + thumbnail = video.get('url_thumbnail') or data.get('main_image') + upload_date = unified_strdate(data['video'].get('date')) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + 'formats': formats, + } -- cgit 1.4.1 From 3ef1d0c73373000bf65851c7afe9c68e707e4a97 Mon Sep 17 00:00:00 2001 From: Vukkk <Vukkk@github.com> Date: Wed, 31 Aug 2016 10:56:11 +0200 Subject: [tv2hu] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/tv2hu.py | 78 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 
youtube_dl/extractor/tv2hu.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 68e1a5cfc..7484099f6 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1031,6 +1031,7 @@ from .tv2 import ( TV2IE, TV2ArticleIE, ) +from .tv2hu import TV2HUIE from .tv3 import TV3IE from .tv4 import TV4IE from .tv5mondeplus import TV5MondePlusIE diff --git a/youtube_dl/extractor/tv2hu.py b/youtube_dl/extractor/tv2hu.py new file mode 100644 index 000000000..cb1ce822a --- /dev/null +++ b/youtube_dl/extractor/tv2hu.py @@ -0,0 +1,78 @@ +# encoding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + +class TV2HUIE(InfoExtractor): + IE_NAME = 'tv2.hu' + _VALID_URL = r'https?://(?:www\.)?tv2\.hu/(?:musoraink/)?(?P<uploader>[^/]+)/(?:teljes_adasok/)?(?P<id>[0-9]+)_(.+?)\.html' + _JSON_URL = r'(?P<json_url>https?://.+?\.tv2\.hu/vod/(?P<upload_date>\d+)/id_(?P<upload_id>\d+).+?&type=json)' + + _TESTS = [{ + 'url': 'http://tv2.hu/ezek_megorultek/217679_ezek-megorultek---1.-adas-1.-resz.html', + 'info_dict': { + 'id': '217679', + 'ext': 'mp4', + 'title': 'Ezek megőrültek! - 1. adás 1. rész', + 'upload_id': '220289', + 'upload_date': '20160826', + 'uploader': 'ezek_megorultek', + 'thumbnail': 're:^https?://.*\.jpg$' + }, + 'params': { + # m3u8 download + 'skip_download': True, + } + }, { + 'url': 'http://tv2.hu/ezek_megorultek/teljes_adasok/217677_ezek-megorultek---1.-adas-2.-resz.html', + 'only_matching': True + }, { + 'url': 'http://tv2.hu/musoraink/aktiv/aktiv_teljes_adas/217963_aktiv-teljes-adas---2016.08.30..html', + 'only_matching': True + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage( + url, video_id, 'Downloading info page') + + json_url = re.search(self._JSON_URL, webpage) + + json_data = self._download_json( + json_url.group('json_url'), video_id, 'Downloading video info') + + manifest_url = json_data['bitrates']['hls'] + + formats = self._extract_m3u8_formats( + manifest_url, video_id, 'mp4', entry_protocol='m3u8_native') + + for i in range(len(json_data['bitrates']['mp4'])): + quality = json_data.get('mp4Labels')[i] + + if quality.lower() == 'auto': + continue + + formats.append({ + 'protocol': 'http', + 'url': json_data['bitrates']['mp4'][i], + 'height': int(quality[:-1]), + 'width': int(quality[:-1])/9*16, + 'ext': 'mp4', + 'format_id': quality, + 'format_note': 'HTTP', + 'preference': int(quality[:-1]) + }) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': self._og_search_title(webpage).strip(), + 'thumbnail': self._og_search_property('image', webpage), + 'uploader': self._search_regex(self._VALID_URL, url, 'uploader'), + 'upload_id': json_url.group('upload_id'), + 'upload_date': json_url.group('upload_date'), + 'formats': formats + } -- cgit 1.4.1 From e4d74e2778ca283330460d893a0923820a74df66 Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Wed, 12 Apr 2017 19:31:18 +0100 Subject: [tv2hu] improve extraction --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/tv2hu.py | 78 +++++++++++++++----------------------- 2 files changed, 32 insertions(+), 48 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 7484099f6..45591f2a7 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ 
-1031,7 +1031,7 @@ from .tv2 import ( TV2IE, TV2ArticleIE, ) -from .tv2hu import TV2HUIE +from .tv2hu import TV2HuIE from .tv3 import TV3IE from .tv4 import TV4IE from .tv5mondeplus import TV5MondePlusIE diff --git a/youtube_dl/extractor/tv2hu.py b/youtube_dl/extractor/tv2hu.py index cb1ce822a..ce01cc80d 100644 --- a/youtube_dl/extractor/tv2hu.py +++ b/youtube_dl/extractor/tv2hu.py @@ -1,29 +1,22 @@ # encoding: utf-8 from __future__ import unicode_literals -import re - from .common import InfoExtractor +from ..utils import int_or_none -class TV2HUIE(InfoExtractor): - IE_NAME = 'tv2.hu' - _VALID_URL = r'https?://(?:www\.)?tv2\.hu/(?:musoraink/)?(?P<uploader>[^/]+)/(?:teljes_adasok/)?(?P<id>[0-9]+)_(.+?)\.html' - _JSON_URL = r'(?P<json_url>https?://.+?\.tv2\.hu/vod/(?P<upload_date>\d+)/id_(?P<upload_id>\d+).+?&type=json)' +class TV2HuIE(InfoExtractor): + IE_NAME = 'tv2.hu' + _VALID_URL = r'https?://(?:www\.)?tv2\.hu/(?:[^/]+/)+(?P<id>\d+)_[^/?#]+?\.html' _TESTS = [{ 'url': 'http://tv2.hu/ezek_megorultek/217679_ezek-megorultek---1.-adas-1.-resz.html', + 'md5': '585e58e2e090f34603804bb2c48e98d8', 'info_dict': { 'id': '217679', 'ext': 'mp4', 'title': 'Ezek megőrültek! - 1. adás 1. rész', - 'upload_id': '220289', 'upload_date': '20160826', - 'uploader': 'ezek_megorultek', 'thumbnail': 're:^https?://.*\.jpg$' - }, - 'params': { - # m3u8 download - 'skip_download': True, } }, { 'url': 'http://tv2.hu/ezek_megorultek/teljes_adasok/217677_ezek-megorultek---1.-adas-2.-resz.html', @@ -35,44 +28,35 @@ class TV2HUIE(InfoExtractor): def _real_extract(self, url): video_id = self._match_id(url) - webpage = self._download_webpage( - url, video_id, 'Downloading info page') - - json_url = re.search(self._JSON_URL, webpage) - - json_data = self._download_json( - json_url.group('json_url'), video_id, 'Downloading video info') - - manifest_url = json_data['bitrates']['hls'] - - formats = self._extract_m3u8_formats( - manifest_url, video_id, 'mp4', entry_protocol='m3u8_native') - - for i in range(len(json_data['bitrates']['mp4'])): - quality = json_data.get('mp4Labels')[i] - - if quality.lower() == 'auto': - continue - - formats.append({ - 'protocol': 'http', - 'url': json_data['bitrates']['mp4'][i], - 'height': int(quality[:-1]), - 'width': int(quality[:-1])/9*16, - 'ext': 'mp4', - 'format_id': quality, - 'format_note': 'HTTP', - 'preference': int(quality[:-1]) - }) - + webpage = self._download_webpage(url, video_id) + json_url = self._search_regex( + r'jsonUrl\s*=\s*"([^"]+)"', webpage, 'json url') + json_data = self._download_json(json_url, video_id) + + formats = [] + for b in ('bitrates', 'backupBitrates'): + bitrates = json_data.get(b, {}) + m3u8_url = bitrates.get('hls') + if m3u8_url: + formats.extend(self._extract_wowza_formats( + m3u8_url, video_id, skip_protocols=['rtmp', 'rtsp'])) + + for mp4_url in bitrates.get('mp4', []): + height = int_or_none(self._search_regex( + r'\.(\d+)p\.mp4', mp4_url, 'height', default=None)) + formats.append({ + 'format_id': 'http' + ('-%d' % height if height else ''), + 'url': mp4_url, + 'height': height, + 'width': int_or_none(height / 9.0 * 16.0 if height else None), + }) self._sort_formats(formats) return { 'id': video_id, 'title': self._og_search_title(webpage).strip(), - 'thumbnail': self._og_search_property('image', webpage), - 'uploader': self._search_regex(self._VALID_URL, url, 'uploader'), - 'upload_id': json_url.group('upload_id'), - 'upload_date': json_url.group('upload_date'), - 'formats': formats + 'thumbnail': self._og_search_thumbnail(webpage), + 
'upload_date': self._search_regex( + r'/vod/(\d{8})/', json_url, 'upload_date', default=None), + 'formats': formats, } -- cgit 1.4.1 From b2fff30817848c1761a53d49fbe49aabd596089e Mon Sep 17 00:00:00 2001 From: Remita Amine <remitamine@gmail.com> Date: Thu, 13 Apr 2017 12:27:28 +0100 Subject: [go90] Add new extractor(closes #10127) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/go90.py | 92 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 93 insertions(+) create mode 100644 youtube_dl/extractor/go90.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 45591f2a7..1671090f4 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -384,6 +384,7 @@ from .globo import ( GloboArticleIE, ) from .go import GoIE +from .go90 import Go90IE from .godtube import GodTubeIE from .godtv import GodTVIE from .golem import GolemIE diff --git a/youtube_dl/extractor/go90.py b/youtube_dl/extractor/go90.py new file mode 100644 index 000000000..3550eca7c --- /dev/null +++ b/youtube_dl/extractor/go90.py @@ -0,0 +1,92 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + parse_iso8601, +) + + +class Go90IE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?go90\.com/videos/(?P<id>[0-9a-zA-Z]+)' + _TEST = { + 'url': 'https://www.go90.com/videos/84BUqjLpf9D', + 'md5': 'efa7670dbbbf21a7b07b360652b24a32', + 'info_dict': { + 'id': '84BUqjLpf9D', + 'ext': 'mp4', + 'title': 'Inside The Utah Coalition Against Pornography Convention', + 'description': 'VICE\'s Karley Sciortino meets with activists who discuss the state\'s strong anti-porn stance. 
Then, VICE Sports explains NFL contracts.', + 'timestamp': 1491868800, + 'upload_date': '20170411', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._download_json( + 'https://www.go90.com/api/view/items/' + video_id, + video_id, headers={ + 'Content-Type': 'application/json; charset=utf-8', + }, data=b'{"client":"web","device_type":"pc"}') + title = video_data['title'] + main_video_asset = video_data['main_video_asset'] + + thumbnails = [] + formats = [] + for asset in video_data.get('assets'): + if asset.get('id') == main_video_asset: + for source in asset.get('sources', []): + source_location = source.get('location') + if not source_location: + continue + source_type = source.get('type') + if source_type == 'hls': + m3u8_formats = self._extract_m3u8_formats( + source_location, video_id, 'mp4', + 'm3u8_native', m3u8_id='hls', fatal=False) + for f in m3u8_formats: + mobj = re.search(r'/hls-(\d+)-(\d+)K', f['url']) + if mobj: + height, tbr = mobj.groups() + height = int_or_none(height) + f.update({ + 'height': f.get('height') or height, + 'width': f.get('width') or int_or_none(height / 9.0 * 16.0 if height else None), + 'tbr': f.get('tbr') or int_or_none(tbr), + }) + formats.extend(m3u8_formats) + elif source_type == 'dash': + formats.extend(self._extract_mpd_formats( + source_location, video_id, mpd_id='dash', fatal=False)) + else: + formats.append({ + 'format_id': source.get('name'), + 'url': source_location, + 'width': int_or_none(source.get('width')), + 'height': int_or_none(source.get('height')), + 'tbr': int_or_none(source.get('bitrate')), + }) + elif asset.get('type') == 'image': + asset_location = asset.get('location') + if not asset_location: + continue + thumbnails.append({ + 'url': asset_location, + 'width': int_or_none(asset.get('width')), + 'height': int_or_none(asset.get('height')), + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'thumbnails': thumbnails, + 'description': video_data.get('short_description'), + 'like_count': int_or_none(video_data.get('like_count')), + 'timestamp': parse_iso8601(video_data.get('released_at')), + } -- cgit 1.4.1 From 3266d08af29bbd6078aca172741458ddee180ab9 Mon Sep 17 00:00:00 2001 From: John Hawkinson <jhawk@mit.edu> Date: Sat, 25 Mar 2017 19:47:48 -0400 Subject: [wsj:article] Add extractor --- youtube_dl/extractor/extractors.py | 5 ++++- youtube_dl/extractor/wsj.py | 28 +++++++++++++++++++++++++--- 2 files changed, 29 insertions(+), 4 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 1671090f4..72d28a7e6 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1233,7 +1233,10 @@ from .wrzuta import ( WrzutaIE, WrzutaPlaylistIE, ) -from .wsj import WSJIE +from .wsj import ( + WSJIE, + WSJArticleIE, +) from .xbef import XBefIE from .xboxclips import XboxClipsIE from .xfileshare import XFileShareIE diff --git a/youtube_dl/extractor/wsj.py b/youtube_dl/extractor/wsj.py index deb7483ae..ec38a2ad8 100644 --- a/youtube_dl/extractor/wsj.py +++ b/youtube_dl/extractor/wsj.py @@ -10,10 +10,11 @@ from ..utils import ( class WSJIE(InfoExtractor): - _VALID_URL = r'''(?x)https?:// + _VALID_URL = r'''(?x) (?: - video-api\.wsj\.com/api-video/player/iframe\.html\?guid=| - (?:www\.)?wsj\.com/video/[^/]+/ + https?://video-api\.wsj\.com/api-video/player/iframe\.html\?guid=| + https?://(?:www\.)?wsj\.com/video/[^/]+/| + 
wsj: ) (?P<id>[a-zA-Z0-9-]+)''' IE_DESC = 'Wall Street Journal' @@ -87,3 +88,24 @@ class WSJIE(InfoExtractor): 'title': title, 'categories': info.get('keywords'), } + + +class WSJArticleIE(InfoExtractor): + _VALID_URL = r'(?i)https?://(?:www\.)?wsj\.com/articles/(?P<id>\w[^/]+)' + _TESTS = [{ + 'url': 'https://www.wsj.com/articles/dont-like-china-no-pandas-for-you-1490366939?', + 'info_dict': { + 'id': '4B13FA62-1D8C-45DB-8EA1-4105CB20B362', + 'ext': 'mp4', + 'upload_date': '20170221', + 'uploader_id': 'ralcaraz', + 'title': 'Bao Bao the Panda Leaves for China', + } + }] + + def _real_extract(self, url): + article_id = self._match_id(url) + webpage = self._download_webpage(url, article_id) + video_id = self._search_regex(r'data-src=["\']([A-Z0-9\-]+)', + webpage, 'video id') + return self.url_result('wsj:%s' % video_id, WSJIE.ie_key(), video_id) -- cgit 1.4.1 From 4db79fa1bc482ef5d97c8b73f0bf3683d5cc383c Mon Sep 17 00:00:00 2001 From: Marvin Ewald <marvin.e@protonmail.ch> Date: Tue, 4 Apr 2017 19:18:23 +0200 Subject: [streamango] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/streamango.py | 54 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+) create mode 100644 youtube_dl/extractor/streamango.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 72d28a7e6..a92cbefed 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -939,6 +939,7 @@ from .srmediathek import SRMediathekIE from .stanfordoc import StanfordOpenClassroomIE from .steam import SteamIE from .streamable import StreamableIE +from .streamango import StreamangoIE from .streamcloud import StreamcloudIE from .streamcz import StreamCZIE from .streetvoice import StreetVoiceIE diff --git a/youtube_dl/extractor/streamango.py b/youtube_dl/extractor/streamango.py new file mode 100644 index 000000000..a4ef06b66 --- /dev/null +++ b/youtube_dl/extractor/streamango.py @@ -0,0 +1,54 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class StreamangoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?streamango\.com/(?:f|embed)/(?P<id>.+?)/(?:.+)' + _TESTS = [{ + 'url': 'https://streamango.com/f/clapasobsptpkdfe/20170315_150006_mp4', + 'md5': 'e992787515a182f55e38fc97588d802a', + 'info_dict': { + 'id': 'clapasobsptpkdfe', + 'ext': 'mp4', + 'title': '20170315_150006.mp4', + 'url': r're:https://streamango\.com/v/d/clapasobsptpkdfe~[0-9]{10}~(?:[0-9]+\.){3}[0-9]+~.{8}/720', + } + }, { + 'url': 'https://streamango.com/embed/clapasobsptpkdfe/20170315_150006_mp4', + 'only_matching': True, + }] + + def _real_extract(self, url): + def extract_url(urltype): + return self._search_regex( + r'type\s*:\s*["\']{}["\']\s*,\s*src\s*:\s*["\'](?P<url>.+?)["\'].*'.format(urltype), + webpage, 'video URL', group='url') + + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._og_search_title(webpage) + url = 'https:' + extract_url('video/mp4') + dashurl = extract_url(r'application/dash\+xml') + + formats = [{ + 'url': url, + 'ext': 'mp4', + 'width': 1280, + 'height': 720, + 'format_id': 'mp4', + }] + + formats.extend(self._extract_mpd_formats( + dashurl, video_id, mpd_id='dash', fatal=False)) + + self._sort_formats(formats) + + return { + 'id': video_id, + 'url': url, + 'title': title, + 'formats': formats, + } -- cgit 1.4.1 From facfd79f9ad574db7bfcc90cbef01b3a7e7b1262 Mon Sep 17 
00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Sat, 22 Apr 2017 21:20:25 +0800 Subject: [azubu] Remove extractor as the site is gone (closes #12813) --- ChangeLog | 1 + youtube_dl/extractor/azubu.py | 140 ------------------------------------- youtube_dl/extractor/extractors.py | 1 - 3 files changed, 1 insertion(+), 141 deletions(-) delete mode 100644 youtube_dl/extractor/azubu.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 1d49f8c8f..d699acb31 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors +- [azubu] Remove extractor (#12813) * [porn91] Fix extraction (#12814) + [generic] Support Brightcove videos in <iframe> (#12482) + [brightcove] Support URLs with bcpid instead of playerID (#12482) diff --git a/youtube_dl/extractor/azubu.py b/youtube_dl/extractor/azubu.py deleted file mode 100644 index 3ba2f00d3..000000000 --- a/youtube_dl/extractor/azubu.py +++ /dev/null @@ -1,140 +0,0 @@ -from __future__ import unicode_literals - -import json - -from .common import InfoExtractor -from ..utils import ( - ExtractorError, - float_or_none, - sanitized_Request, -) - - -class AzubuIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/[^/]+#!/play/(?P<id>\d+)' - _TESTS = [ - { - 'url': 'http://www.azubu.tv/GSL#!/play/15575/2014-hot6-cup-last-big-match-ro8-day-1', - 'md5': 'a88b42fcf844f29ad6035054bd9ecaf4', - 'info_dict': { - 'id': '15575', - 'ext': 'mp4', - 'title': '2014 HOT6 CUP LAST BIG MATCH Ro8 Day 1', - 'description': 'md5:d06bdea27b8cc4388a90ad35b5c66c01', - 'thumbnail': r're:^https?://.*\.jpe?g', - 'timestamp': 1417523507.334, - 'upload_date': '20141202', - 'duration': 9988.7, - 'uploader': 'GSL', - 'uploader_id': 414310, - 'view_count': int, - }, - }, - { - 'url': 'http://www.azubu.tv/FnaticTV#!/play/9344/-fnatic-at-worlds-2014:-toyz---%22i-love-rekkles,-he-has-amazing-mechanics%22-', - 'md5': 'b72a871fe1d9f70bd7673769cdb3b925', - 'info_dict': { - 'id': '9344', - 'ext': 'mp4', - 'title': 'Fnatic at Worlds 2014: Toyz - "I love Rekkles, he has amazing mechanics"', - 'description': 'md5:4a649737b5f6c8b5c5be543e88dc62af', - 'thumbnail': r're:^https?://.*\.jpe?g', - 'timestamp': 1410530893.320, - 'upload_date': '20140912', - 'duration': 172.385, - 'uploader': 'FnaticTV', - 'uploader_id': 272749, - 'view_count': int, - }, - 'skip': 'Channel offline', - }, - ] - - def _real_extract(self, url): - video_id = self._match_id(url) - - data = self._download_json( - 'http://www.azubu.tv/api/video/%s' % video_id, video_id)['data'] - - title = data['title'].strip() - description = data.get('description') - thumbnail = data.get('thumbnail') - view_count = data.get('view_count') - user = data.get('user', {}) - uploader = user.get('username') - uploader_id = user.get('id') - - stream_params = json.loads(data['stream_params']) - - timestamp = float_or_none(stream_params.get('creationDate'), 1000) - duration = float_or_none(stream_params.get('length'), 1000) - - renditions = stream_params.get('renditions') or [] - video = stream_params.get('FLVFullLength') or stream_params.get('videoFullLength') - if video: - renditions.append(video) - - if not renditions and not user.get('channel', {}).get('is_live', True): - raise ExtractorError('%s said: channel is offline.' 
% self.IE_NAME, expected=True) - - formats = [{ - 'url': fmt['url'], - 'width': fmt['frameWidth'], - 'height': fmt['frameHeight'], - 'vbr': float_or_none(fmt['encodingRate'], 1000), - 'filesize': fmt['size'], - 'vcodec': fmt['videoCodec'], - 'container': fmt['videoContainer'], - } for fmt in renditions if fmt['url']] - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'description': description, - 'thumbnail': thumbnail, - 'timestamp': timestamp, - 'duration': duration, - 'uploader': uploader, - 'uploader_id': uploader_id, - 'view_count': view_count, - 'formats': formats, - } - - -class AzubuLiveIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?azubu\.(?:tv|uol.com.br)/(?P<id>[^/]+)$' - - _TESTS = [{ - 'url': 'http://www.azubu.tv/MarsTVMDLen', - 'only_matching': True, - }, { - 'url': 'http://azubu.uol.com.br/adolfz', - 'only_matching': True, - }] - - def _real_extract(self, url): - user = self._match_id(url) - - info = self._download_json( - 'http://api.azubu.tv/public/modules/last-video/{0}/info'.format(user), - user)['data'] - if info['type'] != 'STREAM': - raise ExtractorError('{0} is not streaming live'.format(user), expected=True) - - req = sanitized_Request( - 'https://edge-elb.api.brightcove.com/playback/v1/accounts/3361910549001/videos/ref:' + info['reference_id']) - req.add_header('Accept', 'application/json;pk=BCpkADawqM1gvI0oGWg8dxQHlgT8HkdE2LnAlWAZkOlznO39bSZX726u4JqnDsK3MDXcO01JxXK2tZtJbgQChxgaFzEVdHRjaDoxaOu8hHOO8NYhwdxw9BzvgkvLUlpbDNUuDoc4E4wxDToV') - bc_info = self._download_json(req, user) - m3u8_url = next(source['src'] for source in bc_info['sources'] if source['container'] == 'M2TS') - formats = self._extract_m3u8_formats(m3u8_url, user, ext='mp4') - self._sort_formats(formats) - - return { - 'id': info['id'], - 'title': self._live_title(info['title']), - 'uploader_id': user, - 'formats': formats, - 'is_live': True, - 'thumbnail': bc_info['poster'], - } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index a92cbefed..355a4e56f 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -87,7 +87,6 @@ from .azmedien import ( AZMedienPlaylistIE, AZMedienShowPlaylistIE, ) -from .azubu import AzubuIE, AzubuLiveIE from .baidu import BaiduVideoIE from .bambuser import BambuserIE, BambuserChannelIE from .bandcamp import BandcampIE, BandcampAlbumIE -- cgit 1.4.1 From 7986c3abcdad819b61bdf0fb7111759f9fc1fc32 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 29 Apr 2017 19:49:04 +0700 Subject: [anvato] Improve extraction (closes #12913) * Promote to regular shortcut based extractor * Add mcp to access key mapping table * Add support for embeds extraction * Add support for anvato embeds in generic extractor --- youtube_dl/extractor/anvato.py | 59 ++++++++++++++++++++++++++++++++++++-- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/generic.py | 16 +++++++++++ 3 files changed, 73 insertions(+), 3 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/anvato.py b/youtube_dl/extractor/anvato.py index 9fd91c2f6..ab8c237e9 100644 --- a/youtube_dl/extractor/anvato.py +++ b/youtube_dl/extractor/anvato.py @@ -5,6 +5,7 @@ import base64 import hashlib import json import random +import re import time from .common import InfoExtractor @@ -16,6 +17,7 @@ from ..utils import ( intlist_to_bytes, int_or_none, strip_jsonp, + unescapeHTML, ) @@ -26,6 +28,8 @@ def md5_text(s): class AnvatoIE(InfoExtractor): + _VALID_URL = 
r'anvato:(?P<access_key_or_mcp>[^:]+):(?P<id>\d+)' + # Copied from anvplayer.min.js _ANVACK_TABLE = { 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ', @@ -114,6 +118,22 @@ class AnvatoIE(InfoExtractor): 'nbcu_nbcd_desktop_web_prod_93d8ead38ce2024f8f544b78306fbd15895ae5e6_secure': 'NNemUkySjxLyPTKvZRiGntBIjEyK8uqicjMakIaQ' } + _MCP_TO_ACCESS_KEY_TABLE = { + 'qa': 'anvato_mcpqa_demo_web_stage_18b55e00db5a13faa8d03ae6e41f6f5bcb15b922', + 'lin': 'anvato_mcp_lin_web_prod_4c36fbfd4d8d8ecae6488656e21ac6d1ac972749', + 'univison': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa', + 'uni': 'anvato_mcp_univision_web_prod_37fe34850c99a3b5cdb71dab10a417dd5cdecafa', + 'dev': 'anvato_mcp_fs2go_web_prod_c7b90a93e171469cdca00a931211a2f556370d0a', + 'sps': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336', + 'spsstg': 'anvato_mcp_sps_web_prod_54bdc90dd6ba21710e9f7074338365bba28da336', + 'anv': 'anvato_mcp_anv_web_prod_791407490f4c1ef2a4bcb21103e0cb1bcb3352b3', + 'gray': 'anvato_mcp_gray_web_prod_4c10f067c393ed8fc453d3930f8ab2b159973900', + 'hearst': 'anvato_mcp_hearst_web_prod_5356c3de0fc7c90a3727b4863ca7fec3a4524a99', + 'cbs': 'anvato_mcp_cbs_web_prod_02f26581ff80e5bda7aad28226a8d369037f2cbe', + 'telemundo': 'anvato_mcp_telemundo_web_prod_c5278d51ad46fda4b6ca3d0ea44a7846a054f582' + } + + _ANVP_RE = r'<script[^>]+\bdata-anvp\s*=\s*(["\'])(?P<anvp>(?:(?!\1).)+)\1' _AUTH_KEY = b'\x31\xc2\x42\x84\x9e\x73\xa0\xce' def __init__(self, *args, **kwargs): @@ -217,9 +237,42 @@ class AnvatoIE(InfoExtractor): 'subtitles': subtitles, } + @staticmethod + def _extract_urls(ie, webpage, video_id): + entries = [] + for mobj in re.finditer(AnvatoIE._ANVP_RE, webpage): + anvplayer_data = ie._parse_json( + mobj.group('anvp'), video_id, transform_source=unescapeHTML, + fatal=False) + if not anvplayer_data: + continue + video = anvplayer_data.get('video') + if not isinstance(video, compat_str) or not video.isdigit(): + continue + access_key = anvplayer_data.get('accessKey') + if not access_key: + mcp = anvplayer_data.get('mcp') + if mcp: + access_key = AnvatoIE._MCP_TO_ACCESS_KEY_TABLE.get( + mcp.lower()) + if not access_key: + continue + entries.append(ie.url_result( + 'anvato:%s:%s' % (access_key, video), ie=AnvatoIE.ie_key(), + video_id=video)) + return entries + def _extract_anvato_videos(self, webpage, video_id): - anvplayer_data = self._parse_json(self._html_search_regex( - r'<script[^>]+data-anvp=\'([^\']+)\'', webpage, - 'Anvato player data'), video_id) + anvplayer_data = self._parse_json( + self._html_search_regex( + self._ANVP_RE, webpage, 'Anvato player data', group='anvp'), + video_id) return self._get_anvato_videos( anvplayer_data['accessKey'], anvplayer_data['video']) + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + access_key, video_id = mobj.group('access_key_or_mcp', 'id') + if access_key not in self._ANVACK_TABLE: + access_key = self._MCP_TO_ACCESS_KEY_TABLE[access_key] + return self._get_anvato_videos(access_key, video_id) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 355a4e56f..39e5380b8 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -41,6 +41,7 @@ from .alphaporno import AlphaPornoIE from .amcnetworks import AMCNetworksIE from .animeondemand import AnimeOnDemandIE from .anitube import AnitubeIE +from .anvato import AnvatoIE from .anysex import AnySexIE from .aol import AolIE from 
.allocine import AllocineIE diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 67184bc5d..7f7c1ba29 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -86,6 +86,7 @@ from .openload import OpenloadIE from .videopress import VideoPressIE from .rutube import RutubeIE from .limelight import LimelightBaseIE +from .anvato import AnvatoIE class GenericIE(InfoExtractor): @@ -1677,6 +1678,15 @@ class GenericIE(InfoExtractor): }, 'playlist_mincount': 5, }, + { + 'url': 'http://kron4.com/2017/04/28/standoff-with-walnut-creek-murder-suspect-ends-with-arrest/', + 'info_dict': { + 'id': 'standoff-with-walnut-creek-murder-suspect-ends-with-arrest', + 'title': 'Standoff with Walnut Creek murder suspect ends', + 'description': 'md5:3ccc48a60fc9441eeccfc9c469ebf788', + }, + 'playlist_mincount': 4, + }, # { # # TODO: find another test # # http://schema.org/VideoObject @@ -2537,6 +2547,12 @@ class GenericIE(InfoExtractor): 'limelight:media:%s' % mobj.group('id'), {'source_url': url}), 'LimelightMedia', mobj.group('id')) + # Look for Anvato embeds + anvato_urls = AnvatoIE._extract_urls(self, webpage, video_id) + if anvato_urls: + return self.playlist_result( + anvato_urls, video_id, video_title, video_description) + # Look for AdobeTVVideo embeds mobj = re.search( r'<iframe[^>]+src=[\'"]((?:https?:)?//video\.tv\.adobe\.com/v/\d+[^"]+)[\'"]', -- cgit 1.4.1 From 1c7c76e4fb1f5d5c05cfa22483a46e7dd870ac0f Mon Sep 17 00:00:00 2001 From: Frederic Bournival <fredbournival@gmail.com> Date: Wed, 19 Apr 2017 17:43:32 -0400 Subject: [noovo] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/noovo.py | 52 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 53 insertions(+) create mode 100644 youtube_dl/extractor/noovo.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 39e5380b8..612007a46 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -663,6 +663,7 @@ from .nintendo import NintendoIE from .njpwworld import NJPWWorldIE from .nobelprize import NobelPrizeIE from .noco import NocoIE +from .noovo import NoovoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE from .nova import NovaIE diff --git a/youtube_dl/extractor/noovo.py b/youtube_dl/extractor/noovo.py new file mode 100644 index 000000000..9f441c2fc --- /dev/null +++ b/youtube_dl/extractor/noovo.py @@ -0,0 +1,52 @@ +# coding: utf-8 +from __future__ import unicode_literals +from .common import InfoExtractor + + +class NoovoIE(InfoExtractor): + IE_NAME = 'Noovo' + IE_DESC = 'VTele, Max, MusiquePlus' + _VALID_URL = r'https?://(?:[a-z0-9-]+\.)?noovo\.ca/videos/(?P<id>[a-z0-9-]+/[a-z0-9-]+)' + _TESTS = [{ + 'url': 'http://noovo.ca/videos/rpm-plus/chrysler-imperial', + 'md5': '2fcc04d0a8f4a853fad91233c2fdd121', + 'info_dict': { + 'id': '5386045029001', + 'description': 'Antoine présente des véhicules qu\'il aperçoit sur la rue.', + 'ext': 'mp4', + 'timestamp': 1491399228, + 'title': 'Chrysler Imperial', + 'upload_date': '20170405', + 'uploader_id': '618566855001' + } + }, { + 'url': 'http://noovo.ca/videos/l-amour-est-dans-le-pre/episode-13-8', + 'md5': '1199e96fbb93f2d42717115f72097b6b', + 'info_dict': { + 'id': '5395865725001', + 'description': 'md5:336d5ebc5436534e61d16e63ddfca327', + 'ext': 'mp4', + 'timestamp': 1492019320, + 'title': 'md5:2895fdc124639be0ef64ea0d06f5e493', + 'upload_date': '20170412', + 
'uploader_id': '618566855001' + } + }, { + 'url': 'http://interventions.noovo.ca/911/video/intoxication-aux-drogues-dures/?autoplay=1', + 'only_matching': True + }] + API_URL_TEMPLATE = 'http://api.noovo.ca/api/v1/pages/single-episode/%s' + BRIGHTCOVE_URL_TEMPLATE = 'http://players.brightcove.net/618566855001/default_default/index.html?videoId=%s' + + def _real_extract(self, url): + video_id = self._match_id(url) + api_url = self.API_URL_TEMPLATE % video_id + api_content = self._download_json(api_url, video_id) + + brightcove_id = api_content.get('data').get('brightcoveId') + if not brightcove_id: + brightcove_id = api_content.get('data').get('contents')[0].get('brightcoveId') + + return self.url_result( + self.BRIGHTCOVE_URL_TEMPLATE % brightcove_id, 'BrightcoveNew', brightcove_id + ) -- cgit 1.4.1 From 9dac2cec2d7e31b65cf063164b3a99f257a86a63 Mon Sep 17 00:00:00 2001 From: slocum <piotr5151@interia.pl> Date: Sun, 9 Apr 2017 12:02:44 +0200 Subject: [zaq1] Add new extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/zaq1.py | 67 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 68 insertions(+) create mode 100644 youtube_dl/extractor/zaq1.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 612007a46..c0020dd7d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1300,5 +1300,6 @@ from .youtube import ( YoutubeWatchLaterIE, ) from .zapiks import ZapiksIE +from .zaq1 import Zaq1IE from .zdf import ZDFIE, ZDFChannelIE from .zingmp3 import ZingMp3IE diff --git a/youtube_dl/extractor/zaq1.py b/youtube_dl/extractor/zaq1.py new file mode 100644 index 000000000..33068f6b5 --- /dev/null +++ b/youtube_dl/extractor/zaq1.py @@ -0,0 +1,67 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + unified_strdate, + int_or_none +) + + +class Zaq1IE(InfoExtractor): + _VALID_URL = r'http://(?:www\.)?zaq1\.pl/video/(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'http://zaq1.pl/video/xev0e', + 'md5': '24a5eb3f052e604ae597c4d0d19b351e', + 'info_dict': { + 'id': 'xev0e', + 'title': 'DJ NA WESELE. 
TANIEC Z FIGURAMI.węgrów/sokołów podlaski/siedlce/mińsk mazowiecki/warszawa', + 'ext': 'mp4', + 'duration': 511, + 'uploader': 'Anonim', + 'upload_date': '20170330', + } + }, { + 'url': 'http://zaq1.pl/video/x80nc', + 'md5': '1245973520adc78139928a820959d9c5', + 'info_dict': { + 'id': 'x80nc', + 'title': 'DIY Inspiration Challenge #86 | koraliki | gwiazdka na choinkę z koralików i drutu', + 'ext': 'mp4', + 'duration': 438, + 'uploader': 'Anonim', + 'upload_date': '20170404', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex( + r'(?s)<h1>\s*<span.+class="watch-title".+title="([^"]+)">\1\s*</span>\s*</h1>', webpage, 'title') + + div = self._search_regex(r'(?s)(?P<div><div.+id=(["\'])video_player\2.+</div>)', webpage, 'video url', group='div') + video_url = self._search_regex(r'data-video-url="(http[^"]+)"', div, 'video url') + + ext = self._search_regex(r'data-file-extension="([^"]+)"', div, 'ext', None, False) + duration = int_or_none(self._search_regex(r'data-duration="([^"]+)"', div, 'duration', None, False)) + thumbnail = self._search_regex(r'data-photo-url="([^"]+)"', div, 'thumbnail', None, False) + + upload_date = unified_strdate(self._search_regex(r'<strong\s+class="watch-time-text">\s*Opublikowany\s+([0-9]{4}-[0-9]{2}-[0-9]{2})', webpage, 'upload date')) + uploader = self._search_regex(r'<div\s+id="watch7-user-header">.*Wideo dodał:\s*<a[^>]*>\s*([^<]+)\s*</a>', webpage, 'uploader') + + return { + 'id': video_id, + 'title': title, + 'formats': [{ + 'url': video_url, + 'ext': ext, + 'http_headers': {'Referer': url}, + }], + 'thumbnail': thumbnail, + 'uploader': uploader, + 'upload_date': upload_date, + 'duration': duration, + } -- cgit 1.4.1 From 4ac6dc3732492839b8c5e9a144ac8ef8fd1539aa Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Fri, 5 May 2017 20:26:51 +0800 Subject: [vice] Support Vice articles (closes #12968) --- ChangeLog | 1 + youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/vice.py | 79 +++++++++++++++++++++++++++++++------- 3 files changed, 67 insertions(+), 14 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 23f83cc56..0908e2e93 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,6 +1,7 @@ version <unreleased> Extractors ++ [vice] Support vice articles (#12968) * [vice] Fix extraction for non en_us videos (#12967) * [gdcvault] Fix extraction for some videos (#12733) + [cda] Support birthday verification (#12789) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index c0020dd7d..1d7495910 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1125,6 +1125,7 @@ from .vgtv import ( from .vh1 import VH1IE from .vice import ( ViceIE, + ViceArticleIE, ViceShowIE, ) from .viceland import VicelandIE diff --git a/youtube_dl/extractor/vice.py b/youtube_dl/extractor/vice.py index 275fc1395..b2e95734b 100644 --- a/youtube_dl/extractor/vice.py +++ b/youtube_dl/extractor/vice.py @@ -88,7 +88,7 @@ class ViceBaseIE(AdobePassIE): class ViceIE(ViceBaseIE): - _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?P<locale>[^/]+/)(?:[^/]+/)?videos?/(?P<id>[^/?#&]+)' + _VALID_URL = r'https?://(?:.+?\.)?vice\.com/(?P<locale>[^/]+)/(?:[^/]+/)?videos?/(?P<id>[^/?#&]+)' _TESTS = [{ 'url': 'http://www.vice.com/video/cowboy-capitalists-part-1', @@ -100,19 +100,6 @@ class ViceIE(ViceBaseIE): 'duration': 725.983, }, 'add_ie': ['Ooyala'], - 
}, { - 'url': 'http://www.vice.com/video/how-to-hack-a-car', - 'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2', - 'info_dict': { - 'id': '3jstaBeXgAs', - 'ext': 'mp4', - 'title': 'How to Hack a Car: Phreaked Out (Episode 2)', - 'description': 'md5:ee95453f7ff495db8efe14ae8bf56f30', - 'uploader_id': 'MotherboardTV', - 'uploader': 'Motherboard', - 'upload_date': '20140529', - }, - 'add_ie': ['Youtube'], }, { 'url': 'https://video.vice.com/en_us/video/the-signal-from-tolva/5816510690b70e6c5fd39a56', 'info_dict': { @@ -208,3 +195,67 @@ class ViceShowIE(InfoExtractor): description = self._html_search_meta('description', webpage, 'description') return self.playlist_result(entries, show_id, title, description) + + +class ViceArticleIE(InfoExtractor): + _VALID_URL = r'https://www.vice.com/[^/]+/article/(?P<id>[^?#]+)' + + _TESTS = [{ + 'url': 'https://www.vice.com/en_us/article/on-set-with-the-woman-making-mormon-porn-in-utah', + 'info_dict': { + 'id': '58dc0a3dee202d2a0ccfcbd8', + 'ext': 'mp4', + 'title': 'Mormon War on Porn ', + 'description': 'md5:ad396a2481e7f8afb5ed486878421090', + 'uploader': 'VICE', + 'uploader_id': '57a204088cb727dec794c693', + 'timestamp': 1489160690, + 'upload_date': '20170310', + }, + 'params': { + # AES-encrypted m3u8 + 'skip_download': True, + }, + }, { + 'url': 'http://www.vice.com/video/how-to-hack-a-car', + 'md5': 'a7ecf64ee4fa19b916c16f4b56184ae2', + 'info_dict': { + 'id': '3jstaBeXgAs', + 'ext': 'mp4', + 'title': 'How to Hack a Car: Phreaked Out (Episode 2)', + 'description': 'md5:ee95453f7ff495db8efe14ae8bf56f30', + 'uploader_id': 'MotherboardTV', + 'uploader': 'Motherboard', + 'upload_date': '20140529', + }, + 'add_ie': ['Youtube'], + }] + + def _real_extract(self, url): + display_id = self._match_id(url) + + webpage = self._download_webpage(url, display_id) + + prefetch_data = self._parse_json(self._search_regex( + r'window\.__PREFETCH_DATA\s*=\s*({.*});', + webpage, 'prefetch data'), display_id) + body = prefetch_data['body'] + youtube_url = self._html_search_regex( + r'<iframe[^>]+src="(.*youtube\.com/.*)"', body, 'YouTube URL', default=None) + if youtube_url: + return { + '_type': 'url_transparent', + 'url': youtube_url, + 'display_id': display_id, + 'ie_key': 'Youtube', + } + + video_url = self._html_search_regex( + r'data-video-url="([^"]+)"', prefetch_data['embed_code'], 'video URL') + + return { + '_type': 'url_transparent', + 'url': video_url, + 'display_id': display_id, + 'ie_key': ViceIE.ie_key(), + } -- cgit 1.4.1 From b6eb74e340fd523fb0c710b3141d6f9c696069f2 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Mon, 8 May 2017 20:08:58 +0700 Subject: [nonktube] Add extractor (closes #8647, closes #13024) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/nonktube.py | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 38 insertions(+) create mode 100644 youtube_dl/extractor/nonktube.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 1d7495910..968cca9d2 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -663,6 +663,7 @@ from .nintendo import NintendoIE from .njpwworld import NJPWWorldIE from .nobelprize import NobelPrizeIE from .noco import NocoIE +from .nonktube import NonkTubeIE from .noovo import NoovoIE from .normalboots import NormalbootsIE from .nosvideo import NosVideoIE diff --git a/youtube_dl/extractor/nonktube.py b/youtube_dl/extractor/nonktube.py new file mode 100644 index 
000000000..ba6007cd1 --- /dev/null +++ b/youtube_dl/extractor/nonktube.py @@ -0,0 +1,37 @@ +from __future__ import unicode_literals + +from .nuevo import NuevoBaseIE + + +class NonkTubeIE(NuevoBaseIE): + _VALID_URL = r'https?://(?:www\.)?nonktube\.com/(?:(?:video|embed)/|media/nuevo/embed\.php\?.*?\bid=)(?P<id>\d+)' + _TESTS = [{ + 'url': 'https://www.nonktube.com/video/118636/sensual-wife-uncensored-fucked-in-hairy-pussy-and-facialized', + 'info_dict': { + 'id': '118636', + 'ext': 'mp4', + 'title': 'Sensual Wife Uncensored Fucked In Hairy Pussy And Facialized', + 'age_limit': 18, + 'duration': 1150.98, + }, + 'params': { + 'skip_download': True, + } + }, { + 'url': 'https://www.nonktube.com/embed/118636', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + nuevo_url = 'https://www.nonktube.com/media/nuevo/config.php?key=%s' % video_id + + info = self._extract_nuevo( + nuevo_url, video_id, headers={ + 'Referer': 'https://www.nonktube.com/media/nuevo/player.swf?config=%s' % nuevo_url + }) + info.update({ + 'age_limit': 18 + }) + return info -- cgit 1.4.1 From 6d1ded75021ca76f14c4fe8d3aa698704bae1dd9 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Tue, 9 May 2017 23:07:01 +0700 Subject: [francetv] Adapt to site redesign (closes #13034) --- youtube_dl/extractor/extractors.py | 4 +- youtube_dl/extractor/francetv.py | 215 ++++++++++++++----------------------- 2 files changed, 85 insertions(+), 134 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 968cca9d2..61a6f1013 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -350,9 +350,9 @@ from .foxsports import FoxSportsIE from .franceculture import FranceCultureIE from .franceinter import FranceInterIE from .francetv import ( - PluzzIE, - FranceTvInfoIE, FranceTVIE, + FranceTVEmbedIE, + FranceTVInfoIE, GenerationQuoiIE, CultureboxIE, ) diff --git a/youtube_dl/extractor/francetv.py b/youtube_dl/extractor/francetv.py index 48d43ae58..6e1a537a0 100644 --- a/youtube_dl/extractor/francetv.py +++ b/youtube_dl/extractor/francetv.py @@ -21,11 +21,13 @@ from .dailymotion import ( class FranceTVBaseInfoExtractor(InfoExtractor): - def _extract_video(self, video_id, catalogue): + def _extract_video(self, video_id, catalogue=None): info = self._download_json( - 'http://webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/?idDiffusion=%s&catalogue=%s' - % (video_id, catalogue), - video_id, 'Downloading video JSON') + 'https://sivideo.webservices.francetelevisions.fr/tools/getInfosOeuvre/v2/', + video_id, 'Downloading video JSON', query={ + 'idDiffusion': video_id, + 'catalogue': catalogue or '', + }) if info.get('status') == 'NOK': raise ExtractorError( @@ -109,27 +111,94 @@ class FranceTVBaseInfoExtractor(InfoExtractor): } -class PluzzIE(FranceTVBaseInfoExtractor): - IE_NAME = 'pluzz.francetv.fr' - _VALID_URL = r'https?://(?:m\.)?pluzz\.francetv\.fr/videos/(?P<id>.+?)\.html' +class FranceTVIE(FranceTVBaseInfoExtractor): + _VALID_URL = r'https?://(?:www\.)?france\.tv/(?:[^/]+/)+(?P<id>[^/]+)\.html' - # Can't use tests, videos expire in 7 days + _TESTS = [{ + 'url': 'https://www.france.tv/france-2/13h15-le-dimanche/140921-les-mysteres-de-jesus.html', + 'info_dict': { + 'id': '157550144', + 'ext': 'mp4', + 'title': '13h15, le dimanche... 
- Les mystères de Jésus', + 'description': 'md5:75efe8d4c0a8205e5904498ffe1e1a42', + 'timestamp': 1494156300, + 'upload_date': '20170507', + }, + 'params': { + # m3u8 downloads + 'skip_download': True, + }, + }, { + # france3 + 'url': 'https://www.france.tv/france-3/des-chiffres-et-des-lettres/139063-emission-du-mardi-9-mai-2017.html', + 'only_matching': True, + }, { + # france4 + 'url': 'https://www.france.tv/france-4/hero-corp/saison-1/134151-apres-le-calme.html', + 'only_matching': True, + }, { + # france5 + 'url': 'https://www.france.tv/france-5/c-a-dire/saison-10/137013-c-a-dire.html', + 'only_matching': True, + }, { + # franceo + 'url': 'https://www.france.tv/france-o/archipels/132249-mon-ancetre-l-esclave.html', + 'only_matching': True, + }, { + # france2 live + 'url': 'https://www.france.tv/france-2/direct.html', + 'only_matching': True, + }, { + 'url': 'https://www.france.tv/documentaires/histoire/136517-argentine-les-500-bebes-voles-de-la-dictature.html', + 'only_matching': True, + }, { + 'url': 'https://www.france.tv/jeux-et-divertissements/divertissements/133965-le-web-contre-attaque.html', + 'only_matching': True, + }] def _real_extract(self, url): display_id = self._match_id(url) webpage = self._download_webpage(url, display_id) - video_id = self._html_search_meta( - 'id_video', webpage, 'video id', default=None) + catalogue = None + video_id = self._search_regex( + r'data-main-video=(["\'])(?P<id>(?:(?!\1).)+)\1', + webpage, 'video id', default=None, group='id') + if not video_id: - video_id = self._search_regex( - r'data-diffusion=["\'](\d+)', webpage, 'video id') + video_id, catalogue = self._html_search_regex( + r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"', + webpage, 'video ID').split('@') + return self._extract_video(video_id, catalogue) - return self._extract_video(video_id, 'Pluzz') +class FranceTVEmbedIE(FranceTVBaseInfoExtractor): + _VALID_URL = r'https?://embed\.francetv\.fr/*\?.*?\bue=(?P<id>[^&]+)' -class FranceTvInfoIE(FranceTVBaseInfoExtractor): + _TEST = { + 'url': 'http://embed.francetv.fr/?ue=7fd581a2ccf59d2fc5719c5c13cf6961', + 'info_dict': { + 'id': 'NI_983319', + 'ext': 'mp4', + 'title': 'Le Pen Reims', + 'upload_date': '20170505', + 'timestamp': 1493981780, + 'duration': 16, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + video = self._download_json( + 'http://api-embed.webservices.francetelevisions.fr/key/%s' % video_id, + video_id) + + return self._extract_video(video['video_id'], video.get('catalog')) + + +class FranceTVInfoIE(FranceTVBaseInfoExtractor): IE_NAME = 'francetvinfo.fr' _VALID_URL = r'https?://(?:www|mobile|france3-regions)\.francetvinfo\.fr/(?:[^/]+/)*(?P<title>[^/?#&.]+)' @@ -233,124 +302,6 @@ class FranceTvInfoIE(FranceTVBaseInfoExtractor): return self._extract_video(video_id, catalogue) -class FranceTVIE(FranceTVBaseInfoExtractor): - IE_NAME = 'francetv' - IE_DESC = 'France 2, 3, 4, 5 and Ô' - _VALID_URL = r'''(?x) - https?:// - (?: - (?:www\.)?france[2345o]\.fr/ - (?: - emissions/[^/]+/(?:videos|diffusions)| - emission/[^/]+| - videos| - jt - ) - /| - embed\.francetv\.fr/\?ue= - ) - (?P<id>[^/?]+) - ''' - - _TESTS = [ - # france2 - { - 'url': 'http://www.france2.fr/emissions/13h15-le-samedi-le-dimanche/videos/75540104', - 'md5': 'c03fc87cb85429ffd55df32b9fc05523', - 'info_dict': { - 'id': '109169362', - 'ext': 'flv', - 'title': '13h15, le dimanche...', - 'description': 'md5:9a0932bb465f22d377a449be9d1a0ff7', - 'upload_date': '20140914', - 'timestamp': 1410693600, 
- }, - }, - # france3 - { - 'url': 'http://www.france3.fr/emissions/pieces-a-conviction/diffusions/13-11-2013_145575', - 'md5': '679bb8f8921f8623bd658fa2f8364da0', - 'info_dict': { - 'id': '000702326_CAPP_PicesconvictionExtrait313022013_120220131722_Au', - 'ext': 'mp4', - 'title': 'Le scandale du prix des médicaments', - 'description': 'md5:1384089fbee2f04fc6c9de025ee2e9ce', - 'upload_date': '20131113', - 'timestamp': 1384380000, - }, - }, - # france4 - { - 'url': 'http://www.france4.fr/emissions/hero-corp/videos/rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', - 'md5': 'a182bf8d2c43d88d46ec48fbdd260c1c', - 'info_dict': { - 'id': 'rhozet_herocorp_bonus_1_20131106_1923_06112013172108_F4', - 'ext': 'mp4', - 'title': 'Hero Corp Making of - Extrait 1', - 'description': 'md5:c87d54871b1790679aec1197e73d650a', - 'upload_date': '20131106', - 'timestamp': 1383766500, - }, - }, - # france5 - { - 'url': 'http://www.france5.fr/emissions/c-a-dire/videos/quels_sont_les_enjeux_de_cette_rentree_politique__31-08-2015_908948?onglet=tous&page=1', - 'md5': 'f6c577df3806e26471b3d21631241fd0', - 'info_dict': { - 'id': '123327454', - 'ext': 'flv', - 'title': 'C à dire ?! - Quels sont les enjeux de cette rentrée politique ?', - 'description': 'md5:4a0d5cb5dce89d353522a84462bae5a4', - 'upload_date': '20150831', - 'timestamp': 1441035120, - }, - }, - # franceo - { - 'url': 'http://www.franceo.fr/jt/info-soir/18-07-2015', - 'md5': '47d5816d3b24351cdce512ad7ab31da8', - 'info_dict': { - 'id': '125377621', - 'ext': 'flv', - 'title': 'Infô soir', - 'description': 'md5:01b8c6915a3d93d8bbbd692651714309', - 'upload_date': '20150718', - 'timestamp': 1437241200, - 'duration': 414, - }, - }, - { - # francetv embed - 'url': 'http://embed.francetv.fr/?ue=8d7d3da1e3047c42ade5a5d7dfd3fc87', - 'info_dict': { - 'id': 'EV_30231', - 'ext': 'flv', - 'title': 'Alcaline, le concert avec Calogero', - 'description': 'md5:61f08036dcc8f47e9cfc33aed08ffaff', - 'upload_date': '20150226', - 'timestamp': 1424989860, - 'duration': 5400, - }, - }, - { - 'url': 'http://www.france4.fr/emission/highlander/diffusion-du-17-07-2015-04h05', - 'only_matching': True, - }, - { - 'url': 'http://www.franceo.fr/videos/125377617', - 'only_matching': True, - } - ] - - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - video_id, catalogue = self._html_search_regex( - r'(?:href=|player\.setVideo\(\s*)"http://videos?\.francetv\.fr/video/([^@]+@[^"]+)"', - webpage, 'video ID').split('@') - return self._extract_video(video_id, catalogue) - - class GenerationQuoiIE(InfoExtractor): IE_NAME = 'france2.fr:generation-quoi' _VALID_URL = r'https?://generation-quoi\.france2\.fr/portrait/(?P<id>[^/?#]+)' -- cgit 1.4.1 From 7274f3d0e9e7d76d00fa3d438df1e1dc43143435 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Fri, 12 May 2017 18:44:10 +0800 Subject: [douyu] Support Douyu shows (closes #12228) --- ChangeLog | 6 +++ youtube_dl/extractor/douyutv.py | 82 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 5 ++- 3 files changed, 92 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 4775f8695..43431ac93 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version <unreleased> + +Extractor ++ [douyu] Support Douyu shows (#12228) + + version 2017.05.09 Core diff --git a/youtube_dl/extractor/douyutv.py b/youtube_dl/extractor/douyutv.py index d22133d24..9757f4422 100644 --- 
a/youtube_dl/extractor/douyutv.py +++ b/youtube_dl/extractor/douyutv.py @@ -3,11 +3,14 @@ from __future__ import unicode_literals import time import hashlib +import re from .common import InfoExtractor from ..utils import ( ExtractorError, unescapeHTML, + unified_strdate, + urljoin, ) @@ -117,3 +120,82 @@ class DouyuTVIE(InfoExtractor): 'uploader': uploader, 'is_live': True, } + + +class DouyuShowIE(InfoExtractor): + _VALID_URL = r'https?://v(?:mobile)?\.douyu\.com/show/(?P<id>[0-9a-zA-Z]+)' + + _TESTS = [{ + 'url': 'https://v.douyu.com/show/rjNBdvnVXNzvE2yw', + 'md5': '0c2cfd068ee2afe657801269b2d86214', + 'info_dict': { + 'id': 'rjNBdvnVXNzvE2yw', + 'ext': 'mp4', + 'title': '陈一发儿:砒霜 我有个室友系列!04-01 22点场', + 'duration': 7150.08, + 'thumbnail': r're:^https?://.*\.jpg$', + 'uploader': '陈一发儿', + 'uploader_id': 'XrZwYelr5wbK', + 'uploader_url': 'https://v.douyu.com/author/XrZwYelr5wbK', + 'upload_date': '20170402', + }, + }, { + 'url': 'https://vmobile.douyu.com/show/rjNBdvnVXNzvE2yw', + 'only_matching': True, + }] + + def _real_extract(self, url): + url = url.replace('vmobile.', 'v.') + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + room_info = self._parse_json(self._search_regex( + r'var\s+\$ROOM\s*=\s*({.+});', webpage, 'room info'), video_id) + + video_info = None + + for trial in range(5): + # Sometimes Douyu rejects our request. Let's try it more times + try: + video_info = self._download_json( + 'https://vmobile.douyu.com/video/getInfo', video_id, + query={'vid': video_id}, + headers={ + 'Referer': url, + 'x-requested-with': 'XMLHttpRequest', + }) + break + except ExtractorError: + self._sleep(1, video_id) + + if not video_info: + raise ExtractorError('Can\'t fetch video info') + + formats = self._extract_m3u8_formats( + video_info['data']['video_url'], video_id, + entry_protocol='m3u8_native', ext='mp4') + + upload_date = unified_strdate(self._html_search_regex( + r'<em>上传时间:</em><span>([^<]+)</span>', webpage, + 'upload date', fatal=False)) + + uploader = uploader_id = uploader_url = None + mobj = re.search( + r'(?m)<a[^>]+href="/author/([0-9a-zA-Z]+)".+?<strong[^>]+title="([^"]+)"', + webpage) + if mobj: + uploader_id, uploader = mobj.groups() + uploader_url = urljoin(url, '/author/' + uploader_id) + + return { + 'id': video_id, + 'title': room_info['name'], + 'formats': formats, + 'duration': room_info.get('duration'), + 'thumbnail': room_info.get('pic'), + 'upload_date': upload_date, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'uploader_url': uploader_url, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 61a6f1013..510c35fcb 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -251,7 +251,10 @@ from .democracynow import DemocracynowIE from .dfb import DFBIE from .dhm import DHMIE from .dotsub import DotsubIE -from .douyutv import DouyuTVIE +from .douyutv import ( + DouyuShowIE, + DouyuTVIE, +) from .dplay import ( DPlayIE, DPlayItIE, -- cgit 1.4.1 From 7fc60f4ee9129c67aecacd6f9164e98dbf7601f3 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 13 May 2017 21:52:59 +0700 Subject: [upskill] Add extractor (closes #13043) --- youtube_dl/extractor/extractors.py | 4 + youtube_dl/extractor/upskill.py | 176 +++++++++++++++++++++++++++++++++++++ 2 files changed, 180 insertions(+) create mode 100644 youtube_dl/extractor/upskill.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py 
b/youtube_dl/extractor/extractors.py index 510c35fcb..811519925 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1102,6 +1102,10 @@ from .uplynk import ( UplynkIE, UplynkPreplayIE, ) +from .upskill import ( + UpskillIE, + UpskillCourseIE, +) from .urort import UrortIE from .urplay import URPlayIE from .usanetwork import USANetworkIE diff --git a/youtube_dl/extractor/upskill.py b/youtube_dl/extractor/upskill.py new file mode 100644 index 000000000..30297b4dd --- /dev/null +++ b/youtube_dl/extractor/upskill.py @@ -0,0 +1,176 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .wistia import WistiaIE +from ..compat import compat_str +from ..utils import ( + clean_html, + ExtractorError, + get_element_by_class, + urlencode_postdata, + urljoin, +) + + +class UpskillBaseIE(InfoExtractor): + _LOGIN_URL = 'http://upskillcourses.com/sign_in' + _NETRC_MACHINE = 'upskill' + + def _real_initialize(self): + self._login() + + def _login(self): + username, password = self._get_login_info() + if username is None: + return + + login_page, urlh = self._download_webpage_handle( + self._LOGIN_URL, None, 'Downloading login page') + + login_url = compat_str(urlh.geturl()) + + login_form = self._hidden_inputs(login_page) + + login_form.update({ + 'user[email]': username, + 'user[password]': password, + }) + + post_url = self._search_regex( + r'<form[^>]+action=(["\'])(?P<url>(?:(?!\1).)+)\1', login_page, + 'post url', default=login_url, group='url') + + if not post_url.startswith('http'): + post_url = urljoin(login_url, post_url) + + response = self._download_webpage( + post_url, None, 'Logging in', + data=urlencode_postdata(login_form), + headers={ + 'Content-Type': 'application/x-www-form-urlencoded', + 'Referer': login_url, + }) + + # Successful login + if any(re.search(p, response) for p in ( + r'class=["\']user-signout', + r'<a[^>]+\bhref=["\']/sign_out', + r'>\s*Log out\s*<')): + return + + message = get_element_by_class('alert', response) + if message is not None: + raise ExtractorError( + 'Unable to login: %s' % clean_html(message), expected=True) + + raise ExtractorError('Unable to log in') + + +class UpskillIE(UpskillBaseIE): + _VALID_URL = r'https?://(?:www\.)?upskillcourses\.com/courses/[^/]+/lectures/(?P<id>\d+)' + + _TESTS = [{ + 'url': 'http://upskillcourses.com/courses/essential-web-developer-course/lectures/1747100', + 'info_dict': { + 'id': 'uzw6zw58or', + 'ext': 'mp4', + 'title': 'Welcome to the Course!', + 'description': 'md5:8d66c13403783370af62ca97a7357bdd', + 'duration': 138.763, + 'timestamp': 1479846621, + 'upload_date': '20161122', + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'http://upskillcourses.com/courses/119763/lectures/1747100', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + wistia_url = WistiaIE._extract_url(webpage) + if not wistia_url: + if any(re.search(p, webpage) for p in ( + r'class=["\']lecture-contents-locked', + r'>\s*Lecture contents locked', + r'id=["\']lecture-locked')): + self.raise_login_required('Lecture contents locked') + + title = self._og_search_title(webpage, default=None) + + return { + '_type': 'url_transparent', + 'url': wistia_url, + 'ie_key': WistiaIE.ie_key(), + 'title': title, + } + + +class UpskillCourseIE(UpskillBaseIE): + _VALID_URL = r'https?://(?:www\.)?upskillcourses\.com/courses/(?:enrolled/)?(?P<id>[^/?#&]+)' + _TESTS = [{ + 
'url': 'http://upskillcourses.com/courses/essential-web-developer-course/', + 'info_dict': { + 'id': '119763', + 'title': 'The Essential Web Developer Course (Free)', + }, + 'playlist_count': 192, + }, { + 'url': 'http://upskillcourses.com/courses/119763/', + 'only_matching': True, + }, { + 'url': 'http://upskillcourses.com/courses/enrolled/119763', + 'only_matching': True, + }] + + @classmethod + def suitable(cls, url): + return False if UpskillIE.suitable(url) else super( + UpskillCourseIE, cls).suitable(url) + + def _real_extract(self, url): + course_id = self._match_id(url) + + webpage = self._download_webpage(url, course_id) + + course_id = self._search_regex( + r'data-course-id=["\'](\d+)', webpage, 'course id', + default=course_id) + + entries = [] + + for mobj in re.finditer( + r'(?s)(?P<li><li[^>]+class=(["\'])(?:(?!\2).)*?section-item[^>]+>.+?</li>)', + webpage): + li = mobj.group('li') + if 'fa-youtube-play' not in li: + continue + lecture_url = self._search_regex( + r'<a[^>]+href=(["\'])(?P<url>(?:(?!\1).)+)\1', li, + 'lecture url', default=None, group='url') + if not lecture_url: + continue + lecture_id = self._search_regex( + r'/lectures/(\d+)', lecture_url, 'lecture id', default=None) + title = self._html_search_regex( + r'<span[^>]+class=["\']lecture-name[^>]+>([^<]+)', li, + 'title', default=None) + entries.append( + self.url_result( + urljoin('http://upskillcourses.com/', lecture_url), + ie=UpskillIE.ie_key(), video_id=lecture_id, + video_title=clean_html(title))) + + course_title = self._html_search_regex( + (r'(?s)<img[^>]+class=["\']course-image[^>]+>\s*<h\d>(.+?)</h', + r'(?s)<h\d[^>]+class=["\']course-title[^>]+>(.+?)</h'), + webpage, 'course title', fatal=False) + + return self.playlist_result(entries, course_id, course_title) -- cgit 1.4.1 From efe9316703efa5dc1561dc1c5159a77c838f5ab0 Mon Sep 17 00:00:00 2001 From: phaer <phaer@phaer.org> Date: Sun, 8 Jan 2017 15:49:54 +0100 Subject: [orf:radio] Fix extraction Since oe1.orf.at has been updated, both ORF radios supported by youtube_dl use the same API. This commit honors this fact by merging both extractors into one. --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/orf.py | 113 +++++++++++++++++-------------------- 2 files changed, 53 insertions(+), 62 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 811519925..dc4f9f24b 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -736,8 +736,8 @@ from .openload import OpenloadIE from .ora import OraTVIE from .orf import ( ORFTVthekIE, - ORFOE1IE, ORFFM4IE, + ORFOE1IE, ORFIPTVIE, ) from .packtpub import ( diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py index 1e2c54e68..10e4a2e1c 100644 --- a/youtube_dl/extractor/orf.py +++ b/youtube_dl/extractor/orf.py @@ -2,8 +2,6 @@ from __future__ import unicode_literals import re -import calendar -import datetime from .common import InfoExtractor from ..compat import compat_str @@ -144,77 +142,25 @@ class ORFTVthekIE(InfoExtractor): } -class ORFOE1IE(InfoExtractor): - IE_NAME = 'orf:oe1' - IE_DESC = 'Radio Österreich 1' - _VALID_URL = r'https?://oe1\.orf\.at/(?:programm/|konsole\?.*?\btrack_id=)(?P<id>[0-9]+)' - - # Audios on ORF radio are only available for 7 days, so we can't add tests. 
- _TESTS = [{ - 'url': 'http://oe1.orf.at/konsole?show=on_demand#?track_id=394211', - 'only_matching': True, - }, { - 'url': 'http://oe1.orf.at/konsole?show=ondemand&track_id=443608&load_day=/programm/konsole/tag/20160726', - 'only_matching': True, - }] - - def _real_extract(self, url): - show_id = self._match_id(url) - data = self._download_json( - 'http://oe1.orf.at/programm/%s/konsole' % show_id, - show_id - ) - - timestamp = datetime.datetime.strptime('%s %s' % ( - data['item']['day_label'], - data['item']['time'] - ), '%d.%m.%Y %H:%M') - unix_timestamp = calendar.timegm(timestamp.utctimetuple()) - - return { - 'id': show_id, - 'title': data['item']['title'], - 'url': data['item']['url_stream'], - 'ext': 'mp3', - 'description': data['item'].get('info'), - 'timestamp': unix_timestamp - } - - -class ORFFM4IE(InfoExtractor): - IE_NAME = 'orf:fm4' - IE_DESC = 'radio FM4' - _VALID_URL = r'https?://fm4\.orf\.at/(?:7tage/?#|player/)(?P<date>[0-9]+)/(?P<show>\w+)' - - _TEST = { - 'url': 'http://fm4.orf.at/player/20160110/IS/', - 'md5': '01e736e8f1cef7e13246e880a59ad298', - 'info_dict': { - 'id': '2016-01-10_2100_tl_54_7DaysSun13_11244', - 'ext': 'mp3', - 'title': 'Im Sumpf', - 'description': 'md5:384c543f866c4e422a55f66a62d669cd', - 'duration': 7173, - 'timestamp': 1452456073, - 'upload_date': '20160110', - }, - 'skip': 'Live streams on FM4 got deleted soon', - } - +class ORFRadioIE(InfoExtractor): def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) + station = mobj.group('station') show_date = mobj.group('date') show_id = mobj.group('show') + if station == 'fm4': + show_id = '4%s' % show_id + data = self._download_json( - 'http://audioapi.orf.at/fm4/json/2.0/broadcasts/%s/4%s' % (show_date, show_id), + 'http://audioapi.orf.at/%s/api/json/current/broadcast/%s/%s' % (station, show_id, show_date), show_id ) def extract_entry_dict(info, title, subtitle): return { 'id': info['loopStreamId'].replace('.mp3', ''), - 'url': 'http://loopstream01.apa.at/?channel=fm4&id=%s' % info['loopStreamId'], + 'url': 'http://loopstream01.apa.at/?channel=%s&id=%s' % (station, info['loopStreamId']), 'title': title, 'description': subtitle, 'duration': (info['end'] - info['start']) / 1000, @@ -233,6 +179,51 @@ class ORFFM4IE(InfoExtractor): } +class ORFFM4IE(ORFRadioIE): + IE_NAME = 'orf:fm4' + IE_DESC = 'radio FM4' + _VALID_URL = r'https?://(?P<station>fm4)\.orf\.at/(?:7tage/?#|player/)(?P<date>[0-9]+)/(?P<show>\w+)' + + _TESTS = [ + { + 'url': 'http://fm4.orf.at/player/20170107/CC', + 'md5': '2b0be47375432a7ef104453432a19212', + 'info_dict': { + 'id': '2017-01-07_2100_tl_54_7DaysSat18_31295', + 'ext': 'mp3', + 'title': 'Solid Steel Radioshow', + 'description': 'Die Mixshow von Coldcut und Ninja Tune.', + 'duration': 3599, + 'timestamp': 1483819257, + 'upload_date': '20170107', + }, + 'skip': 'Shows from ORF radios are only available for 7 days.' + } + ] + + +class ORFOE1IE(ORFRadioIE): + IE_NAME = 'orf:oe1' + IE_DESC = 'Radio Österreich 1' + _VALID_URL = r'https?://(?P<station>oe1)\.orf\.at/(?:7tage/?#|player/)(?P<date>[0-9]+)/(?P<show>\w+)' + + _TESTS = [ + { + 'url': 'http://oe1.orf.at/player/20170108/456544', + 'md5': '34d8a6e67ea888293741c86a099b745b', + 'info_dict': { + 'id': '2017-01-08_0759_tl_51_7DaysSun6_256141', + 'ext': 'mp3', + 'title': 'Morgenjournal', + 'duration': 609, + 'timestamp': 1483858796, + 'upload_date': '20170108', + }, + 'skip': 'Shows from ORF radios are only available for 7 days.' 
+ } + ] + + class ORFIPTVIE(InfoExtractor): IE_NAME = 'orf:iptv' IE_DESC = 'iptv.ORF.at' -- cgit 1.4.1 From 0de136341a9b6384098e14e4a0a1a5be438abdf2 Mon Sep 17 00:00:00 2001 From: Timendum <timedum@gmail.com> Date: Tue, 2 May 2017 16:17:15 +0200 Subject: [mediaset] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/mediaset.py | 118 +++++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+) create mode 100644 youtube_dl/extractor/mediaset.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index dc4f9f24b..ed603eb29 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -545,6 +545,7 @@ from .mangomolo import ( ) from .matchtv import MatchTVIE from .mdr import MDRIE +from .mediaset import MediasetIE from .medici import MediciIE from .meipai import MeipaiIE from .melonvod import MelonVODIE diff --git a/youtube_dl/extractor/mediaset.py b/youtube_dl/extractor/mediaset.py new file mode 100644 index 000000000..4b1489524 --- /dev/null +++ b/youtube_dl/extractor/mediaset.py @@ -0,0 +1,118 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import ( + InfoExtractor, + ExtractorError +) +from ..utils import ( + determine_ext, + parse_duration, + unified_strdate +) + + +class MediasetIE(InfoExtractor): + _VALID_URL = r'https?://www\.video\.mediaset\.it/(?:(?:video|on-demand)/(?:.+)_|player/playerIFrame(?:Twitter)?\.shtml\?id=)(?P<id>[0-9]+)(?:.html|&.+)' + _TESTS = [{ + # full episode + 'url': 'http://www.video.mediaset.it/video/hello_goodbye/full/quarta-puntata_661824.html', + 'md5': '9b75534d42c44ecef7bf1ffeacb7f85d', + 'info_dict': { + 'id': '661824', + 'ext': 'mp4', + 'title': 'Quarta puntata', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'md5:7183696d6df570e3412a5ef74b27c5e2', + 'uploader': 'mediaset' + } + }, { + # on demand + 'url': 'http://www.video.mediaset.it/video/domenica_live/interviste/il-fenomeno-elettra-lamborghini_716283.html', + 'md5': '81c57566bf2ee02e995f5342f079ca25', + 'info_dict': { + 'id': '716283', + 'ext': 'mp4', + 'title': 'Il fenomeno Elettra Lamborghini', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'md5:dabf0e7cf48fc6d0a3417b989028748a', + 'uploader': 'mediaset' + } + }, { + # clip + 'url': 'http://www.video.mediaset.it/video/gogglebox/clip/un-grande-classico-della-commedia-sexy_661680.html', + 'md5': '189ca72fe399db80dbfa595a4abf42d0', + 'info_dict': { + 'id': '661680', + 'ext': 'mp4', + 'title': 'Un grande classico della commedia sexy', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'Un film che riesce a risvegliare i sensi di Gigi.', + 'uploader': 'mediaset' + } + }, { + # iframe simple + 'url': 'http://www.video.mediaset.it/player/playerIFrame.shtml?id=665924&autoplay=true', + 'md5': '308430901e55e1ad83dddb4be2a4454a', + 'info_dict': { + 'id': '665924', + 'ext': 'mp4', + 'title': 'Gianna Nannini incontra i fan a Milano', + 'thumbnail': 're:^https?://.*\.jpg$', + 'description': 'La cantante parla del nuovo libro', + 'uploader': 'mediaset' + } + }, { + # iframe twitter (from http://www.wittytv.it/se-prima-mi-fidavo-zero/) + 'url': 'https://www.video.mediaset.it/player/playerIFrameTwitter.shtml?id=665104&playrelated=false&autoplay=false&related=true&hidesocial=true', + 'md5': '6f53a834b3b5eac1ebc2037ccf7194d0', + 'info_dict': { + 'id': '665104', + 'ext': 'mp4', + 'title': '\"Se prima mi fidavo zero...\"', + 'thumbnail': 're:^https?://.*\.jpg$', + 
'description': 'Una piccola anteprima della prossima puntata del Trono Classico', + 'uploader': 'mediaset' + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + formats = [] + uploader = None + categories = None + + mediainfo = self._download_json( + 'http://plr.video.mediaset.it/html/metainfo.sjson?id=%s' % video_id, + video_id, 'Downloading video info JSON').get('video') + + if 'brand-info' in mediainfo: + uploader = mediainfo.get('brand-info').get('publisher') + categories = [mediainfo.get('brand-info').get('category')] + + cnd = self._download_json( + 'http://cdnsel01.mediaset.net/GetCdn.aspx?streamid=%s&format=json' % video_id, + video_id, 'Downloading video CND JSON') + + if not cnd.get('videoList'): + raise ExtractorError('Video not found') + + for media_url in cnd.get('videoList'): + formats.append({ + 'url': media_url, + 'ext': determine_ext(media_url) + }) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': mediainfo.get('title'), + 'formats': formats, + 'description': mediainfo.get('short-description'), + 'uploader': uploader, + 'thumbnail': mediainfo.get('thumbnail'), + 'duration': parse_duration(mediainfo.get('duration')), + 'release_date': unified_strdate(mediainfo.get('production-date')), + 'webpage_url': mediainfo.get('url'), + 'categories': categories + } -- cgit 1.4.1 From c130f0a37b5c31fa559373ec319f79830c591c22 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan <yan12125@gmail.com> Date: Fri, 26 May 2017 23:04:42 +0800 Subject: [tudou] Merge into youku extractor (fixes #12214) Also, there are no tudou playlists anymore. All playlist URLs points to youku playlists. --- ChangeLog | 1 + youtube_dl/extractor/extractors.py | 5 -- youtube_dl/extractor/tudou.py | 132 ------------------------------------- youtube_dl/extractor/youku.py | 16 ++++- 4 files changed, 15 insertions(+), 139 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 2ac471f8e..936ba512c 100644 --- a/ChangeLog +++ b/ChangeLog @@ -5,6 +5,7 @@ Core * [postprocessor/ffmpeg] Fix metadata filename handling on Python 2 (#13182) Extractors +* [tudou] Merge into youku extractor (#12214) * [youku:show] Fix extraction * [youku] Fix extraction (#13191) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index ed603eb29..558c910d4 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1019,11 +1019,6 @@ from .trilulilu import TriluliluIE from .trutv import TruTVIE from .tube8 import Tube8IE from .tubitv import TubiTvIE -from .tudou import ( - TudouIE, - TudouPlaylistIE, - TudouAlbumIE, -) from .tumblr import TumblrIE from .tunein import ( TuneInClipIE, diff --git a/youtube_dl/extractor/tudou.py b/youtube_dl/extractor/tudou.py index 2aae55e7e..7421378a8 100644 --- a/youtube_dl/extractor/tudou.py +++ b/youtube_dl/extractor/tudou.py @@ -3,138 +3,6 @@ from __future__ import unicode_literals from .common import InfoExtractor -from ..compat import compat_str -from ..utils import ( - ExtractorError, - int_or_none, - InAdvancePagedList, - float_or_none, - unescapeHTML, -) - - -class TudouIE(InfoExtractor): - IE_NAME = 'tudou' - _VALID_URL = r'https?://(?:www\.)?tudou\.com/(?:(?:programs|wlplay)/view|(?:listplay|albumplay)/[\w-]{11})/(?P<id>[\w-]{11})' - _TESTS = [{ - 'url': 'http://www.tudou.com/listplay/zzdE77v6Mmo/2xN2duXMxmw.html', - 'md5': '140a49ed444bd22f93330985d8475fcb', - 'info_dict': { - 'id': '159448201', - 'ext': 'f4v', - 'title': '卡马乔国足开大脚长传冲吊集锦', - 
'thumbnail': r're:^https?://.*\.jpg$', - 'timestamp': 1372113489000, - 'description': '卡马乔卡家军,开大脚先进战术不完全集锦!', - 'duration': 289.04, - 'view_count': int, - 'filesize': int, - } - }, { - 'url': 'http://www.tudou.com/programs/view/ajX3gyhL0pc/', - 'info_dict': { - 'id': '117049447', - 'ext': 'f4v', - 'title': 'La Sylphide-Bolshoi-Ekaterina Krysanova & Vyacheslav Lopatin 2012', - 'thumbnail': r're:^https?://.*\.jpg$', - 'timestamp': 1349207518000, - 'description': 'md5:294612423894260f2dcd5c6c04fe248b', - 'duration': 5478.33, - 'view_count': int, - 'filesize': int, - } - }] - - _PLAYER_URL = 'http://js.tudouui.com/bin/lingtong/PortalPlayer_177.swf' - - # Translated from tudou/tools/TVCHelper.as in PortalPlayer_193.swf - # 0001, 0002 and 4001 are not included as they indicate temporary issues - TVC_ERRORS = { - '0003': 'The video is deleted or does not exist', - '1001': 'This video is unavailable due to licensing issues', - '1002': 'This video is unavailable as it\'s under review', - '1003': 'This video is unavailable as it\'s under review', - '3001': 'Password required', - '5001': 'This video is available in Mainland China only due to licensing issues', - '7001': 'This video is unavailable', - '8001': 'This video is unavailable due to licensing issues', - } - - def _url_for_id(self, video_id, quality=None): - info_url = 'http://v2.tudou.com/f?id=' + compat_str(video_id) - if quality: - info_url += '&hd' + quality - xml_data = self._download_xml(info_url, video_id, 'Opening the info XML page') - error = xml_data.attrib.get('error') - if error is not None: - raise ExtractorError('Tudou said: %s' % error, expected=True) - final_url = xml_data.text - return final_url - - def _real_extract(self, url): - video_id = self._match_id(url) - item_data = self._download_json( - 'http://www.tudou.com/tvp/getItemInfo.action?ic=%s' % video_id, video_id) - - youku_vcode = item_data.get('vcode') - if youku_vcode: - return self.url_result('youku:' + youku_vcode, ie='Youku') - - if not item_data.get('itemSegs'): - tvc_code = item_data.get('tvcCode') - if tvc_code: - err_msg = self.TVC_ERRORS.get(tvc_code) - if err_msg: - raise ExtractorError('Tudou said: %s' % err_msg, expected=True) - raise ExtractorError('Unexpected error %s returned from Tudou' % tvc_code) - raise ExtractorError('Unxpected error returned from Tudou') - - title = unescapeHTML(item_data['kw']) - description = item_data.get('desc') - thumbnail_url = item_data.get('pic') - view_count = int_or_none(item_data.get('playTimes')) - timestamp = int_or_none(item_data.get('pt')) - - segments = self._parse_json(item_data['itemSegs'], video_id) - # It looks like the keys are the arguments that have to be passed as - # the hd field in the request url, we pick the higher - # Also, filter non-number qualities (see issue #3643). 
- quality = sorted(filter(lambda k: k.isdigit(), segments.keys()), - key=lambda k: int(k))[-1] - parts = segments[quality] - len_parts = len(parts) - if len_parts > 1: - self.to_screen('%s: found %s parts' % (video_id, len_parts)) - - def part_func(partnum): - part = parts[partnum] - part_id = part['k'] - final_url = self._url_for_id(part_id, quality) - ext = (final_url.split('?')[0]).split('.')[-1] - return [{ - 'id': '%s' % part_id, - 'url': final_url, - 'ext': ext, - 'title': title, - 'thumbnail': thumbnail_url, - 'description': description, - 'view_count': view_count, - 'timestamp': timestamp, - 'duration': float_or_none(part.get('seconds'), 1000), - 'filesize': int_or_none(part.get('size')), - 'http_headers': { - 'Referer': self._PLAYER_URL, - }, - }] - - entries = InAdvancePagedList(part_func, len_parts, 1) - - return { - '_type': 'multi_video', - 'entries': entries, - 'id': video_id, - 'title': title, - } class TudouPlaylistIE(InfoExtractor): diff --git a/youtube_dl/extractor/youku.py b/youtube_dl/extractor/youku.py index e081cfc19..ab97466d2 100644 --- a/youtube_dl/extractor/youku.py +++ b/youtube_dl/extractor/youku.py @@ -22,7 +22,9 @@ class YoukuIE(InfoExtractor): IE_DESC = '优酷' _VALID_URL = r'''(?x) (?: - http://(?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)| + https?://( + (?:v|player)\.youku\.com/(?:v_show/id_|player\.php/sid/)| + video\.tudou\.com/v/)| youku:) (?P<id>[A-Za-z0-9]+)(?:\.html|/v\.swf|) ''' @@ -71,6 +73,16 @@ class YoukuIE(InfoExtractor): 'ext': 'mp4', 'title': '我的世界☆明月庄主☆车震猎杀☆杀人艺术Minecraft', }, + }, { + 'url': 'http://video.tudou.com/v/XMjIyNzAzMTQ4NA==.html?f=46177805', + 'info_dict': { + 'id': 'XMjIyNzAzMTQ4NA', + 'ext': 'mp4', + 'title': '卡马乔国足开大脚长传冲吊集锦', + }, + }, { + 'url': 'http://video.tudou.com/v/XMjE4ODI3OTg2MA==.html', + 'only_matching': True, }] @staticmethod @@ -107,7 +119,7 @@ class YoukuIE(InfoExtractor): # request basic data basic_data_params = { 'vid': video_id, - 'ccode': '0401', + 'ccode': '0402' if 'tudou.com' in url else '0401', 'client_ip': '192.168.1.1', 'utid': cna, 'client_ts': time.time() / 1000, -- cgit 1.4.1 From 1e0d65f0bded292440d55ca37449038c8b5d9344 Mon Sep 17 00:00:00 2001 From: Mike Fährmann <mike_faehrmann@web.de> Date: Sun, 14 May 2017 10:04:42 +0200 Subject: [beam:vod] Add extractor --- youtube_dl/extractor/beampro.py | 121 ++++++++++++++++++++++++++++++++++--- youtube_dl/extractor/extractors.py | 5 +- 2 files changed, 116 insertions(+), 10 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/beampro.py b/youtube_dl/extractor/beampro.py index f3a9e3278..79c5fe5cd 100644 --- a/youtube_dl/extractor/beampro.py +++ b/youtube_dl/extractor/beampro.py @@ -6,16 +6,30 @@ from ..utils import ( ExtractorError, clean_html, compat_str, + float_or_none, int_or_none, parse_iso8601, try_get, + urljoin, ) -class BeamProLiveIE(InfoExtractor): +class BeamProBaseIE(InfoExtractor): + _RATINGS = {'family': 0, 'teen': 13, '18+': 18} + + def _extract_channel_info(self, chan): + user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id']) + return { + 'uploader': chan.get('token') or try_get( + chan, lambda x: x['user']['username'], compat_str), + 'uploader_id': compat_str(user_id) if user_id else None, + 'age_limit': self._RATINGS.get(chan.get('audience')), + } + + +class BeamProLiveIE(BeamProBaseIE): IE_NAME = 'Beam:live' _VALID_URL = r'https?://(?:\w+\.)?beam\.pro/(?P<id>[^/?#&]+)' - _RATINGS = {'family': 0, 'teen': 13, '18+': 18} _TEST = { 'url': 'http://www.beam.pro/niterhayven', 
'info_dict': { @@ -38,6 +52,10 @@ class BeamProLiveIE(InfoExtractor): }, } + @classmethod + def suitable(cls, url): + return False if BeamProVodIE.suitable(url) else super(BeamProLiveIE, cls).suitable(url) + def _real_extract(self, url): channel_name = self._match_id(url) @@ -55,19 +73,104 @@ class BeamProLiveIE(InfoExtractor): channel_name, ext='mp4', m3u8_id='hls', fatal=False) self._sort_formats(formats) - user_id = chan.get('userId') or try_get(chan, lambda x: x['user']['id']) - - return { + info = { 'id': compat_str(chan.get('id') or channel_name), 'title': self._live_title(chan.get('name') or channel_name), 'description': clean_html(chan.get('description')), 'thumbnail': try_get(chan, lambda x: x['thumbnail']['url'], compat_str), 'timestamp': parse_iso8601(chan.get('updatedAt')), - 'uploader': chan.get('token') or try_get( - chan, lambda x: x['user']['username'], compat_str), - 'uploader_id': compat_str(user_id) if user_id else None, - 'age_limit': self._RATINGS.get(chan.get('audience')), 'is_live': True, 'view_count': int_or_none(chan.get('viewersTotal')), 'formats': formats, } + info.update(self._extract_channel_info(chan)) + + return info + + +class BeamProVodIE(BeamProBaseIE): + IE_NAME = 'Beam:vod' + _VALID_URL = r'https?://(?:\w+\.)?beam\.pro/[^/?#&]+.*[?&]vod=(?P<id>\d+)' + _TEST = { + 'url': 'https://beam.pro/willow8714?vod=2259830', + 'md5': 'b2431e6e8347dc92ebafb565d368b76b', + 'info_dict': { + 'id': '2259830', + 'ext': 'mp4', + 'title': 'willow8714\'s Channel', + 'duration': 6828.15, + 'thumbnail': r're:https://.*source\.png$', + 'timestamp': 1494046474, + 'upload_date': '20170506', + 'uploader': 'willow8714', + 'uploader_id': '6085379', + 'age_limit': 13, + 'view_count': int, + }, + } + + def _extract_format(self, vod, vod_type): + if not vod.get('baseUrl'): + return [] + + if vod_type == 'hls': + filename, protocol = 'manifest.m3u8', 'm3u8' + elif vod_type == 'raw': + filename, protocol = 'source.mp4', 'https' + else: + return [] + + data = vod.get('data') or {} + + format_id = [vod_type] + if 'Height' in data: + format_id.append('%sp' % data['Height']) + + return [{ + 'url': urljoin(vod['baseUrl'], filename), + 'format_id': '-'.join(format_id), + 'ext': 'mp4', + 'protocol': protocol, + 'width': int_or_none(data.get('Width')), + 'height': int_or_none(data.get('Height')), + 'fps': int_or_none(data.get('Fps')), + 'tbr': int_or_none(data.get('Bitrate'), 1000), + }] + + def _real_extract(self, url): + vod_id = self._match_id(url) + + vod_info = self._download_json( + 'https://beam.pro/api/v1/recordings/%s' % vod_id, vod_id) + + state = vod_info.get('state') + if state != 'AVAILABLE': + raise ExtractorError( + 'VOD %s is not available (state: %s)' % (vod_id, state), expected=True) + + formats = [] + thumbnail_url = None + + for vod in vod_info['vods']: + vod_type = vod.get('format') + if vod_type in ('hls', 'raw'): + formats.extend(self._extract_format(vod, vod_type)) + elif vod_type == 'thumbnail': + thumbnail_url = urljoin(vod.get('baseUrl'), 'source.png') + + self._sort_formats(formats) + + info = { + 'id': vod_id, + 'title': vod_info.get('name') or vod_id, + 'duration': float_or_none(vod_info.get('duration')), + 'thumbnail': thumbnail_url, + 'timestamp': parse_iso8601(vod_info.get('createdAt')), + 'view_count': int_or_none(vod_info.get('viewsTotal')), + 'formats': formats, + } + + chan = vod_info.get('channel') or {} + info.update(self._extract_channel_info(chan)) + + return info diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 
558c910d4..aecd08ac1 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -98,7 +98,10 @@ from .bbc import ( BBCCoUkPlaylistIE, BBCIE, ) -from .beampro import BeamProLiveIE +from .beampro import ( + BeamProLiveIE, + BeamProVodIE, +) from .beeg import BeegIE from .behindkink import BehindKinkIE from .bellmedia import BellMediaIE -- cgit 1.4.1 From cd750b731c41cd9e84c7b16f7c22b6a34da18452 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 3 Jun 2017 22:07:47 +0700 Subject: [godtv] Remove extractor (closes #13175) --- youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/godtv.py | 66 -------------------------------------- 2 files changed, 67 deletions(-) delete mode 100644 youtube_dl/extractor/godtv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index aecd08ac1..31e790155 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -392,7 +392,6 @@ from .globo import ( from .go import GoIE from .go90 import Go90IE from .godtube import GodTubeIE -from .godtv import GodTVIE from .golem import GolemIE from .googledrive import GoogleDriveIE from .googleplus import GooglePlusIE diff --git a/youtube_dl/extractor/godtv.py b/youtube_dl/extractor/godtv.py deleted file mode 100644 index c5d3b4e6a..000000000 --- a/youtube_dl/extractor/godtv.py +++ /dev/null @@ -1,66 +0,0 @@ -from __future__ import unicode_literals - -from .common import InfoExtractor -from .ooyala import OoyalaIE -from ..utils import js_to_json - - -class GodTVIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?god\.tv(?:/[^/]+)*/(?P<id>[^/?#&]+)' - _TESTS = [{ - 'url': 'http://god.tv/jesus-image/video/jesus-conference-2016/randy-needham', - 'info_dict': { - 'id': 'lpd3g2MzE6D1g8zFAKz8AGpxWcpu6o_3', - 'ext': 'mp4', - 'title': 'Randy Needham', - 'duration': 3615.08, - }, - 'params': { - 'skip_download': True, - } - }, { - 'url': 'http://god.tv/playlist/bible-study', - 'info_dict': { - 'id': 'bible-study', - }, - 'playlist_mincount': 37, - }, { - 'url': 'http://god.tv/node/15097', - 'only_matching': True, - }, { - 'url': 'http://god.tv/live/africa', - 'only_matching': True, - }, { - 'url': 'http://god.tv/liveevents', - 'only_matching': True, - }] - - def _real_extract(self, url): - display_id = self._match_id(url) - - webpage = self._download_webpage(url, display_id) - - settings = self._parse_json( - self._search_regex( - r'jQuery\.extend\(Drupal\.settings\s*,\s*({.+?})\);', - webpage, 'settings', default='{}'), - display_id, transform_source=js_to_json, fatal=False) - - ooyala_id = None - - if settings: - playlist = settings.get('playlist') - if playlist and isinstance(playlist, list): - entries = [ - OoyalaIE._build_url_result(video['content_id']) - for video in playlist if video.get('content_id')] - if entries: - return self.playlist_result(entries, display_id) - ooyala_id = settings.get('ooyala', {}).get('content_id') - - if not ooyala_id: - ooyala_id = self._search_regex( - r'["\']content_id["\']\s*:\s*(["\'])(?P<id>[\w-]+)\1', - webpage, 'ooyala id', group='id') - - return OoyalaIE._build_url_result(ooyala_id) -- cgit 1.4.1 From 62bafabc099de4ededf9e136e3c11e878749d066 Mon Sep 17 00:00:00 2001 From: Adam Voss <vossad01@gmail.com> Date: Sun, 4 Jun 2017 08:47:05 -0500 Subject: [bandcamp:weekly] Add extractor --- youtube_dl/extractor/bandcamp.py | 67 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 2 +- 2 files 
changed, 68 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/bandcamp.py b/youtube_dl/extractor/bandcamp.py index 489d0ba53..54fa8634c 100644 --- a/youtube_dl/extractor/bandcamp.py +++ b/youtube_dl/extractor/bandcamp.py @@ -17,6 +17,7 @@ from ..utils import ( parse_filesize, unescapeHTML, update_url_query, + unified_strdate, ) @@ -222,6 +223,10 @@ class BandcampAlbumIE(InfoExtractor): 'playlist_count': 2, }] + @classmethod + def suitable(cls, url): + return False if BandcampWeeklyIE.suitable(url) else super(BandcampAlbumIE, cls).suitable(url) + def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) uploader_id = mobj.group('subdomain') @@ -250,3 +255,65 @@ class BandcampAlbumIE(InfoExtractor): 'title': title, 'entries': entries, } + + +class BandcampWeeklyIE(InfoExtractor): + IE_NAME = 'Bandcamp:bandcamp_weekly' + _VALID_URL = r'https?://(?:www\.)?bandcamp\.com/?\?(?:.*&)?show=(?P<id>\d+)(?:$|[&#])' + _TESTS = [{ + 'url': 'https://bandcamp.com/?show=224', + 'md5': 'b00df799c733cf7e0c567ed187dea0fd', + 'info_dict': { + 'id': '224', + 'ext': 'opus', + 'title': 'BC Weekly April 4th 2017: Magic Moments', + 'description': 'Stones Throw\'s Vex Ruffin, plus up and coming singer Salami Rose Joe Louis, in conversation about their fantastic DIY albums.', + } + }, { + 'url': 'https://bandcamp.com/?blah/blah@&show=228', + 'only_matching': True + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + blob = self._parse_json( + self._search_regex( + r'data-blob=(["\'])(?P<blob>{.+?})\1', webpage, + 'blob', group='blob'), + video_id, transform_source=unescapeHTML) + + show = blob['bcw_show'] + + # This is desired because any invalid show id redirects to `bandcamp.com` + # which happens to expose the latest Bandcamp Weekly episode. 
+ video_id = compat_str(show['show_id']) + + def to_format_dictionaries(audio_stream): + dictionaries = [{'format_id': kvp[0], 'url': kvp[1]} for kvp in audio_stream.items()] + known_extensions = ['mp3', 'opus'] + + for dictionary in dictionaries: + for ext in known_extensions: + if ext in dictionary['format_id']: + dictionary['ext'] = ext + break + + return dictionaries + + formats = to_format_dictionaries(show['audio_stream']) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': show['audio_title'] + ': ' + show['subtitle'], + 'description': show.get('desc'), + 'duration': float_or_none(show.get('audio_duration')), + 'webpage_url': 'https://bandcamp.com/?show=' + video_id, + 'is_live': False, + 'release_date': unified_strdate(show.get('published_date')), + 'series': 'Bandcamp Weekly', + 'episode_id': compat_str(video_id), + 'formats': formats + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 31e790155..e1907314d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -90,7 +90,7 @@ from .azmedien import ( ) from .baidu import BaiduVideoIE from .bambuser import BambuserIE, BambuserChannelIE -from .bandcamp import BandcampIE, BandcampAlbumIE +from .bandcamp import BandcampIE, BandcampAlbumIE, BandcampWeeklyIE from .bbc import ( BBCCoUkIE, BBCCoUkArticleIE, -- cgit 1.4.1 From 70e7967202efa0acb6202ceedf87049ba72b94e0 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 11 Jun 2017 20:50:33 +0700 Subject: [newgrounds:playlist] Add extractor (closes #10611) --- youtube_dl/extractor/extractors.py | 5 +++- youtube_dl/extractor/newgrounds.py | 56 +++++++++++++++++++++++++++++++++++++- 2 files changed, 59 insertions(+), 2 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e1907314d..7e45232dd 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -636,7 +636,10 @@ from .neteasemusic import ( NetEaseMusicProgramIE, NetEaseMusicDjRadioIE, ) -from .newgrounds import NewgroundsIE +from .newgrounds import ( + NewgroundsIE, + NewgroundsPlaylistIE, +) from .newstube import NewstubeIE from .nextmedia import ( NextMediaIE, diff --git a/youtube_dl/extractor/newgrounds.py b/youtube_dl/extractor/newgrounds.py index ae4fe95ef..dc183caec 100644 --- a/youtube_dl/extractor/newgrounds.py +++ b/youtube_dl/extractor/newgrounds.py @@ -1,7 +1,12 @@ from __future__ import unicode_literals +import re + from .common import InfoExtractor -from ..utils import int_or_none +from ..utils import ( + extract_attributes, + int_or_none, +) class NewgroundsIE(InfoExtractor): @@ -82,3 +87,52 @@ class NewgroundsIE(InfoExtractor): 'uploader': uploader, 'formats': formats, } + + +class NewgroundsPlaylistIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?newgrounds\.com/(?:collection|[^/]+/search/[^/]+)/(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'https://www.newgrounds.com/collection/cats', + 'info_dict': { + 'id': 'cats', + 'title': 'Cats', + }, + 'playlist_mincount': 46, + }, { + 'url': 'http://www.newgrounds.com/portal/search/author/ZONE-SAMA', + 'info_dict': { + 'id': 'ZONE-SAMA', + 'title': 'Portal Search: ZONE-SAMA', + }, + 'playlist_mincount': 47, + }, { + 'url': 'http://www.newgrounds.com/audio/search/title/cats', + 'only_matching': True, + }] + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + 
title = self._search_regex( + r'<title>([^>]+)', webpage, 'title', default=None) + + # cut left menu + webpage = self._search_regex( + r'(?s)]+\bclass=["\']column wide(.+)', + webpage, 'wide column', default=webpage) + + entries = [] + for a, path, media_id in re.findall( + r'(]+\bhref=["\']/?((?:portal/view|audio/listen)/(\d+))[^>]+>)', + webpage): + a_class = extract_attributes(a).get('class') + if a_class not in ('item-portalsubmission', 'item-audiosubmission'): + continue + entries.append( + self.url_result( + 'https://www.newgrounds.com/%s' % path, + ie=NewgroundsIE.ie_key(), video_id=media_id)) + + return self.playlist_result(entries, playlist_id, title) -- cgit 1.4.1 From 560050669b1e98bc76c797256c6e21719541b2ac Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sun, 18 Jun 2017 20:18:51 +0700 Subject: [asiancrush] Add extractor (closes #13420) --- youtube_dl/extractor/asiancrush.py | 93 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 4 ++ 2 files changed, 97 insertions(+) create mode 100644 youtube_dl/extractor/asiancrush.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/asiancrush.py b/youtube_dl/extractor/asiancrush.py new file mode 100644 index 000000000..594c88c9c --- /dev/null +++ b/youtube_dl/extractor/asiancrush.py @@ -0,0 +1,93 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from .kaltura import KalturaIE +from ..utils import ( + extract_attributes, + remove_end, + urlencode_postdata, +) + + +class AsianCrushIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?asiancrush\.com/video/(?:[^/]+/)?0+(?P\d+)v\b' + _TESTS = [{ + 'url': 'https://www.asiancrush.com/video/012869v/women-who-flirt/', + 'md5': 'c3b740e48d0ba002a42c0b72857beae6', + 'info_dict': { + 'id': '1_y4tmjm5r', + 'ext': 'mp4', + 'title': 'Women Who Flirt', + 'description': 'md5:3db14e9186197857e7063522cb89a805', + 'timestamp': 1496936429, + 'upload_date': '20170608', + 'uploader_id': 'craig@crifkin.com', + }, + }, { + 'url': 'https://www.asiancrush.com/video/she-was-pretty/011886v-pretty-episode-3/', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + data = self._download_json( + 'https://www.asiancrush.com/wp-admin/admin-ajax.php', video_id, + data=urlencode_postdata({ + 'postid': video_id, + 'action': 'get_channel_kaltura_vars', + })) + + entry_id = data['entry_id'] + + return self.url_result( + 'kaltura:%s:%s' % (data['partner_id'], entry_id), + ie=KalturaIE.ie_key(), video_id=entry_id, + video_title=data.get('vid_label')) + + +class AsianCrushPlaylistIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?asiancrush\.com/series/0+(?P\d+)s\b' + _TEST = { + 'url': 'https://www.asiancrush.com/series/012481s/scholar-walks-night/', + 'info_dict': { + 'id': '12481', + 'title': 'Scholar Who Walks the Night', + 'description': 'md5:7addd7c5132a09fd4741152d96cce886', + }, + 'playlist_count': 20, + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + + webpage = self._download_webpage(url, playlist_id) + + entries = [] + + for mobj in re.finditer( + r']+href=(["\'])(?P%s.*?)\1[^>]*>' % AsianCrushIE._VALID_URL, + webpage): + attrs = extract_attributes(mobj.group(0)) + if attrs.get('class') == 'clearfix': + entries.append(self.url_result( + mobj.group('url'), ie=AsianCrushIE.ie_key())) + + title = remove_end( + self._html_search_regex( + r'(?s)]\bid=["\']movieTitle[^>]+>(.+?)', webpage, + 'title', default=None) or 
self._og_search_title( + webpage, default=None) or self._html_search_meta( + 'twitter:title', webpage, 'title', + default=None) or self._search_regex( + r'([^<]+)', webpage, 'title', fatal=False), + ' | AsianCrush') + + description = self._og_search_description( + webpage, default=None) or self._html_search_meta( + 'twitter:description', webpage, 'description', fatal=False) + + return self.playlist_result(entries, playlist_id, title, description) diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 7e45232dd..edbb4bdde 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -71,6 +71,10 @@ from .arte import ( TheOperaPlatformIE, ArteTVPlaylistIE, ) +from .asiancrush import ( + AsianCrushIE, + AsianCrushPlaylistIE, +) from .atresplayer import AtresPlayerIE from .atttechchannel import ATTTechChannelIE from .atvat import ATVAtIE -- cgit 1.4.1 From a1de83e5f01cc220fa45caee80b9159cc555609d Mon Sep 17 00:00:00 2001 From: Orn Date: Thu, 15 Jun 2017 22:29:27 +0000 Subject: [ruv] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/ruv.py | 31 +++++++++++++++++++++++++++++++ 2 files changed, 32 insertions(+) create mode 100644 youtube_dl/extractor/ruv.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index edbb4bdde..e97691daa 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -875,6 +875,7 @@ from .rutube import ( ) from .rutv import RUTVIE from .ruutu import RuutuIE +from .ruv import RuvIE from .sandia import SandiaIE from .safari import ( SafariIE, diff --git a/youtube_dl/extractor/ruv.py b/youtube_dl/extractor/ruv.py new file mode 100644 index 000000000..518fb5027 --- /dev/null +++ b/youtube_dl/extractor/ruv.py @@ -0,0 +1,31 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class RuvIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?ruv\.is/sarpurinn/ruv/\w+/(?P[0-9]+)' + _TEST = { + 'url': 'http://ruv.is/sarpurinn/ruv/frettir/20170614', + 'md5': 'a07ea1ebaba64082d90323b1c96f264b', + 'info_dict': { + 'id': '20170614', + 'ext': 'mp4', + 'title': 'Fréttir', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._og_search_title(webpage) + video_url = self._html_search_regex(r'video\.src\s*=\s*["\'](.+?)["\']', webpage, 'video URL') + + return { + 'id': video_id, + 'title': title, + 'url': video_url, + 'ext': 'mp4' + } -- cgit 1.4.1 From 449c66577640a0c3f0b383204a1e7284429a61c3 Mon Sep 17 00:00:00 2001 From: james Date: Sat, 17 Jun 2017 17:15:41 +0200 Subject: [raiplay:live] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/rai.py | 19 ++++++++++++++++++- 2 files changed, 19 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e97691daa..a263c88b3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -824,6 +824,7 @@ from .radiobremen import RadioBremenIE from .radiofrance import RadioFranceIE from .rai import ( RaiPlayIE, + RaiPlayLiveIE, RaiIE, ) from .rbmaradio import RBMARadioIE diff --git a/youtube_dl/extractor/rai.py b/youtube_dl/extractor/rai.py index 81eb9db85..ed15a5f10 100644 --- a/youtube_dl/extractor/rai.py +++ 
b/youtube_dl/extractor/rai.py @@ -208,10 +208,27 @@ class RaiPlayIE(RaiBaseIE): } info.update(relinker_info) - return info +class RaiPlayLiveIE(RaiBaseIE): + _VALID_URL = r'https?://(?:www\.)?raiplay\.it/dirette/(?P\w*)' + _TEST = { + 'url': 'http://www.raiplay.it/dirette/rai3', + 'only_matching': True, + } + + def _real_extract(self, url): + channel = self._match_id(url) + + webpage = self._download_webpage(url, channel) + re_id = r']*)data-uniquename=(["\'])[\w-]*(?P%s)(\2)([^>]*?)>' % RaiBaseIE._UUID_RE + video_id = self._html_search_regex(re_id, webpage, 'livestream-id', group='id') + + return self.url_result('http://www.raiplay.it/dirette/ContentItem-%s.html' % video_id, + RaiPlayIE.ie_key(), video_id) + + class RaiIE(RaiBaseIE): _VALID_URL = r'https?://[^/]+\.(?:rai\.(?:it|tv)|rainews\.it)/dl/.+?-(?P%s)(?:-.+?)?\.html' % RaiBaseIE._UUID_RE _TESTS = [{ -- cgit 1.4.1 From 9b35297be1b7e5fa7372bac664ab91a7fa364760 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sat, 1 Jul 2017 18:39:29 +0700 Subject: [extractors] Add import for tastytrade --- youtube_dl/extractor/extractors.py | 1 + 1 file changed, 1 insertion(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index a263c88b3..bbdb4a2fe 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -973,6 +973,7 @@ from .tagesschau import ( TagesschauIE, ) from .tass import TassIE +from .tastytrade import TastyTradeIE from .tbs import TBSIE from .tdslifeway import TDSLifewayIE from .teachertube import ( -- cgit 1.4.1 From eadd313321f4fbacac2ec10f2f382197e401d7f7 Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Sat, 8 Jul 2017 15:48:05 +0800 Subject: [yam] Remove extractor mymedia.yam.com is dead. An wikipedia user also pointed out that Yam's blog service is no longer available. 
[1] [1] https://zh.wikipedia.org/zh-tw/%E5%A4%A9%E7%A9%BA%E9%83%A8%E8%90%BD --- ChangeLog | 6 ++ youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/yam.py | 123 ------------------------------------- 3 files changed, 6 insertions(+), 124 deletions(-) delete mode 100644 youtube_dl/extractor/yam.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 4c2011326..5d07c12cb 100644 --- a/ChangeLog +++ b/ChangeLog @@ -1,3 +1,9 @@ +version + +Extractors +- [yam] Remove extractor + + version 2017.07.02 Core diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index bbdb4a2fe..b83c3aba5 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1280,7 +1280,6 @@ from .yahoo import ( YahooIE, YahooSearchIE, ) -from .yam import YamIE from .yandexmusic import ( YandexMusicTrackIE, YandexMusicAlbumIE, diff --git a/youtube_dl/extractor/yam.py b/youtube_dl/extractor/yam.py deleted file mode 100644 index ef5535547..000000000 --- a/youtube_dl/extractor/yam.py +++ /dev/null @@ -1,123 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -import re - -from .common import InfoExtractor -from ..compat import compat_urlparse -from ..utils import ( - float_or_none, - month_by_abbreviation, - ExtractorError, - get_element_by_attribute, -) - - -class YamIE(InfoExtractor): - IE_DESC = '蕃薯藤yam天空部落' - _VALID_URL = r'https?://mymedia\.yam\.com/m/(?P\d+)' - - _TESTS = [{ - # An audio hosted on Yam - 'url': 'http://mymedia.yam.com/m/2283921', - 'md5': 'c011b8e262a52d5473d9c2e3c9963b9c', - 'info_dict': { - 'id': '2283921', - 'ext': 'mp3', - 'title': '發現 - 趙薇 京華煙雲主題曲', - 'description': '發現 - 趙薇 京華煙雲主題曲', - 'uploader_id': 'princekt', - 'upload_date': '20080807', - 'duration': 313.0, - } - }, { - # An external video hosted on YouTube - 'url': 'http://mymedia.yam.com/m/3599430', - 'md5': '03127cf10d8f35d120a9e8e52e3b17c6', - 'info_dict': { - 'id': 'CNpEoQlrIgA', - 'ext': 'mp4', - 'upload_date': '20150306', - 'uploader': '新莊社大瑜伽社', - 'description': 'md5:11e2e405311633ace874f2e6226c8b17', - 'uploader_id': '2323agoy', - 'title': '20090412陽明山二子坪-1', - }, - 'skip': 'Video does not exist', - }, { - 'url': 'http://mymedia.yam.com/m/3598173', - 'info_dict': { - 'id': '3598173', - 'ext': 'mp4', - }, - 'skip': 'cause Yam system error', - }, { - 'url': 'http://mymedia.yam.com/m/3599437', - 'info_dict': { - 'id': '3599437', - 'ext': 'mp4', - }, - 'skip': 'invalid YouTube URL', - }, { - 'url': 'http://mymedia.yam.com/m/2373534', - 'md5': '7ff74b91b7a817269d83796f8c5890b1', - 'info_dict': { - 'id': '2373534', - 'ext': 'mp3', - 'title': '林俊傑&蔡卓妍-小酒窩', - 'description': 'md5:904003395a0fcce6cfb25028ff468420', - 'upload_date': '20080928', - 'uploader_id': 'onliner2', - } - }] - - def _real_extract(self, url): - video_id = self._match_id(url) - page = self._download_webpage(url, video_id) - - # Check for errors - system_msg = self._html_search_regex( - r'系統訊息(?:
<br>|\n|\r)*([^<>]+)<br>
', page, 'system message', - default=None) - if system_msg: - raise ExtractorError(system_msg, expected=True) - - # Is it hosted externally on YouTube? - youtube_url = self._html_search_regex( - r']+class="heading"[^>]*>\s*(.+)\s*', page, 'title') - - api_page = self._download_webpage( - 'http://mymedia.yam.com/api/a/?pID=' + video_id, video_id, - note='Downloading API page') - api_result_obj = compat_urlparse.parse_qs(api_page) - - info_table = get_element_by_attribute('class', 'info', page) - uploader_id = self._html_search_regex( - r':[\n ]+
(?P[A-Z][a-z]{2})\s+' + - r'(?P\d{1,2}), (?P\d{4})', page) - if mobj: - upload_date = '%s%02d%02d' % ( - mobj.group('year'), - month_by_abbreviation(mobj.group('mon')), - int(mobj.group('day'))) - else: - upload_date = None - duration = float_or_none(api_result_obj['totaltime'][0], scale=1000) - - return { - 'id': video_id, - 'url': api_result_obj['mp3file'][0], - 'title': title, - 'description': self._html_search_meta('description', page), - 'duration': duration, - 'uploader_id': uploader_id, - 'upload_date': upload_date, - } -- cgit 1.4.1 From d2b9f362fabad8f9490825456d8ed679d7159271 Mon Sep 17 00:00:00 2001 From: Christopher Smith Date: Thu, 29 Jun 2017 13:10:45 -0600 Subject: [cjsw] Add extractor --- youtube_dl/extractor/cjsw.py | 41 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 42 insertions(+) create mode 100644 youtube_dl/extractor/cjsw.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/cjsw.py b/youtube_dl/extractor/cjsw.py new file mode 100644 index 000000000..087cac9bc --- /dev/null +++ b/youtube_dl/extractor/cjsw.py @@ -0,0 +1,41 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class CJSWIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?cjsw\.com/program/\S+/(?P[0-9]+)' + IE_NAME = 'cjsw' + _TEST = { + 'url': 'http://cjsw.com/program/freshly-squeezed/episode/20170620', + 'md5': 'cee14d40f1e9433632c56e3d14977120', + 'info_dict': { + 'id': '20170620', + 'ext': 'mp3', + 'title': 'Freshly Squeezed', + 'description': 'Sled Island artists featured // Live session with Phi Pho, followed by a live session with Sinzere & The Late Nights! // Stay Fresh Y\'all!!', + } + } + + def _real_extract(self, url): + episode_id = self._match_id(url) + + webpage = self._download_webpage(url, episode_id) + + title = self._search_regex( + r']+data-showname=(["\'])(?P(?!\1).+?)\1[^>]*>', webpage, 'title', group='title') + description = self._html_search_regex( + r'<p>(?P<description>.+?)</p>', webpage, 'description', fatal=False) + formats = [{ + 'url': self._search_regex( + r'<button[^>]+data-audio-src=(["\'])(?P<audio_url>(?!\1).+?)\1[^>]*>', webpage, 'audio_url', group='audio_url'), + 'ext': 'mp3', + 'vcodec': 'none', + }] + return { + 'id': episode_id, + 'title': title, + 'description': description, + 'formats': formats, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index b83c3aba5..4524fa687 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -185,6 +185,7 @@ from .chirbit import ( ChirbitProfileIE, ) from .cinchcast import CinchcastIE +from .cjsw import CJSWIE from .clipfish import ClipfishIE from .cliphunter import CliphunterIE from .cliprs import ClipRsIE -- cgit 1.4.1 From 256a746d21634eccad07a1e6dcafedcdf8b6181b Mon Sep 17 00:00:00 2001 From: luboss <lubos.katrinec@gmail.com> Date: Fri, 2 Jun 2017 22:44:39 +0200 Subject: [joj] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/joj.py | 56 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 57 insertions(+) create mode 100755 youtube_dl/extractor/joj.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 4524fa687..9ee080895 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -470,6 +470,7 @@ from .jamendo import ( ) from .jeuxvideo import JeuxVideoIE from 
.jove import JoveIE +from .joj import JojIE from .jwplatform import JWPlatformIE from .jpopsukitv import JpopsukiIE from .kaltura import KalturaIE diff --git a/youtube_dl/extractor/joj.py b/youtube_dl/extractor/joj.py new file mode 100755 index 000000000..2ebfec902 --- /dev/null +++ b/youtube_dl/extractor/joj.py @@ -0,0 +1,56 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +import re + + +class JojIE(InfoExtractor): + _VALID_URL = r'https?://[a-z0-9]+\.joj\.sk/([^/]+/)*(?P<title_query>(?P<release_date>[0-9]{4}(-[0-9]{2}){2}).*)' # noqa + _TESTS = [{ + 'url': 'https://www.joj.sk/nove-byvanie/archiv/2017-05-28-nove-byvanie', # noqa + 'info_dict': { + 'id': 'a388ec4c-6019-4a4a-9312-b1bee194e932', + 'ext': 'mp4', + 'title': 'Nové Bývanie', + 'release_date': '20170528' + } + }, { + 'url': 'http://nasi.joj.sk/epizody/2016-09-06-stari-rodicia', + 'info_dict': { + 'id': 'f18b2c5f-9ea8-4941-a164-a814c53306ad', + 'ext': 'mp4', + 'title': 'Starí Rodičia', + 'release_date': '20160906' + } + }] + + media_src_url = 'http://n16.joj.sk/storage/' + xml_source_url = 'https://media.joj.sk/services/Video.php?clip=' + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + release_date = mobj.group('release_date').replace('-', '') + webpage = self._download_webpage(url, 'id') + video_id = self._html_search_regex( + r'https?://([a-z0-9]+\.)joj\.sk/embed/(?P<video_id>[a-f0-9\-]+)', + webpage, 'id', group='video_id') + xml_playlist_url = self.xml_source_url + video_id + xml_playlist_et = self._download_xml(xml_playlist_url, 'XML playlist') + formats = [] + for file_el in xml_playlist_et.findall('files/file'): + try: + height = int(file_el.attrib['id'].replace('p', '')) + except ValueError: + height = 0 + formats.append({'height': height, + 'url': self.media_src_url + file_el.attrib['path'].replace( # noqa + 'dat/', '', 1)}) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': self._og_search_title(webpage).title(), + 'formats': formats, + 'release_date': release_date + } -- cgit 1.4.1 From b71c18b4343d54ce8373e9a11df882aca1ae82a0 Mon Sep 17 00:00:00 2001 From: coreynicholson <coreynicholson@users.noreply.github.com> Date: Sun, 9 Jul 2017 22:24:04 +0100 Subject: [vlive:playlist] Add extractor --- youtube_dl/extractor/extractors.py | 3 +- youtube_dl/extractor/vlive.py | 56 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 58 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 9ee080895..eb1541729 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1206,7 +1206,8 @@ from .vk import ( ) from .vlive import ( VLiveIE, - VLiveChannelIE + VLiveChannelIE, + VLivePlaylistIE ) from .vodlocker import VodlockerIE from .vodpl import VODPlIE diff --git a/youtube_dl/extractor/vlive.py b/youtube_dl/extractor/vlive.py index e58940607..f3825db5c 100644 --- a/youtube_dl/extractor/vlive.py +++ b/youtube_dl/extractor/vlive.py @@ -49,6 +49,10 @@ class VLiveIE(InfoExtractor): }, }] + @classmethod + def suitable(cls, url): + return False if VLivePlaylistIE.suitable(url) else super(VLiveIE, cls).suitable(url) + def _real_extract(self, url): video_id = self._match_id(url) @@ -261,3 +265,55 @@ class VLiveChannelIE(InfoExtractor): return self.playlist_result( entries, channel_code, channel_name) + + +class VLivePlaylistIE(InfoExtractor): + IE_NAME = 'vlive:playlist' + _VALID_URL = 
r'https?://(?:(?:www|m)\.)?vlive\.tv/video/(?P<video_id>[0-9]+)/playlist/(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.vlive.tv/video/22867/playlist/22912', + 'info_dict': { + 'id': '22912', + 'title': 'Valentine Day Message from TWICE' + }, + 'playlist_mincount': 9 + } + + def _real_extract(self, url): + playlist_id = self._match_id(url) + video_id_match = re.match(self._VALID_URL, url) + assert video_id_match + video_id = compat_str(video_id_match.group('video_id')) + + VIDEO_URL_TEMPLATE = 'http://www.vlive.tv/video/%s' + if self._downloader.params.get('noplaylist'): + self.to_screen( + 'Downloading just video %s because of --no-playlist' % video_id) + return self.url_result( + VIDEO_URL_TEMPLATE % video_id, + ie=VLiveIE.ie_key(), video_id=video_id) + + self.to_screen( + 'Downloading playlist %s - add --no-playlist to just download video' % playlist_id) + + webpage = self._download_webpage( + 'http://www.vlive.tv/video/%s/playlist/%s' % (video_id, playlist_id), video_id) + + playlist_name = self._html_search_regex( + r'<div[^>]+class="[^"]*multicam_playlist[^>]*>\s*<h3[^>]+>([^<]+)', + webpage, 'playlist name', fatal=False) + + item_ids = self._search_regex( + r'\bvar\s+playlistVideoSeqs\s*=\s*(\[[^]]+\])', + webpage, 'playlist item ids') + + entries = [] + for item_id in self._parse_json(item_ids, playlist_id): + item_id = compat_str(item_id) + entries.append( + self.url_result( + VIDEO_URL_TEMPLATE % item_id, + ie=VLiveIE.ie_key(), video_id=item_id)) + + return self.playlist_result( + entries, playlist_id, playlist_name) -- cgit 1.4.1 From 4e826cd9aec383768a7b25aa3161efd4672f9310 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 15 Jul 2017 06:48:12 +0700 Subject: [nexx] Add extractor (closes #10807, closes #13465) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/generic.py | 22 ++++ youtube_dl/extractor/nexx.py | 221 +++++++++++++++++++++++++++++++++++++ 3 files changed, 244 insertions(+) create mode 100644 youtube_dl/extractor/nexx.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index eb1541729..9d34447a9 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -653,6 +653,7 @@ from .nextmedia import ( AppleDailyIE, NextTVIE, ) +from .nexx import NexxIE from .nfb import NFBIE from .nfl import NFLIE from .nhk import NhkVodIE diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 8c2ff39d5..123a21296 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -36,6 +36,7 @@ from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) +from .nexx import NexxIE from .nbc import NBCSportsVPlayerIE from .ooyala import OoyalaIE from .rutv import RUTVIE @@ -1549,6 +1550,22 @@ class GenericIE(InfoExtractor): }, 'add_ie': ['BrightcoveLegacy'], }, + # Nexx embed + { + 'url': 'https://www.funk.net/serien/5940e15073f6120001657956/items/593efbb173f6120001657503', + 'info_dict': { + 'id': '247746', + 'ext': 'mp4', + 'title': "Yesterday's Jam (OV)", + 'description': 'md5:09bc0984723fed34e2581624a84e05f0', + 'timestamp': 1492594816, + 'upload_date': '20170419', + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + }, # Facebook <iframe> embed { 'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html', @@ -2133,6 +2150,11 @@ class GenericIE(InfoExtractor): if bc_urls: return self.playlist_from_matches(bc_urls, video_id, 
video_title, ie='BrightcoveNew') + # Look for Nexx embeds + nexx_urls = NexxIE._extract_urls(webpage) + if nexx_urls: + return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key()) + # Look for ThePlatform embeds tp_urls = ThePlatformIE._extract_urls(webpage) if tp_urls: diff --git a/youtube_dl/extractor/nexx.py b/youtube_dl/extractor/nexx.py new file mode 100644 index 000000000..60b42cb7d --- /dev/null +++ b/youtube_dl/extractor/nexx.py @@ -0,0 +1,221 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import hashlib +import random +import re +import time + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + ExtractorError, + int_or_none, + parse_duration, + try_get, + urlencode_postdata, +) + + +class NexxIE(InfoExtractor): + _VALID_URL = r'https?://api\.nexx(?:\.cloud|cdn\.com)/v3/(?P<domain_id>\d+)/videos/byid/(?P<id>\d+)' + _TESTS = [{ + # movie + 'url': 'https://api.nexx.cloud/v3/748/videos/byid/128907', + 'md5': '16746bfc28c42049492385c989b26c4a', + 'info_dict': { + 'id': '128907', + 'ext': 'mp4', + 'title': 'Stiftung Warentest', + 'alt_title': 'Wie ein Test abläuft', + 'description': 'md5:d1ddb1ef63de721132abd38639cc2fd2', + 'release_year': 2013, + 'creator': 'SPIEGEL TV', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 2509, + 'timestamp': 1384264416, + 'upload_date': '20131112', + }, + 'params': { + 'format': 'bestvideo', + }, + }, { + # episode + 'url': 'https://api.nexx.cloud/v3/741/videos/byid/247858', + 'info_dict': { + 'id': '247858', + 'ext': 'mp4', + 'title': 'Return of the Golden Child (OV)', + 'description': 'md5:5d969537509a92b733de21bae249dc63', + 'release_year': 2017, + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 1397, + 'timestamp': 1495033267, + 'upload_date': '20170517', + 'episode_number': 2, + 'season_number': 2, + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + }, { + 'url': 'https://api.nexxcdn.com/v3/748/videos/byid/128907', + 'only_matching': True, + }] + + @staticmethod + def _extract_urls(webpage): + # Reference: + # 1. https://nx-s.akamaized.net/files/201510/44.pdf + + entries = [] + + # JavaScript Integration + for domain_id, video_id in re.findall( + r'''(?isx) + <script\b[^>]+\bsrc=["\']https?://require\.nexx(?:\.cloud|cdn\.com)/(\d+).+? + onPLAYReady.+? 
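+                    # (comment added for clarity, not in the original patch; it is
+                    #  ignored by the regex engine because of the (?x) flag)
+                    # group 1 above is the Nexx domain id taken from the
+                    # require.nexx script URL; group 2 below is the video id
+                    # passed as the second argument to _play.init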
+ _play\.init\s*\(.+?\s*,\s*(\d+)\s*,\s*.+?\) + ''', webpage): + entries.append('https://api.nexx.cloud/v3/%s/videos/byid/%s' % (domain_id, video_id)) + + # TODO: support more embed formats + + return entries + + def _handle_error(self, response): + status = int_or_none(try_get( + response, lambda x: x['metadata']['status']) or 200) + if 200 <= status < 300: + return + raise ExtractorError( + '%s said: %s' % (self.IE_NAME, response['metadata']['errorhint']), + expected=True) + + def _call_api(self, domain_id, path, video_id, data=None, headers={}): + headers['Content-Type'] = 'application/x-www-form-urlencoded; charset=UTF-8' + result = self._download_json( + 'https://api.nexx.cloud/v3/%s/%s' % (domain_id, path), video_id, + 'Downloading %s JSON' % path, data=urlencode_postdata(data), + headers=headers) + self._handle_error(result) + return result['result'] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + domain_id, video_id = mobj.group('domain_id', 'id') + + # Reverse engineered from JS code (see getDeviceID function) + device_id = '%d:%d:%d%d' % ( + random.randint(1, 4), int(time.time()), + random.randint(1e4, 99999), random.randint(1, 9)) + + result = self._call_api(domain_id, 'session/init', video_id, data={ + 'nxp_devh': device_id, + 'nxp_userh': '', + 'precid': '0', + 'playlicense': '0', + 'screenx': '1920', + 'screeny': '1080', + 'playerversion': '6.0.00', + 'gateway': 'html5', + 'adGateway': '', + 'explicitlanguage': 'en-US', + 'addTextTemplates': '1', + 'addDomainData': '1', + 'addAdModel': '1', + }, headers={ + 'X-Request-Enable-Auth-Fallback': '1', + }) + + cid = result['general']['cid'] + + # As described in [1] X-Request-Token generation algorithm is + # as follows: + # md5( operation + domain_id + domain_secret ) + # where domain_secret is a static value that will be given by nexx.tv + # as per [1]. Here is how this "secret" is generated (reversed + # from _play.api.init function, search for clienttoken). So it's + # actually not static and not that much of a secret. + # 1. 
https://nexxtvstorage.blob.core.windows.net/files/201610/27.pdf + secret = result['device']['clienttoken'][int(device_id[0]):] + secret = secret[0:len(secret) - int(device_id[-1])] + + op = 'byid' + + # Reversed from JS code for _play.api.call function (search for + # X-Request-Token) + request_token = hashlib.md5( + ''.join((op, domain_id, secret)).encode('utf-8')).hexdigest() + + video = self._call_api( + domain_id, 'videos/%s/%s' % (op, video_id), video_id, data={ + 'additionalfields': 'language,channel,actors,studio,licenseby,slug,subtitle,teaser,description', + 'addInteractionOptions': '1', + 'addStatusDetails': '1', + 'addStreamDetails': '1', + 'addCaptions': '1', + 'addScenes': '1', + 'addHotSpots': '1', + 'addBumpers': '1', + 'captionFormat': 'data', + }, headers={ + 'X-Request-CID': cid, + 'X-Request-Token': request_token, + }) + + general = video['general'] + title = general['title'] + + stream_data = video['streamdata'] + language = general.get('language_raw') or '' + + # TODO: reverse more cdns and formats + + cdn = stream_data['cdnType'] + assert cdn == 'azure' + + azure_locator = stream_data['azureLocator'] + + AZURE_URL = 'http://nx-p%02d.akamaized.net/' + + for secure in ('s', ''): + cdn_shield = stream_data.get('cdnShieldHTTP%s' % secure.upper()) + if cdn_shield: + azure_base = 'http%s://%s' % (secure, cdn_shield) + break + else: + azure_base = AZURE_URL % int(stream_data['azureAccount'].replace('nexxplayplus', '')) + + is_ml = ',' in language + azure_m3u8_url = '%s%s/%s_src%s.ism/Manifest(format=m3u8-aapl)' % ( + azure_base, azure_locator, video_id, ('_manifest' if is_ml else '')) + + protection_token = try_get( + video, lambda x: x['protectiondata']['token'], compat_str) + if protection_token: + azure_m3u8_url += '?hdnts=%s' % protection_token + + formats = self._extract_m3u8_formats( + azure_m3u8_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='%s-hls' % cdn) + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': title, + 'alt_title': general.get('subtitle'), + 'description': general.get('description'), + 'release_year': int_or_none(general.get('year')), + 'creator': general.get('studio') or general.get('studio_adref'), + 'thumbnail': try_get( + video, lambda x: x['imagedata']['thumb'], compat_str), + 'duration': parse_duration(general.get('runtime')), + 'timestamp': int_or_none(general.get('uploaded')), + 'episode_number': int_or_none(try_get( + video, lambda x: x['episodedata']['episode'])), + 'season_number': int_or_none(try_get( + video, lambda x: x['episodedata']['season'])), + 'formats': formats, + } -- cgit 1.4.1 From 94b817edebb63c3d8485e1ae27cc394dd9e21f9d Mon Sep 17 00:00:00 2001 From: troywith77 <ruitang307@gmail.com> Date: Tue, 9 May 2017 13:10:18 +0800 Subject: [pearvideo] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/pear.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 youtube_dl/extractor/pear.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 9d34447a9..75c1a3d0e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -762,6 +762,7 @@ from .pandoratv import PandoraTVIE from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE +from .pear import PearIE from .people import PeopleIE from .periscope import ( PeriscopeIE, diff --git a/youtube_dl/extractor/pear.py 
b/youtube_dl/extractor/pear.py new file mode 100644 index 000000000..77fd46852 --- /dev/null +++ b/youtube_dl/extractor/pear.py @@ -0,0 +1,34 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class PearIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?pearvideo\.com/video_(?P<id>[0-9]+)' + _TEST = { + 'url': 'http://www.pearvideo.com/video_1076290', + 'info_dict': { + 'id': '1076290', + 'ext': 'mp4', + 'title': '小浣熊在主人家玻璃上滚石头:没砸', + 'description': '小浣熊找到一个小石头,仿佛发现了一个宝贝。它不停地用石头按在玻璃上,滚来滚去,吸引主人注意。', + 'url': 'http://video.pearvideo.com/mp4/short/20170508/cont-1076290-10438018-hd.mp4' + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex(r'<h1[^>]+class="video-tt">(.+)</h1>', webpage, 'title', fatal=False) + description = self._html_search_regex(r'<div[^>]+class="summary"[^>]*>([^<]+)<', webpage, 'description', fatal=False) + url = self._html_search_regex(r'hdUrl="(.*?)"', webpage, 'url', fatal=False) + + return { + 'id': video_id, + 'ext': 'mp4', + 'title': title, + 'description': description, + 'url': url + } -- cgit 1.4.1 From decf86044d17a8ec04e43a4805a0092622d976ae Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 16 Jul 2017 03:06:04 +0700 Subject: [pearvideo] Improve (closes #13031) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/pear.py | 34 -------------------- youtube_dl/extractor/pearvideo.py | 63 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+), 35 deletions(-) delete mode 100644 youtube_dl/extractor/pear.py create mode 100644 youtube_dl/extractor/pearvideo.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 75c1a3d0e..28f0d3f0d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -762,7 +762,7 @@ from .pandoratv import PandoraTVIE from .parliamentliveuk import ParliamentLiveUKIE from .patreon import PatreonIE from .pbs import PBSIE -from .pear import PearIE +from .pearvideo import PearVideoIE from .people import PeopleIE from .periscope import ( PeriscopeIE, diff --git a/youtube_dl/extractor/pear.py b/youtube_dl/extractor/pear.py deleted file mode 100644 index 77fd46852..000000000 --- a/youtube_dl/extractor/pear.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .common import InfoExtractor - - -class PearIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?pearvideo\.com/video_(?P<id>[0-9]+)' - _TEST = { - 'url': 'http://www.pearvideo.com/video_1076290', - 'info_dict': { - 'id': '1076290', - 'ext': 'mp4', - 'title': '小浣熊在主人家玻璃上滚石头:没砸', - 'description': '小浣熊找到一个小石头,仿佛发现了一个宝贝。它不停地用石头按在玻璃上,滚来滚去,吸引主人注意。', - 'url': 'http://video.pearvideo.com/mp4/short/20170508/cont-1076290-10438018-hd.mp4' - } - } - - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - title = self._html_search_regex(r'<h1[^>]+class="video-tt">(.+)</h1>', webpage, 'title', fatal=False) - description = self._html_search_regex(r'<div[^>]+class="summary"[^>]*>([^<]+)<', webpage, 'description', fatal=False) - url = self._html_search_regex(r'hdUrl="(.*?)"', webpage, 'url', fatal=False) - - return { - 'id': video_id, - 'ext': 'mp4', - 'title': title, - 'description': description, - 'url': url - } diff --git a/youtube_dl/extractor/pearvideo.py 
b/youtube_dl/extractor/pearvideo.py new file mode 100644 index 000000000..1d777221c --- /dev/null +++ b/youtube_dl/extractor/pearvideo.py @@ -0,0 +1,63 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + qualities, + unified_timestamp, +) + + +class PearVideoIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?pearvideo\.com/video_(?P<id>\d+)' + _TEST = { + 'url': 'http://www.pearvideo.com/video_1076290', + 'info_dict': { + 'id': '1076290', + 'ext': 'mp4', + 'title': '小浣熊在主人家玻璃上滚石头:没砸', + 'description': 'md5:01d576b747de71be0ee85eb7cac25f9d', + 'timestamp': 1494275280, + 'upload_date': '20170508', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + quality = qualities( + ('ldflv', 'ld', 'sdflv', 'sd', 'hdflv', 'hd', 'src')) + + formats = [{ + 'url': mobj.group('url'), + 'format_id': mobj.group('id'), + 'quality': quality(mobj.group('id')), + } for mobj in re.finditer( + r'(?P<id>[a-zA-Z]+)Url\s*=\s*(["\'])(?P<url>(?:https?:)?//.+?)\2', + webpage)] + self._sort_formats(formats) + + title = self._search_regex( + (r'<h1[^>]+\bclass=(["\'])video-tt\1[^>]*>(?P<value>[^<]+)', + r'<[^>]+\bdata-title=(["\'])(?P<value>(?:(?!\1).)+)\1'), + webpage, 'title', group='value') + description = self._search_regex( + (r'<div[^>]+\bclass=(["\'])summary\1[^>]*>(?P<value>[^<]+)', + r'<[^>]+\bdata-summary=(["\'])(?P<value>(?:(?!\1).)+)\1'), + webpage, 'description', default=None, + group='value') or self._html_search_meta('Description', webpage) + timestamp = unified_timestamp(self._search_regex( + r'<div[^>]+\bclass=["\']date["\'][^>]*>([^<]+)', + webpage, 'timestamp', fatal=False)) + + return { + 'id': video_id, + 'title': title, + 'description': description, + 'timestamp': timestamp, + 'formats': formats, + } -- cgit 1.4.1 From 3f59b0154a8b6dc85425edfbb3dfdc64f41a6ecb Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 16 Jul 2017 04:32:37 +0700 Subject: [nexx:embed] Add extractor for iframe embeds --- youtube_dl/extractor/extractors.py | 5 ++++- youtube_dl/extractor/generic.py | 31 ++++++++++++++++++++++++- youtube_dl/extractor/nexx.py | 46 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 80 insertions(+), 2 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 28f0d3f0d..e8a066b83 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -653,7 +653,10 @@ from .nextmedia import ( AppleDailyIE, NextTVIE, ) -from .nexx import NexxIE +from .nexx import ( + NexxIE, + NexxEmbedIE, +) from .nfb import NFBIE from .nfl import NFLIE from .nhk import NhkVodIE diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 123a21296..0ab2ef2d6 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -36,7 +36,10 @@ from .brightcove import ( BrightcoveLegacyIE, BrightcoveNewIE, ) -from .nexx import NexxIE +from .nexx import ( + NexxIE, + NexxEmbedIE, +) from .nbc import NBCSportsVPlayerIE from .ooyala import OoyalaIE from .rutv import RUTVIE @@ -1566,6 +1569,27 @@ class GenericIE(InfoExtractor): 'skip_download': True, }, }, + # Nexx iFrame embed + { + 'url': 'http://www.spiegel.de/sptv/spiegeltv/spiegel-tv-ueber-schnellste-katapult-achterbahn-der-welt-taron-a-1137884.html', + 'info_dict': { + 'id': '161464', + 'ext': 'mp4', + 'title': 'Nervenkitzel 
Achterbahn', + 'alt_title': 'Karussellbauer in Deutschland', + 'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc', + 'release_year': 2005, + 'creator': 'SPIEGEL TV', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 2761, + 'timestamp': 1394021479, + 'upload_date': '20140305', + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + }, # Facebook <iframe> embed { 'url': 'https://www.hostblogger.de/blog/archives/6181-Auto-jagt-Betonmischer.html', @@ -2155,6 +2179,11 @@ class GenericIE(InfoExtractor): if nexx_urls: return self.playlist_from_matches(nexx_urls, video_id, video_title, ie=NexxIE.ie_key()) + # Look for Nexx iFrame embeds + nexx_embed_urls = NexxEmbedIE._extract_urls(webpage) + if nexx_embed_urls: + return self.playlist_from_matches(nexx_embed_urls, video_id, video_title, ie=NexxEmbedIE.ie_key()) + # Look for ThePlatform embeds tp_urls = ThePlatformIE._extract_urls(webpage) if tp_urls: diff --git a/youtube_dl/extractor/nexx.py b/youtube_dl/extractor/nexx.py index 12450d4c5..e2960273e 100644 --- a/youtube_dl/extractor/nexx.py +++ b/youtube_dl/extractor/nexx.py @@ -88,6 +88,10 @@ class NexxIE(InfoExtractor): return entries + @staticmethod + def _extract_url(webpage): + return NexxIE._extract_urls(webpage)[0] + def _handle_error(self, response): status = int_or_none(try_get( response, lambda x: x['metadata']['status']) or 200) @@ -223,3 +227,45 @@ class NexxIE(InfoExtractor): video, lambda x: x['episodedata']['season'])), 'formats': formats, } + + +class NexxEmbedIE(InfoExtractor): + _VALID_URL = r'https?://embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?P<id>[^/?#&]+)' + _TEST = { + 'url': 'http://embed.nexx.cloud/748/KC1614647Z27Y7T?autoplay=1', + 'md5': '16746bfc28c42049492385c989b26c4a', + 'info_dict': { + 'id': '161464', + 'ext': 'mp4', + 'title': 'Nervenkitzel Achterbahn', + 'alt_title': 'Karussellbauer in Deutschland', + 'description': 'md5:ffe7b1cc59a01f585e0569949aef73cc', + 'release_year': 2005, + 'creator': 'SPIEGEL TV', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 2761, + 'timestamp': 1394021479, + 'upload_date': '20140305', + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + } + + @staticmethod + def _extract_urls(webpage): + # Reference: + # 1. 
https://nx-s.akamaized.net/files/201510/44.pdf + + # iFrame Embed Integration + return [mobj.group('url') for mobj in re.finditer( + r'<iframe[^>]+\bsrc=(["\'])(?P<url>(?:https?:)?//embed\.nexx(?:\.cloud|cdn\.com)/\d+/(?:(?!\1).)+)\1', + webpage)] + + def _real_extract(self, url): + embed_id = self._match_id(url) + + webpage = self._download_webpage(url, embed_id) + + return self.url_result(NexxIE._extract_url(webpage), ie=NexxIE.ie_key()) -- cgit 1.4.1 From dc6520aa3d1fe7afc52613e392f15dde90af4844 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Thu, 20 Jul 2017 23:22:36 +0700 Subject: [egghead:lesson] Add extractor (#6635) --- youtube_dl/extractor/egghead.py | 49 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 5 +++- 2 files changed, 53 insertions(+), 1 deletion(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/egghead.py b/youtube_dl/extractor/egghead.py index c86f52319..e4a3046af 100644 --- a/youtube_dl/extractor/egghead.py +++ b/youtube_dl/extractor/egghead.py @@ -2,6 +2,11 @@ from __future__ import unicode_literals from .common import InfoExtractor +from ..utils import ( + int_or_none, + try_get, + unified_timestamp, +) class EggheadCourseIE(InfoExtractor): @@ -33,3 +38,47 @@ class EggheadCourseIE(InfoExtractor): return self.playlist_result( entries, playlist_id, course.get('title'), course.get('description')) + + +class EggheadLessonIE(InfoExtractor): + IE_DESC = 'egghead.io lesson' + IE_NAME = 'egghead:lesson' + _VALID_URL = r'https://egghead\.io/lessons/(?P<id>[^/?#&]+)' + _TEST = { + 'url': 'https://egghead.io/lessons/javascript-linear-data-flow-with-container-style-types-box', + 'info_dict': { + 'id': 'fv5yotjxcg', + 'ext': 'mp4', + 'title': 'Create linear data flow with container style types (Box)', + 'description': 'md5:9aa2cdb6f9878ed4c39ec09e85a8150e', + 'thumbnail': r're:^https?:.*\.jpg$', + 'timestamp': 1481296768, + 'upload_date': '20161209', + 'duration': 304, + 'view_count': 0, + 'tags': ['javascript', 'free'], + }, + 'params': { + 'skip_download': True, + }, + } + + def _real_extract(self, url): + lesson_id = self._match_id(url) + + lesson = self._download_json( + 'https://egghead.io/api/v1/lessons/%s' % lesson_id, lesson_id) + + return { + '_type': 'url_transparent', + 'ie_key': 'Wistia', + 'url': 'wistia:%s' % lesson['wistia_id'], + 'id': lesson['wistia_id'], + 'title': lesson.get('title'), + 'description': lesson.get('summary'), + 'thumbnail': lesson.get('thumb_nail'), + 'timestamp': unified_timestamp(lesson.get('published_at')), + 'duration': int_or_none(lesson.get('duration')), + 'view_count': int_or_none(lesson.get('plays_count')), + 'tags': try_get(lesson, lambda x: x['tag_list'], list), + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index e8a066b83..db7616caa 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -298,7 +298,10 @@ from .dw import ( from .eagleplatform import EaglePlatformIE from .ebaumsworld import EbaumsWorldIE from .echomsk import EchoMskIE -from .egghead import EggheadCourseIE +from .egghead import ( + EggheadCourseIE, + EggheadLessonIE, +) from .ehow import EHowIE from .eighttracks import EightTracksIE from .einthusan import EinthusanIE -- cgit 1.4.1 From c99d6890cb46626870474e5c1092d9772096c4b6 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 23 Jul 2017 21:00:56 +0700 Subject: [nickru] Add extractor --- youtube_dl/extractor/extractors.py | 1 + 
youtube_dl/extractor/nick.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index db7616caa..2513f2587 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -673,6 +673,7 @@ from .nick import ( NickIE, NickDeIE, NickNightIE, + NickRuIE, ) from .niconico import NiconicoIE, NiconicoPlaylistIE from .ninecninemedia import ( diff --git a/youtube_dl/extractor/nick.py b/youtube_dl/extractor/nick.py index 1fa19cde4..b688637bc 100644 --- a/youtube_dl/extractor/nick.py +++ b/youtube_dl/extractor/nick.py @@ -125,3 +125,21 @@ class NickNightIE(NickDeIE): return self._search_regex( r'mrss\s*:\s*(["\'])(?P<url>http.+?)\1', webpage, 'mrss url', group='url') + + +class NickRuIE(MTVServicesInfoExtractor): + IE_NAME = 'nickelodeonru' + _VALID_URL = r'https?://(?:www\.)nickelodeon\.ru/(?:playlist|shows|videos)/(?:[^/]+/)*(?P<id>[^/?#&]+)' + _TESTS = [{ + 'url': 'http://www.nickelodeon.ru/shows/henrydanger/videos/episodes/3-sezon-15-seriya-licenziya-na-polyot/pmomfb#playlist/7airc6', + 'only_matching': True, + }, { + 'url': 'http://www.nickelodeon.ru/videos/smotri-na-nickelodeon-v-iyule/g9hvh7', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + mgid = self._extract_mgid(webpage) + return self.url_result('http://media.mtvnservices.com/embed/%s' % mgid) -- cgit 1.4.1 From 24e966e8dab954136dabbc497064ac63b252495b Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister <phihag@phihag.de> Date: Fri, 28 Jul 2017 12:13:19 +0200 Subject: [megaphone] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/generic.py | 8 ++++++ youtube_dl/extractor/megaphone.py | 55 ++++++++++++++++++++++++++++++++++++++ 3 files changed, 64 insertions(+) create mode 100644 youtube_dl/extractor/megaphone.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 2513f2587..668248648 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -558,6 +558,7 @@ from .matchtv import MatchTVIE from .mdr import MDRIE from .mediaset import MediasetIE from .medici import MediciIE +from .megaphone import MegaphoneIE from .meipai import MeipaiIE from .melonvod import MelonVODIE from .meta import METAIE diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 36c81eda9..9678c32c4 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -97,6 +97,7 @@ from .washingtonpost import WashingtonPostIE from .wistia import WistiaIE from .mediaset import MediasetIE from .joj import JojIE +from .megaphone import MegaphoneIE class GenericIE(InfoExtractor): @@ -2790,6 +2791,13 @@ class GenericIE(InfoExtractor): return self.playlist_from_matches( joj_urls, video_id, video_title, ie=JojIE.ie_key()) + # Look for megaphone.fm embeds + mpfn_urls = MegaphoneIE._extract_urls(webpage) + if mpfn_urls: + return self.playlist_from_matches( + mpfn_urls, video_id, video_title, ie=MegaphoneIE.ie_key()) + + def merge_dicts(dict1, dict2): merged = {} for k, v in dict1.items(): diff --git a/youtube_dl/extractor/megaphone.py b/youtube_dl/extractor/megaphone.py new file mode 100644 index 000000000..60e3caf0d --- /dev/null +++ b/youtube_dl/extractor/megaphone.py @@ -0,0 +1,55 @@ +# coding: utf-8 +from __future__ 
import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import js_to_json + + +class MegaphoneIE(InfoExtractor): + IE_NAME = 'megaphone.fm' + IE_DESC = 'megaphone.fm embedded players' + _VALID_URL = r'https://player\.megaphone\.fm/(?P<id>[A-Z0-9]+)' + _TEST = { + 'url': 'https://player.megaphone.fm/GLT9749789991?"', + 'md5': '4816a0de523eb3e972dc0dda2c191f96', + 'info_dict': { + 'id': 'GLT9749789991', + 'ext': 'mp3', + 'title': '#97 What Kind Of Idiot Gets Phished?', + 'thumbnail': 're:^https://.*\.png.*$', + 'duration': 1776.26375, + 'author': 'Reply All', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._og_search_property('audio:title', webpage) + author = self._og_search_property('audio:artist', webpage) + thumbnail = self._og_search_thumbnail(webpage) + + episode_json = self._search_regex(r'(?s)var\s+episode\s*=\s*(\{.+?\});', webpage, 'episode JSON') + episode_data = self._parse_json(episode_json, video_id, js_to_json) + video_url = self._proto_relative_url(episode_data['mediaUrl'], 'https:') + + formats = [{ + 'url': video_url, + }] + + return { + 'id': video_id, + 'thumbnail': thumbnail, + 'title': title, + 'author': author, + 'duration': episode_data['duration'], + 'formats': formats, + } + + @classmethod + def _extract_urls(cls, webpage): + return [m[0] for m in re.findall( + r'<iframe[^>]*?\ssrc=["\'](%s)' % cls._VALID_URL, webpage)] -- cgit 1.4.1 From cbbe66635f3c23316f04a6f56ad57e025bc47263 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 29 Jul 2017 15:10:19 +0700 Subject: [yandexdisk] Add extractor (closes #13755) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/yandexdisk.py | 115 +++++++++++++++++++++++++++++++++++++ 2 files changed, 116 insertions(+) create mode 100644 youtube_dl/extractor/yandexdisk.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 668248648..852942e0d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1298,6 +1298,7 @@ from .yandexmusic import ( YandexMusicAlbumIE, YandexMusicPlaylistIE, ) +from .yandexdisk import YandexDiskIE from .yesjapan import YesJapanIE from .yinyuetai import YinYueTaiIE from .ynet import YnetIE diff --git a/youtube_dl/extractor/yandexdisk.py b/youtube_dl/extractor/yandexdisk.py new file mode 100644 index 000000000..11729f0f7 --- /dev/null +++ b/youtube_dl/extractor/yandexdisk.py @@ -0,0 +1,115 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + determine_ext, + float_or_none, + int_or_none, + try_get, + urlencode_postdata, +) + + +class YandexDiskIE(InfoExtractor): + _VALID_URL = r'https?://yadi\.sk/i/(?P<id>[^/?#&]+)' + + _TEST = { + 'url': 'https://yadi.sk/i/VdOeDou8eZs6Y', + 'md5': '33955d7ae052f15853dc41f35f17581c', + 'info_dict': { + 'id': 'VdOeDou8eZs6Y', + 'ext': 'mp4', + 'title': '4.mp4', + 'duration': 168.6, + 'uploader': 'y.botova', + 'uploader_id': '300043621', + 'view_count': int, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + status = self._download_webpage( + 'https://disk.yandex.com/auth/status', video_id, query={ + 'urlOrigin': url, + 'source': 'public', + 'md5': 'false', + }) + + sk = self._search_regex( + r'(["\'])sk(?:External)?\1\s*:\s*(["\'])(?P<value>(?:(?!\2).)+)\2', + status, 
'sk', group='value') + + webpage = self._download_webpage(url, video_id) + + models = self._parse_json( + self._search_regex( + r'<script[^>]+id=["\']models-client[^>]+>\s*(\[.+?\])\s*</script', + webpage, 'video JSON'), + video_id) + + data = next( + model['data'] for model in models + if model.get('model') == 'resource') + + video_hash = data['id'] + title = data['name'] + + models = self._download_json( + 'https://disk.yandex.com/models/', video_id, + data=urlencode_postdata({ + '_model.0': 'videoInfo', + 'id.0': video_hash, + '_model.1': 'do-get-resource-url', + 'id.1': video_hash, + 'version': '13.6', + 'sk': sk, + }), query={'_m': 'videoInfo'})['models'] + + videos = try_get(models, lambda x: x[0]['data']['videos'], list) or [] + source_url = try_get( + models, lambda x: x[1]['data']['file'], compat_str) + + formats = [] + if source_url: + formats.append({ + 'url': source_url, + 'format_id': 'source', + 'ext': determine_ext(title, 'mp4'), + 'quality': 1, + }) + for video in videos: + format_url = video.get('url') + if not format_url: + continue + if determine_ext(format_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False)) + else: + formats.append({ + 'url': format_url, + }) + self._sort_formats(formats) + + duration = float_or_none(try_get( + models, lambda x: x[0]['data']['duration']), 1000) + uploader = try_get( + data, lambda x: x['user']['display_name'], compat_str) + uploader_id = try_get( + data, lambda x: x['user']['uid'], compat_str) + view_count = int_or_none(try_get( + data, lambda x: x['meta']['views_counter'])) + + return { + 'id': video_id, + 'title': title, + 'duration': duration, + 'uploader': uploader, + 'uploader_id': uploader_id, + 'view_count': view_count, + 'formats': formats, + } -- cgit 1.4.1 From 836ef2648613f4ca565b319af4769c02e35f60f6 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 29 Jul 2017 18:41:42 +0700 Subject: [soundcloud:trackstation] Add extractor (closes #13733) --- youtube_dl/extractor/extractors.py | 3 +- youtube_dl/extractor/soundcloud.py | 141 +++++++++++++++++++++++-------------- 2 files changed, 89 insertions(+), 55 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 852942e0d..d2c5e8030 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -935,8 +935,9 @@ from .soundcloud import ( SoundcloudIE, SoundcloudSetIE, SoundcloudUserIE, + SoundcloudTrackStationIE, SoundcloudPlaylistIE, - SoundcloudSearchIE + SoundcloudSearchIE, ) from .soundgasm import ( SoundgasmIE, diff --git a/youtube_dl/extractor/soundcloud.py b/youtube_dl/extractor/soundcloud.py index 3f1a46bb2..2f1b2978c 100644 --- a/youtube_dl/extractor/soundcloud.py +++ b/youtube_dl/extractor/soundcloud.py @@ -31,6 +31,7 @@ class SoundcloudIE(InfoExtractor): _VALID_URL = r'''(?x)^(?:https?://)? (?:(?:(?:www\.|m\.)?soundcloud\.com/ + (?!stations/track) (?P<uploader>[\w\d-]+)/ (?!(?:tracks|sets(?:/.+?)?|reposts|likes|spotlight)/?(?:$|[?#])) (?P<title>[\w\d-]+)/? 
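# Note (added for clarity; not part of the patch): the (?!stations/track)
# lookahead added in the hunk above keeps /stations/track/... URLs out of
# SoundcloudIE, so the new SoundcloudTrackStationIE introduced below claims them.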
@@ -330,7 +331,63 @@ class SoundcloudSetIE(SoundcloudPlaylistBaseIE): } -class SoundcloudUserIE(SoundcloudPlaylistBaseIE): +class SoundcloudPagedPlaylistBaseIE(SoundcloudPlaylistBaseIE): + _API_BASE = 'https://api.soundcloud.com' + _API_V2_BASE = 'https://api-v2.soundcloud.com' + + def _extract_playlist(self, base_url, playlist_id, playlist_title): + COMMON_QUERY = { + 'limit': 50, + 'client_id': self._CLIENT_ID, + 'linked_partitioning': '1', + } + + query = COMMON_QUERY.copy() + query['offset'] = 0 + + next_href = base_url + '?' + compat_urllib_parse_urlencode(query) + + entries = [] + for i in itertools.count(): + response = self._download_json( + next_href, playlist_id, 'Downloading track page %s' % (i + 1)) + + collection = response['collection'] + if not collection: + break + + def resolve_permalink_url(candidates): + for cand in candidates: + if isinstance(cand, dict): + permalink_url = cand.get('permalink_url') + entry_id = self._extract_id(cand) + if permalink_url and permalink_url.startswith('http'): + return permalink_url, entry_id + + for e in collection: + permalink_url, entry_id = resolve_permalink_url((e, e.get('track'), e.get('playlist'))) + if permalink_url: + entries.append(self.url_result(permalink_url, video_id=entry_id)) + + next_href = response.get('next_href') + if not next_href: + break + + parsed_next_href = compat_urlparse.urlparse(response['next_href']) + qs = compat_urlparse.parse_qs(parsed_next_href.query) + qs.update(COMMON_QUERY) + next_href = compat_urlparse.urlunparse( + parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True))) + + return { + '_type': 'playlist', + 'id': playlist_id, + 'title': playlist_title, + 'entries': entries, + } + + +class SoundcloudUserIE(SoundcloudPagedPlaylistBaseIE): _VALID_URL = r'''(?x) https?:// (?:(?:www|m)\.)?soundcloud\.com/ @@ -385,16 +442,13 @@ class SoundcloudUserIE(SoundcloudPlaylistBaseIE): 'playlist_mincount': 1, }] - _API_BASE = 'https://api.soundcloud.com' - _API_V2_BASE = 'https://api-v2.soundcloud.com' - _BASE_URL_MAP = { - 'all': '%s/profile/soundcloud:users:%%s' % _API_V2_BASE, - 'tracks': '%s/users/%%s/tracks' % _API_BASE, - 'sets': '%s/users/%%s/playlists' % _API_V2_BASE, - 'reposts': '%s/profile/soundcloud:users:%%s/reposts' % _API_V2_BASE, - 'likes': '%s/users/%%s/likes' % _API_V2_BASE, - 'spotlight': '%s/users/%%s/spotlight' % _API_V2_BASE, + 'all': '%s/profile/soundcloud:users:%%s' % SoundcloudPagedPlaylistBaseIE._API_V2_BASE, + 'tracks': '%s/users/%%s/tracks' % SoundcloudPagedPlaylistBaseIE._API_BASE, + 'sets': '%s/users/%%s/playlists' % SoundcloudPagedPlaylistBaseIE._API_V2_BASE, + 'reposts': '%s/profile/soundcloud:users:%%s/reposts' % SoundcloudPagedPlaylistBaseIE._API_V2_BASE, + 'likes': '%s/users/%%s/likes' % SoundcloudPagedPlaylistBaseIE._API_V2_BASE, + 'spotlight': '%s/users/%%s/spotlight' % SoundcloudPagedPlaylistBaseIE._API_V2_BASE, } _TITLE_MAP = { @@ -416,57 +470,36 @@ class SoundcloudUserIE(SoundcloudPlaylistBaseIE): resolv_url, uploader, 'Downloading user info') resource = mobj.group('rsrc') or 'all' - base_url = self._BASE_URL_MAP[resource] % user['id'] - COMMON_QUERY = { - 'limit': 50, - 'client_id': self._CLIENT_ID, - 'linked_partitioning': '1', - } + return self._extract_playlist( + self._BASE_URL_MAP[resource] % user['id'], compat_str(user['id']), + '%s (%s)' % (user['username'], self._TITLE_MAP[resource])) - query = COMMON_QUERY.copy() - query['offset'] = 0 - next_href = base_url + '?' 
+ compat_urllib_parse_urlencode(query) - - entries = [] - for i in itertools.count(): - response = self._download_json( - next_href, uploader, 'Downloading track page %s' % (i + 1)) - - collection = response['collection'] - if not collection: - break - - def resolve_permalink_url(candidates): - for cand in candidates: - if isinstance(cand, dict): - permalink_url = cand.get('permalink_url') - entry_id = self._extract_id(cand) - if permalink_url and permalink_url.startswith('http'): - return permalink_url, entry_id +class SoundcloudTrackStationIE(SoundcloudPagedPlaylistBaseIE): + _VALID_URL = r'https?://(?:(?:www|m)\.)?soundcloud\.com/stations/track/[^/]+/(?P<id>[^/?#&]+)' + IE_NAME = 'soundcloud:trackstation' + _TESTS = [{ + 'url': 'https://soundcloud.com/stations/track/officialsundial/your-text', + 'info_dict': { + 'id': '286017854', + 'title': 'Track station: your-text', + }, + 'playlist_mincount': 47, + }] - for e in collection: - permalink_url, entry_id = resolve_permalink_url((e, e.get('track'), e.get('playlist'))) - if permalink_url: - entries.append(self.url_result(permalink_url, video_id=entry_id)) + def _real_extract(self, url): + track_name = self._match_id(url) - next_href = response.get('next_href') - if not next_href: - break + webpage = self._download_webpage(url, track_name) - parsed_next_href = compat_urlparse.urlparse(response['next_href']) - qs = compat_urlparse.parse_qs(parsed_next_href.query) - qs.update(COMMON_QUERY) - next_href = compat_urlparse.urlunparse( - parsed_next_href._replace(query=compat_urllib_parse_urlencode(qs, True))) + track_id = self._search_regex( + r'soundcloud:track-stations:(\d+)', webpage, 'track id') - return { - '_type': 'playlist', - 'id': compat_str(user['id']), - 'title': '%s (%s)' % (user['username'], self._TITLE_MAP[resource]), - 'entries': entries, - } + return self._extract_playlist( + '%s/stations/soundcloud:track-stations:%s/tracks' + % (self._API_V2_BASE, track_id), + track_id, 'Track station: %s' % track_name) class SoundcloudPlaylistIE(SoundcloudPlaylistBaseIE): -- cgit 1.4.1 From 0ed4758023ddfb4d9630ba9114ef70ef7e6ac09d Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 30 Jul 2017 19:08:44 +0700 Subject: [clipfish] Remove extractor --- youtube_dl/extractor/clipfish.py | 67 -------------------------------------- youtube_dl/extractor/extractors.py | 1 - 2 files changed, 68 deletions(-) delete mode 100644 youtube_dl/extractor/clipfish.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/clipfish.py b/youtube_dl/extractor/clipfish.py deleted file mode 100644 index 0920f6219..000000000 --- a/youtube_dl/extractor/clipfish.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .common import InfoExtractor -from ..utils import ( - int_or_none, - unified_strdate, -) - - -class ClipfishIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?clipfish\.de/(?:[^/]+/)+video/(?P<id>[0-9]+)' - _TEST = { - 'url': 'http://www.clipfish.de/special/ugly-americans/video/4343170/s01-e01-ugly-americans-date-in-der-hoelle/', - 'md5': 'b9a5dc46294154c1193e2d10e0c95693', - 'info_dict': { - 'id': '4343170', - 'ext': 'mp4', - 'title': 'S01 E01 - Ugly Americans - Date in der Hölle', - 'description': 'Mark Lilly arbeitet im Sozialdienst der Stadt New York und soll Immigranten bei ihrer Einbürgerung in die USA zur Seite stehen.', - 'upload_date': '20161005', - 'duration': 1291, - 'view_count': int, - } - } - - def _real_extract(self, url): - video_id = 
self._match_id(url) - - video_info = self._download_json( - 'http://www.clipfish.de/devapi/id/%s?format=json&apikey=hbbtv' % video_id, - video_id)['items'][0] - - formats = [] - - m3u8_url = video_info.get('media_videourl_hls') - if m3u8_url: - formats.append({ - 'url': m3u8_url.replace('de.hls.fra.clipfish.de', 'hls.fra.clipfish.de'), - 'ext': 'mp4', - 'format_id': 'hls', - }) - - mp4_url = video_info.get('media_videourl') - if mp4_url: - formats.append({ - 'url': mp4_url, - 'format_id': 'mp4', - 'width': int_or_none(video_info.get('width')), - 'height': int_or_none(video_info.get('height')), - 'tbr': int_or_none(video_info.get('bitrate')), - }) - - descr = video_info.get('descr') - if descr: - descr = descr.strip() - - return { - 'id': video_id, - 'title': video_info['title'], - 'description': descr, - 'formats': formats, - 'thumbnail': video_info.get('media_content_thumbnail_large') or video_info.get('media_thumbnail'), - 'duration': int_or_none(video_info.get('media_length')), - 'upload_date': unified_strdate(video_info.get('pubDate')), - 'view_count': int_or_none(video_info.get('media_views')) - } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d2c5e8030..bdc7370cd 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -186,7 +186,6 @@ from .chirbit import ( ) from .cinchcast import CinchcastIE from .cjsw import CJSWIE -from .clipfish import ClipfishIE from .cliphunter import CliphunterIE from .cliprs import ClipRsIE from .clipsyndicate import ClipsyndicateIE -- cgit 1.4.1 From 8b9f50d7cb4cfab5d505f4233c3e176a8106d6db Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 30 Jul 2017 19:09:44 +0700 Subject: [watchbox] Add extractor (#13739) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/watchbox.py | 151 +++++++++++++++++++++++++++++++++++++ 2 files changed, 152 insertions(+) create mode 100644 youtube_dl/extractor/watchbox.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index bdc7370cd..3489e86f0 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1244,6 +1244,7 @@ from .washingtonpost import ( WashingtonPostArticleIE, ) from .wat import WatIE +from .watchbox import WatchBoxIE from .watchindianporn import WatchIndianPornIE from .wdr import ( WDRIE, diff --git a/youtube_dl/extractor/watchbox.py b/youtube_dl/extractor/watchbox.py new file mode 100644 index 000000000..b382338fa --- /dev/null +++ b/youtube_dl/extractor/watchbox.py @@ -0,0 +1,151 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..compat import compat_str +from ..utils import ( + int_or_none, + js_to_json, + strip_or_none, + try_get, + unified_timestamp, +) + + +class WatchBoxIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?watchbox\.de/(?P<kind>serien|filme)/(?:[^/]+/)*[^/]+-(?P<id>\d+)' + _TESTS = [{ + # film + 'url': 'https://www.watchbox.de/filme/free-jimmy-12325.html', + 'info_dict': { + 'id': '341368', + 'ext': 'mp4', + 'title': 'Free Jimmy', + 'description': 'md5:bcd8bafbbf9dc0ef98063d344d7cc5f6', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 4890, + 'age_limit': 16, + 'release_year': 2009, + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + 'expected_warnings': ['Failed to download m3u8 information'], + }, { + # episode + 'url': 
'https://www.watchbox.de/serien/ugly-americans-12231/staffel-1/date-in-der-hoelle-328286.html', + 'info_dict': { + 'id': '328286', + 'ext': 'mp4', + 'title': 'S01 E01 - Date in der Hölle', + 'description': 'md5:2f31c74a8186899f33cb5114491dae2b', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 1291, + 'age_limit': 12, + 'release_year': 2010, + 'series': 'Ugly Americans', + 'season_number': 1, + 'episode': 'Date in der Hölle', + 'episode_number': 1, + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + 'expected_warnings': ['Failed to download m3u8 information'], + }, { + 'url': 'https://www.watchbox.de/serien/ugly-americans-12231/staffel-2/der-ring-des-powers-328270', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + kind, video_id = mobj.group('kind', 'id') + + webpage = self._download_webpage(url, video_id) + + source = self._parse_json( + self._search_regex( + r'(?s)source\s*:\s*({.+?})\s*,\s*\n', webpage, 'source', + default='{}'), + video_id, transform_source=js_to_json, fatal=False) or {} + + video_id = compat_str(source.get('videoId') or video_id) + + devapi = self._download_json( + 'http://api.watchbox.de/devapi/id/%s' % video_id, video_id, query={ + 'format': 'json', + 'apikey': 'hbbtv', + }, fatal=False) + + item = try_get(devapi, lambda x: x['items'][0], dict) or {} + + title = item.get('title') or try_get( + item, lambda x: x['movie']['headline_movie'], + compat_str) or source['title'] + + formats = [] + hls_url = item.get('media_videourl_hls') or source.get('hls') + if hls_url: + formats.extend(self._extract_m3u8_formats( + hls_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls', fatal=False)) + dash_url = item.get('media_videourl_wv') or source.get('dash') + if dash_url: + formats.extend(self._extract_mpd_formats( + dash_url, video_id, mpd_id='dash', fatal=False)) + mp4_url = item.get('media_videourl') + if mp4_url: + formats.append({ + 'url': mp4_url, + 'format_id': 'mp4', + 'width': int_or_none(item.get('width')), + 'height': int_or_none(item.get('height')), + 'tbr': int_or_none(item.get('bitrate')), + }) + self._sort_formats(formats) + + description = strip_or_none(item.get('descr')) + thumbnail = item.get('media_content_thumbnail_large') or source.get('poster') or item.get('media_thumbnail') + duration = int_or_none(item.get('media_length') or source.get('length')) + timestamp = unified_timestamp(item.get('pubDate')) + view_count = int_or_none(item.get('media_views')) + age_limit = int_or_none(try_get(item, lambda x: x['movie']['fsk'])) + release_year = int_or_none(try_get(item, lambda x: x['movie']['rel_year'])) + + info = { + 'id': video_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'duration': duration, + 'timestamp': timestamp, + 'view_count': view_count, + 'age_limit': age_limit, + 'release_year': release_year, + 'formats': formats, + } + + if kind.lower() == 'serien': + series = try_get( + item, lambda x: x['special']['title'], + compat_str) or source.get('format') + season_number = int_or_none(self._search_regex( + r'^S(\d{1,2})\s*E\d{1,2}', title, 'season number', + default=None) or self._search_regex( + r'/staffel-(\d+)/', url, 'season number', default=None)) + episode = source.get('title') + episode_number = int_or_none(self._search_regex( + r'^S\d{1,2}\s*E(\d{1,2})', title, 'episode number', + default=None)) + info.update({ + 'series': series, + 'season_number': season_number, + 'episode': episode, + 'episode_number': 
episode_number, + }) + + return info -- cgit 1.4.1 From 799802f368012f579750b26db117b3a9dfdcbe05 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Fri, 4 Aug 2017 23:54:28 +0700 Subject: [teamfour] Remove extractor (closes #13782) Now covered with generic extractor --- youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/teamfourstar.py | 48 ------------------------------------ 2 files changed, 49 deletions(-) delete mode 100644 youtube_dl/extractor/teamfourstar.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 3489e86f0..d0e04dd7d 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -994,7 +994,6 @@ from .teachertube import ( ) from .teachingchannel import TeachingChannelIE from .teamcoco import TeamcocoIE -from .teamfourstar import TeamFourStarIE from .techtalks import TechTalksIE from .ted import TEDIE from .tele13 import Tele13IE diff --git a/youtube_dl/extractor/teamfourstar.py b/youtube_dl/extractor/teamfourstar.py deleted file mode 100644 index a8c6ed7be..000000000 --- a/youtube_dl/extractor/teamfourstar.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 -from __future__ import unicode_literals - -from .common import InfoExtractor -from .jwplatform import JWPlatformIE -from ..utils import unified_strdate - - -class TeamFourStarIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?teamfourstar\.com/(?P<id>[a-z0-9\-]+)' - _TEST = { - 'url': 'http://teamfourstar.com/tfs-abridged-parody-episode-1-2/', - 'info_dict': { - 'id': '0WdZO31W', - 'title': 'TFS Abridged Parody Episode 1', - 'description': 'md5:d60bc389588ebab2ee7ad432bda953ae', - 'ext': 'mp4', - 'timestamp': 1394168400, - 'upload_date': '20080508', - }, - } - - def _real_extract(self, url): - display_id = self._match_id(url) - webpage = self._download_webpage(url, display_id) - - jwplatform_url = JWPlatformIE._extract_url(webpage) - - video_title = self._html_search_regex( - r'<h1[^>]+class="entry-title"[^>]*>(?P<title>.+?)</h1>', - webpage, 'title') - video_date = unified_strdate(self._html_search_regex( - r'<span[^>]+class="meta-date date updated"[^>]*>(?P<date>.+?)</span>', - webpage, 'date', fatal=False)) - video_description = self._html_search_regex( - r'(?s)<div[^>]+class="content-inner"[^>]*>.*?(?P<description><p>.+?)</div>', - webpage, 'description', fatal=False) - video_thumbnail = self._og_search_thumbnail(webpage) - - return { - '_type': 'url_transparent', - 'display_id': display_id, - 'title': video_title, - 'description': video_description, - 'upload_date': video_date, - 'thumbnail': video_thumbnail, - 'url': jwplatform_url, - } -- cgit 1.4.1 From daaaf5f5942252e9fbc367957bd3b8a96d0dd5bb Mon Sep 17 00:00:00 2001 From: Ashutosh Chaudhary <ashutosh.chaudhary@gmail.com> Date: Mon, 23 Jan 2017 05:12:52 +0530 Subject: [voot] Add extractor --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/voot.py | 55 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 56 insertions(+) create mode 100644 youtube_dl/extractor/voot.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d0e04dd7d..48dda8b8e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1333,3 +1333,4 @@ from .zapiks import ZapiksIE from .zaq1 import Zaq1IE from .zdf import ZDFIE, ZDFChannelIE from .zingmp3 import ZingMp3IE +from .voot import VootIE diff --git 
a/youtube_dl/extractor/voot.py b/youtube_dl/extractor/voot.py new file mode 100644 index 000000000..db5bda660 --- /dev/null +++ b/youtube_dl/extractor/voot.py @@ -0,0 +1,55 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor + + +class VootIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?voot\.com/shows/(?:.+?[/-]?)/1/(?:.+?[0-9]?)/(?:.+?[/-]?)/(?P<id>[0-9]+)' + _TEST = { + 'url': 'https://www.voot.com/shows/ishq-ka-rang-safed/1/360558/is-this-the-end-of-kamini-/441353', + 'info_dict': { + 'id': '441353', + 'ext': 'mp4', + 'title': 'Ishq Ka Rang Safed - Season 01 - Episode 340', + 'thumbnail': r're:^https?://.*\.jpg$', + } + } + + _GET_CONTENT_TEMPLATE = 'https://wapi.voot.com/ws/ott/getMediaInfo.json?platform=Web&pId=3&mediaId=%s' + + def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True): + json_data = super(VootIE, self)._download_json(url_or_request, video_id, note, fatal=fatal) + if json_data['status']['code'] != 0: + if fatal: + raise ExtractorError(json_data['status']['message']) + return None + return json_data['assets'] + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._download_json( + self._GET_CONTENT_TEMPLATE % video_id, + video_id) + + thumbnail = '' + formats = [] + + if video_data: + format_url = video_data.get('URL') + formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + + if video_data['Pictures']: + for picture in video_data['Pictures']: + #Get only first available thumbnail + thumbnail = picture.get('URL') + break + + self._sort_formats(formats) + + return { + 'id': video_id, + 'title': video_data.get('MediaName'), + 'thumbnail': thumbnail, + 'formats':formats, + } -- cgit 1.4.1 From e2b4808fd8ed49424deaa6d800daf0950e55ffff Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 6 Aug 2017 08:04:51 +0700 Subject: [voot] Improve extraction (#10255, closes #11814) --- youtube_dl/extractor/extractors.py | 2 +- youtube_dl/extractor/voot.py | 111 +++++++++++++++++++++++++------------ 2 files changed, 78 insertions(+), 35 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 48dda8b8e..ebe414dae 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1222,6 +1222,7 @@ from .vodlocker import VodlockerIE from .vodpl import VODPlIE from .vodplatform import VODPlatformIE from .voicerepublic import VoiceRepublicIE +from .voot import VootIE from .voxmedia import VoxMediaIE from .vporn import VpornIE from .vrt import VRTIE @@ -1333,4 +1334,3 @@ from .zapiks import ZapiksIE from .zaq1 import Zaq1IE from .zdf import ZDFIE, ZDFChannelIE from .zingmp3 import ZingMp3IE -from .voot import VootIE diff --git a/youtube_dl/extractor/voot.py b/youtube_dl/extractor/voot.py index db5bda660..5de3deb8c 100644 --- a/youtube_dl/extractor/voot.py +++ b/youtube_dl/extractor/voot.py @@ -2,54 +2,97 @@ from __future__ import unicode_literals from .common import InfoExtractor +from .kaltura import KalturaIE +from ..utils import ( + ExtractorError, + int_or_none, + try_get, + unified_timestamp, +) class VootIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?voot\.com/shows/(?:.+?[/-]?)/1/(?:.+?[0-9]?)/(?:.+?[/-]?)/(?P<id>[0-9]+)' - _TEST = { + _VALID_URL = r'https?://(?:www\.)?voot\.com/(?:[^/]+/)+(?P<id>\d+)' + _GEO_COUNTRIES = ['IN'] + _TESTS = [{ 'url': 
'https://www.voot.com/shows/ishq-ka-rang-safed/1/360558/is-this-the-end-of-kamini-/441353', 'info_dict': { - 'id': '441353', + 'id': '0_8ledb18o', 'ext': 'mp4', 'title': 'Ishq Ka Rang Safed - Season 01 - Episode 340', - 'thumbnail': r're:^https?://.*\.jpg$', - } - } - - _GET_CONTENT_TEMPLATE = 'https://wapi.voot.com/ws/ott/getMediaInfo.json?platform=Web&pId=3&mediaId=%s' - - def _download_json(self, url_or_request, video_id, note='Downloading JSON metadata', fatal=True): - json_data = super(VootIE, self)._download_json(url_or_request, video_id, note, fatal=fatal) - if json_data['status']['code'] != 0: - if fatal: - raise ExtractorError(json_data['status']['message']) - return None - return json_data['assets'] + 'description': 'md5:06291fbbbc4dcbe21235c40c262507c1', + 'uploader_id': 'batchUser', + 'timestamp': 1472162937, + 'upload_date': '20160825', + 'duration': 1146, + 'series': 'Ishq Ka Rang Safed', + 'season_number': 1, + 'episode': 'Is this the end of Kamini?', + 'episode_number': 340, + 'view_count': int, + 'like_count': int, + }, + 'params': { + 'skip_download': True, + }, + 'expected_warnings': ['Failed to download m3u8 information'], + }, { + 'url': 'https://www.voot.com/kids/characters/mighty-cat-masked-niyander-e-/400478/school-bag-disappears/440925', + 'only_matching': True, + }, { + 'url': 'https://www.voot.com/movies/pandavas-5/424627', + 'only_matching': True, + }] def _real_extract(self, url): video_id = self._match_id(url) - video_data = self._download_json( - self._GET_CONTENT_TEMPLATE % video_id, - video_id) - thumbnail = '' - formats = [] + media_info = self._download_json( + 'https://wapi.voot.com/ws/ott/getMediaInfo.json', video_id, + query={ + 'platform': 'Web', + 'pId': 2, + 'mediaId': video_id, + }) + + status_code = try_get(media_info, lambda x: x['status']['code'], int) + if status_code != 0: + raise ExtractorError(media_info['status']['message'], expected=True) + + media = media_info['assets'] - if video_data: - format_url = video_data.get('URL') - formats.extend(self._extract_m3u8_formats(format_url, video_id, 'mp4', m3u8_id='hls', fatal=False)) + entry_id = media['EntryId'] + title = media['MediaName'] - if video_data['Pictures']: - for picture in video_data['Pictures']: - #Get only first available thumbnail - thumbnail = picture.get('URL') - break + description, series, season_number, episode, episode_number = [None] * 5 - self._sort_formats(formats) + for meta in try_get(media, lambda x: x['Metas'], list) or []: + key, value = meta.get('Key'), meta.get('Value') + if not key or not value: + continue + if key == 'ContentSynopsis': + description = value + elif key == 'RefSeriesTitle': + series = value + elif key == 'RefSeriesSeason': + season_number = int_or_none(value) + elif key == 'EpisodeMainTitle': + episode = value + elif key == 'EpisodeNo': + episode_number = int_or_none(value) return { - 'id': video_id, - 'title': video_data.get('MediaName'), - 'thumbnail': thumbnail, - 'formats':formats, + '_type': 'url_transparent', + 'url': 'kaltura:1982551:%s' % entry_id, + 'ie_key': KalturaIE.ie_key(), + 'title': title, + 'description': description, + 'series': series, + 'season_number': season_number, + 'episode': episode, + 'episode_number': episode_number, + 'timestamp': unified_timestamp(media.get('CreationDate')), + 'duration': int_or_none(media.get('Duration')), + 'view_count': int_or_none(media.get('ViewCounter')), + 'like_count': int_or_none(media.get('like_counter')), } -- cgit 1.4.1 From 16afce174ea71690844d37776d518ae374b896ea Mon Sep 17 00:00:00 2001 
From: Sergey M․ <dstftw@gmail.com> Date: Sun, 6 Aug 2017 08:18:16 +0700 Subject: [mpora] Remove extractor (closes #13826) --- youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/mpora.py | 62 -------------------------------------- 2 files changed, 63 deletions(-) delete mode 100644 youtube_dl/extractor/mpora.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index ebe414dae..897557f93 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -584,7 +584,6 @@ from .mixcloud import ( ) from .mlb import MLBIE from .mnet import MnetIE -from .mpora import MporaIE from .moevideo import MoeVideoIE from .mofosex import MofosexIE from .mojvideo import MojvideoIE diff --git a/youtube_dl/extractor/mpora.py b/youtube_dl/extractor/mpora.py deleted file mode 100644 index 5a1bee5c8..000000000 --- a/youtube_dl/extractor/mpora.py +++ /dev/null @@ -1,62 +0,0 @@ -from __future__ import unicode_literals - -from .common import InfoExtractor -from ..utils import int_or_none - - -class MporaIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?mpora\.(?:com|de)/videos/(?P<id>[^?#/]+)' - IE_NAME = 'MPORA' - - _TEST = { - 'url': 'http://mpora.de/videos/AAdo8okx4wiz/embed?locale=de', - 'md5': 'a7a228473eedd3be741397cf452932eb', - 'info_dict': { - 'id': 'AAdo8okx4wiz', - 'ext': 'mp4', - 'title': 'Katy Curd - Winter in the Forest', - 'duration': 416, - 'uploader': 'Peter Newman Media', - }, - } - - def _real_extract(self, url): - video_id = self._match_id(url) - webpage = self._download_webpage(url, video_id) - - data_json = self._search_regex( - [r"new FM\.Player\('[^']+',\s*(\{.*?)\).player;", - r"new\s+FM\.Kaltura\.Player\('[^']+'\s*,\s*({.+?})\);"], - webpage, 'json') - data = self._parse_json(data_json, video_id) - - uploader = data['info_overlay'].get('username') - duration = data['video']['duration'] // 1000 - thumbnail = data['video']['encodings']['sd']['poster'] - title = data['info_overlay']['title'] - - formats = [] - for encoding_id, edata in data['video']['encodings'].items(): - for src in edata['sources']: - width_str = self._search_regex( - r'_([0-9]+)\.[a-zA-Z0-9]+$', src['src'], - False, default=None) - vcodec = src['type'].partition('/')[2] - - formats.append({ - 'format_id': encoding_id + '-' + vcodec, - 'url': src['src'], - 'vcodec': vcodec, - 'width': int_or_none(width_str), - }) - - self._sort_formats(formats) - - return { - 'id': video_id, - 'title': title, - 'formats': formats, - 'uploader': uploader, - 'duration': duration, - 'thumbnail': thumbnail, - } -- cgit 1.4.1 From 0c43a481b91c657643eb42f72d293f245a410c52 Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sat, 12 Aug 2017 23:24:46 +0700 Subject: [reddit] Add extractors (closes #13847) --- youtube_dl/extractor/extractors.py | 4 ++ youtube_dl/extractor/reddit.py | 114 +++++++++++++++++++++++++++++++++++++ 2 files changed, 118 insertions(+) create mode 100644 youtube_dl/extractor/reddit.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 897557f93..f1a9f6edf 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -840,6 +840,10 @@ from .rai import ( from .rbmaradio import RBMARadioIE from .rds import RDSIE from .redbulltv import RedBullTVIE +from .reddit import ( + RedditIE, + RedditRIE, +) from .redtube import RedTubeIE from .regiotv import RegioTVIE from .rentv 
import ( diff --git a/youtube_dl/extractor/reddit.py b/youtube_dl/extractor/reddit.py new file mode 100644 index 000000000..01c85ee01 --- /dev/null +++ b/youtube_dl/extractor/reddit.py @@ -0,0 +1,114 @@ +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + ExtractorError, + int_or_none, + float_or_none, +) + + +class RedditIE(InfoExtractor): + _VALID_URL = r'https?://v\.redd\.it/(?P<id>[^/?#&]+)' + _TEST = { + # from https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/ + 'url': 'https://v.redd.it/zv89llsvexdz', + 'md5': '655d06ace653ea3b87bccfb1b27ec99d', + 'info_dict': { + 'id': 'zv89llsvexdz', + 'ext': 'mp4', + 'title': 'zv89llsvexdz', + }, + 'params': { + 'format': 'bestvideo', + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + + formats = self._extract_m3u8_formats( + 'https://v.redd.it/%s/HLSPlaylist.m3u8' % video_id, video_id, + 'mp4', entry_protocol='m3u8_native', m3u8_id='hls', fatal=False) + + formats.extend(self._extract_mpd_formats( + 'https://v.redd.it/%s/DASHPlaylist.mpd' % video_id, video_id, + mpd_id='dash', fatal=False)) + + return { + 'id': video_id, + 'title': video_id, + 'formats': formats, + } + + +class RedditRIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?reddit\.com/r/[^/]+/comments/(?P<id>[^/]+)' + _TESTS = [{ + 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj/that_small_heart_attack/', + 'info_dict': { + 'id': 'zv89llsvexdz', + 'ext': 'mp4', + 'title': 'That small heart attack.', + 'thumbnail': r're:^https?://.*\.jpg$', + 'timestamp': 1501941939, + 'upload_date': '20170805', + 'uploader': 'Antw87', + 'like_count': int, + 'dislike_count': int, + 'comment_count': int, + 'age_limit': 0, + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + }, { + 'url': 'https://www.reddit.com/r/videos/comments/6rrwyj', + 'only_matching': True, + }, { + # imgur + 'url': 'https://www.reddit.com/r/MadeMeSmile/comments/6t7wi5/wait_for_it/', + 'only_matching': True, + }, { + # streamable + 'url': 'https://www.reddit.com/r/videos/comments/6t7sg9/comedians_hilarious_joke_about_the_guam_flag/', + 'only_matching': True, + }, { + # youtube + 'url': 'https://www.reddit.com/r/videos/comments/6t75wq/southern_man_tries_to_speak_without_an_accent/', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + data = self._download_json( + url + '.json', video_id)[0]['data']['children'][0]['data'] + + video_url = data['url'] + + # Avoid recursing into the same reddit URL + if 'reddit.com/' in video_url and '/%s/' % video_id in video_url: + raise ExtractorError('No media found', expected=True) + + over_18 = data.get('over_18') + if over_18 is True: + age_limit = 18 + elif over_18 is False: + age_limit = 0 + else: + age_limit = None + + return { + '_type': 'url_transparent', + 'url': video_url, + 'title': data.get('title'), + 'thumbnail': data.get('thumbnail'), + 'timestamp': float_or_none(data.get('created_utc')), + 'uploader': data.get('author'), + 'like_count': int_or_none(data.get('ups')), + 'dislike_count': int_or_none(data.get('downs')), + 'comment_count': int_or_none(data.get('num_comments')), + 'age_limit': age_limit, + } -- cgit 1.4.1 From b3c6515365ed415bbf813c0c2e6c12585824b77a Mon Sep 17 00:00:00 2001 From: Sergey M․ <dstftw@gmail.com> Date: Sun, 13 Aug 2017 07:23:29 +0700 Subject: [fourtube] Add support for other sites (closes #6022, closes #7859, closes #13901) --- youtube_dl/extractor/extractors.py | 7 +- 
youtube_dl/extractor/fourtube.py | 174 +++++++++++++++++++++++++++++-------- 2 files changed, 146 insertions(+), 35 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index f1a9f6edf..fb79a1736 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -350,7 +350,12 @@ from .flipagram import FlipagramIE from .folketinget import FolketingetIE from .footyroom import FootyRoomIE from .formula1 import Formula1IE -from .fourtube import FourTubeIE +from .fourtube import ( + FourTubeIE, + PornTubeIE, + PornerBrosIE, + FuxIE, +) from .fox import FOXIE from .fox9 import FOX9IE from .foxgay import FoxgayIE diff --git a/youtube_dl/extractor/fourtube.py b/youtube_dl/extractor/fourtube.py index e3fd08bcf..ad273a0e7 100644 --- a/youtube_dl/extractor/fourtube.py +++ b/youtube_dl/extractor/fourtube.py @@ -3,39 +3,22 @@ from __future__ import unicode_literals import re from .common import InfoExtractor +from ..compat import compat_urlparse from ..utils import ( parse_duration, parse_iso8601, - sanitized_Request, str_to_int, ) -class FourTubeIE(InfoExtractor): - IE_NAME = '4tube' - _VALID_URL = r'https?://(?:www\.)?4tube\.com/videos/(?P<id>\d+)' +class FourTubeBaseIE(InfoExtractor): + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + kind, video_id, display_id = mobj.group('kind', 'id', 'display_id') - _TEST = { - 'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', - 'md5': '6516c8ac63b03de06bc8eac14362db4f', - 'info_dict': { - 'id': '209733', - 'ext': 'mp4', - 'title': 'Hot Babe Holly Michaels gets her ass stuffed by black', - 'uploader': 'WCP Club', - 'uploader_id': 'wcp-club', - 'upload_date': '20131031', - 'timestamp': 1383263892, - 'duration': 583, - 'view_count': int, - 'like_count': int, - 'categories': list, - 'age_limit': 18, - } - } + if kind == 'm' or not display_id: + url = self._URL_TEMPLATE % video_id - def _real_extract(self, url): - video_id = self._match_id(url) webpage = self._download_webpage(url, video_id) title = self._html_search_meta('name', webpage) @@ -43,10 +26,10 @@ class FourTubeIE(InfoExtractor): 'uploadDate', webpage)) thumbnail = self._html_search_meta('thumbnailUrl', webpage) uploader_id = self._html_search_regex( - r'<a class="item-to-subscribe" href="[^"]+/channels/([^/"]+)" title="Go to [^"]+ page">', + r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/([^/"]+)" title="Go to [^"]+ page">', webpage, 'uploader id', fatal=False) uploader = self._html_search_regex( - r'<a class="item-to-subscribe" href="[^"]+/channels/[^/"]+" title="Go to ([^"]+) page">', + r'<a class="item-to-subscribe" href="[^"]+/(?:channel|user)s?/[^/"]+" title="Go to ([^"]+) page">', webpage, 'uploader', fatal=False) categories_html = self._search_regex( @@ -60,10 +43,10 @@ class FourTubeIE(InfoExtractor): view_count = str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserPlays:([0-9,]+)">', - webpage, 'view count', fatal=False)) + webpage, 'view count', default=None)) like_count = str_to_int(self._search_regex( r'<meta[^>]+itemprop="interactionCount"[^>]+content="UserLikes:([0-9,]+)">', - webpage, 'like count', fatal=False)) + webpage, 'like count', default=None)) duration = parse_duration(self._html_search_meta('duration', webpage)) media_id = self._search_regex( @@ -87,12 +70,12 @@ class FourTubeIE(InfoExtractor): token_url = 
'https://tkn.kodicdn.com/{0}/desktop/{1}'.format( media_id, '+'.join(sources)) - headers = { - b'Content-Type': b'application/x-www-form-urlencoded', - b'Origin': b'https://www.4tube.com', - } - token_req = sanitized_Request(token_url, b'{}', headers) - tokens = self._download_json(token_req, video_id) + + parsed_url = compat_urlparse.urlparse(url) + tokens = self._download_json(token_url, video_id, data=b'', headers={ + 'Origin': '%s://%s' % (parsed_url.scheme, parsed_url.hostname), + 'Referer': url, + }) formats = [{ 'url': tokens[format]['token'], 'format_id': format + 'p', @@ -115,3 +98,126 @@ class FourTubeIE(InfoExtractor): 'duration': duration, 'age_limit': 18, } + + +class FourTubeIE(FourTubeBaseIE): + IE_NAME = '4tube' + _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?4tube\.com/(?:videos|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' + _URL_TEMPLATE = 'https://www.4tube.com/videos/%s/video' + _TESTS = [{ + 'url': 'http://www.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', + 'md5': '6516c8ac63b03de06bc8eac14362db4f', + 'info_dict': { + 'id': '209733', + 'ext': 'mp4', + 'title': 'Hot Babe Holly Michaels gets her ass stuffed by black', + 'uploader': 'WCP Club', + 'uploader_id': 'wcp-club', + 'upload_date': '20131031', + 'timestamp': 1383263892, + 'duration': 583, + 'view_count': int, + 'like_count': int, + 'categories': list, + 'age_limit': 18, + }, + }, { + 'url': 'http://www.4tube.com/embed/209733', + 'only_matching': True, + }, { + 'url': 'http://m.4tube.com/videos/209733/hot-babe-holly-michaels-gets-her-ass-stuffed-by-black', + 'only_matching': True, + }] + + +class FuxIE(FourTubeBaseIE): + _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?fux\.com/(?:video|embed)/(?P<id>\d+)(?:/(?P<display_id>[^/?#&]+))?' + _URL_TEMPLATE = 'https://www.fux.com/video/%s/video' + _TESTS = [{ + 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', + 'info_dict': { + 'id': '195359', + 'ext': 'mp4', + 'title': 'Awesome fucking in the kitchen ends with cum swallow', + 'uploader': 'alenci2342', + 'uploader_id': 'alenci2342', + 'upload_date': '20131230', + 'timestamp': 1388361660, + 'duration': 289, + 'view_count': int, + 'like_count': int, + 'categories': list, + 'age_limit': 18, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://www.fux.com/embed/195359', + 'only_matching': True, + }, { + 'url': 'https://www.fux.com/video/195359/awesome-fucking-kitchen-ends-cum-swallow', + 'only_matching': True, + }] + + +class PornTubeIE(FourTubeBaseIE): + _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?porntube\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' + _URL_TEMPLATE = 'https://www.porntube.com/videos/video_%s' + _TESTS = [{ + 'url': 'https://www.porntube.com/videos/teen-couple-doing-anal_7089759', + 'info_dict': { + 'id': '7089759', + 'ext': 'mp4', + 'title': 'Teen couple doing anal', + 'uploader': 'Alexy', + 'uploader_id': 'Alexy', + 'upload_date': '20150606', + 'timestamp': 1433595647, + 'duration': 5052, + 'view_count': int, + 'like_count': int, + 'categories': list, + 'age_limit': 18, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://www.porntube.com/embed/7089759', + 'only_matching': True, + }, { + 'url': 'https://m.porntube.com/videos/teen-couple-doing-anal_7089759', + 'only_matching': True, + }] + + +class PornerBrosIE(FourTubeBaseIE): + _VALID_URL = r'https?://(?:(?P<kind>www|m)\.)?pornerbros\.com/(?:videos/(?P<display_id>[^/]+)_|embed/)(?P<id>\d+)' + _URL_TEMPLATE = 
'https://www.pornerbros.com/videos/video_%s' + _TESTS = [{ + 'url': 'https://www.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', + 'md5': '6516c8ac63b03de06bc8eac14362db4f', + 'info_dict': { + 'id': '181369', + 'ext': 'mp4', + 'title': 'Skinny brunette takes big cock down her anal hole', + 'uploader': 'PornerBros HD', + 'uploader_id': 'pornerbros-hd', + 'upload_date': '20130130', + 'timestamp': 1359527401, + 'duration': 1224, + 'view_count': int, + 'categories': list, + 'age_limit': 18, + }, + 'params': { + 'skip_download': True, + }, + }, { + 'url': 'https://www.pornerbros.com/embed/181369', + 'only_matching': True, + }, { + 'url': 'https://m.pornerbros.com/videos/skinny-brunette-takes-big-cock-down-her-anal-hole_181369', + 'only_matching': True, + }] -- cgit 1.4.1 From bfabd17b33d47f1e973121483623768010880845 Mon Sep 17 00:00:00 2001 From: Genki Sky <genki@genki.is> Date: Tue, 8 Aug 2017 22:49:57 -0400 Subject: Add new extractor --- youtube_dl/extractor/clippit.py | 74 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 75 insertions(+) create mode 100644 youtube_dl/extractor/clippit.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/clippit.py b/youtube_dl/extractor/clippit.py new file mode 100644 index 000000000..a1a7a774c --- /dev/null +++ b/youtube_dl/extractor/clippit.py @@ -0,0 +1,74 @@ +# coding: utf-8 + +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + parse_iso8601, + qualities, +) + +import re + + +class ClippitIE(InfoExtractor): + + _VALID_URL = r'https?://(?:www\.)?clippituser\.tv/c/(?P<id>[a-z]+)' + _TEST = { + 'url': 'https://www.clippituser.tv/c/evmgm', + 'md5': '963ae7a59a2ec4572ab8bf2f2d2c5f09', + 'info_dict': { + 'id': 'evmgm', + 'ext': 'mp4', + 'title': 'Bye bye Brutus. 
#BattleBots - Clippit', + 'uploader': 'lizllove', + 'uploader_url': 'https://www.clippituser.tv/p/lizllove', + 'timestamp': 1472183818, + 'upload_date': '20160826', + 'description': 'BattleBots | ABC', + 'thumbnail': r're:^https?://.*\.jpg$', + } + } + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + title = self._html_search_regex(r'<title.*>(.+?)', webpage, 'title') + + FORMATS = ('sd', 'hd') + quality = qualities(FORMATS) + formats = [] + for format_id in FORMATS: + url = self._html_search_regex(r'data-%s-file="(.+?)"' % format_id, + webpage, 'url', fatal=False) + if not url: + continue + match = re.search(r'/(?P\d+)\.mp4', url) + formats.append({ + 'url': url, + 'format_id': format_id, + 'quality': quality(format_id), + 'height': int(match.group('height')) if match else None, + }) + + uploader = self._html_search_regex(r'class="username".*>\s+(.+?)\n', + webpage, 'uploader', fatal=False) + uploader_url = ('https://www.clippituser.tv/p/' + uploader + if uploader else None) + + timestamp = self._html_search_regex(r'datetime="(.+?)"', + webpage, 'date', fatal=False) + thumbnail = self._html_search_regex(r'data-image="(.+?)"', + webpage, 'thumbnail', fatal=False) + + return { + 'id': video_id, + 'title': title, + 'formats': formats, + 'uploader': uploader, + 'uploader_url': uploader_url, + 'timestamp': parse_iso8601(timestamp), + 'description': self._og_search_description(webpage), + 'thumbnail': thumbnail, + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index fb79a1736..ccfa14e7a 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -187,6 +187,7 @@ from .chirbit import ( from .cinchcast import CinchcastIE from .cjsw import CJSWIE from .cliphunter import CliphunterIE +from .clippit import ClippitIE from .cliprs import ClipRsIE from .clipsyndicate import ClipsyndicateIE from .closertotruth import CloserToTruthIE -- cgit 1.4.1 From f5469da9e6e259c1690c7ef54f1da1c19f65036f Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sat, 19 Aug 2017 19:48:20 +0700 Subject: [laola1tv] Add support for tv.ittf.com (closes #13965) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/laola1tv.py | 18 ++++++++++++++++++ 2 files changed, 19 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index ccfa14e7a..bda6826f1 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -509,6 +509,7 @@ from .la7 import LA7IE from .laola1tv import ( Laola1TvEmbedIE, Laola1TvIE, + ITTFIE, ) from .lci import LCIIE from .lcp import ( diff --git a/youtube_dl/extractor/laola1tv.py b/youtube_dl/extractor/laola1tv.py index 1f91ba017..c7f813370 100644 --- a/youtube_dl/extractor/laola1tv.py +++ b/youtube_dl/extractor/laola1tv.py @@ -215,3 +215,21 @@ class Laola1TvIE(Laola1TvEmbedIE): 'formats': formats, 'is_live': is_live, } + + +class ITTFIE(InfoExtractor): + _VALID_URL = r'https?://tv\.ittf\.com/video/[^/]+/(?P\d+)' + _TEST = { + 'url': 'https://tv.ittf.com/video/peng-wang-wei-matsudaira-kenta/951802', + 'only_matching': True, + } + + def _real_extract(self, url): + return self.url_result( + update_url_query('https://www.laola1.tv/titanplayer.php', { + 'videoid': self._match_id(url), + 'type': 'V', + 'lang': 'en', + 'portal': 'int', + 'customer': 1024, + }), Laola1TvEmbedIE.ie_key()) -- cgit 1.4.1 From 09747ba7663a9c6f89530c7ffbd95cb4776db6bf 
Mon Sep 17 00:00:00 2001 From: Yen Chi Hsuan Date: Sat, 19 Aug 2017 22:27:53 +0800 Subject: [liveleak] Support another liveleak embedding pattern (closes #13336) --- ChangeLog | 3 +++ youtube_dl/extractor/extractors.py | 5 ++++- youtube_dl/extractor/generic.py | 23 ++++++++++++++++++----- youtube_dl/extractor/liveleak.py | 35 ++++++++++++++++++++++++++++++----- 4 files changed, 55 insertions(+), 11 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/ChangeLog b/ChangeLog index 6c32747c4..4c7997b2e 100644 --- a/ChangeLog +++ b/ChangeLog @@ -4,6 +4,9 @@ Core * [cda] Fix extraction (closes #13935) * [utils] Fix unescapeHTML for misformed string like "&a"" (#13935) +Extractors ++ [liveleak] Support another liveleak embedding pattern (#13336) + version 2017.08.18 diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index bda6826f1..17048fd6e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -537,7 +537,10 @@ from .limelight import ( LimelightChannelListIE, ) from .litv import LiTVIE -from .liveleak import LiveLeakIE +from .liveleak import ( + LiveLeakIE, + LiveLeakEmbedIE, +) from .livestream import ( LivestreamIE, LivestreamOriginalIE, diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index d2fb2627d..49b00b87e 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -1519,14 +1519,27 @@ class GenericIE(InfoExtractor): # LiveLeak embed { 'url': 'http://www.wykop.pl/link/3088787/', - 'md5': 'ace83b9ed19b21f68e1b50e844fdf95d', + 'md5': '7619da8c820e835bef21a1efa2a0fc71', 'info_dict': { 'id': '874_1459135191', 'ext': 'mp4', 'title': 'Man shows poor quality of new apartment building', 'description': 'The wall is like a sand pile.', 'uploader': 'Lake8737', - } + }, + 'add_ie': [LiveLeakIE.ie_key()], + }, + # Another LiveLeak embed pattern (#13336) + { + 'url': 'https://milo.yiannopoulos.net/2017/06/concealed-carry-robbery/', + 'info_dict': { + 'id': '2eb_1496309988', + 'ext': 'mp4', + 'title': 'Thief robs place where everyone was armed', + 'description': 'md5:694d73ee79e535953cf2488562288eee', + 'uploader': 'brazilwtf', + }, + 'add_ie': [LiveLeakIE.ie_key()], }, # Duplicated embedded video URLs { @@ -2757,9 +2770,9 @@ class GenericIE(InfoExtractor): self._proto_relative_url(instagram_embed_url), InstagramIE.ie_key()) # Look for LiveLeak embeds - liveleak_url = LiveLeakIE._extract_url(webpage) - if liveleak_url: - return self.url_result(liveleak_url, 'LiveLeak') + liveleak_urls = LiveLeakIE._extract_urls(webpage) + if liveleak_urls: + return self.playlist_from_matches(liveleak_urls, video_id, video_title) # Look for 3Q SDN embeds threeqsdn_url = ThreeQSDNIE._extract_url(webpage) diff --git a/youtube_dl/extractor/liveleak.py b/youtube_dl/extractor/liveleak.py index b2247a84d..d23eaa355 100644 --- a/youtube_dl/extractor/liveleak.py +++ b/youtube_dl/extractor/liveleak.py @@ -75,12 +75,10 @@ class LiveLeakIE(InfoExtractor): }] @staticmethod - def _extract_url(webpage): - mobj = re.search( - r']+src="https?://(?:\w+\.)?liveleak\.com/ll_embed\?(?:.*?)i=(?P[\w_]+)(?:.*)', + def _extract_urls(webpage): + return re.findall( + r']+src="(https?://(?:\w+\.)?liveleak\.com/ll_embed\?[^"]*[if]=[\w_]+[^"]+)"', webpage) - if mobj: - return 'http://www.liveleak.com/view?i=%s' % mobj.group('id') def _real_extract(self, url): video_id = self._match_id(url) @@ -131,3 +129,30 @@ class LiveLeakIE(InfoExtractor): }) return info_dict + + +class 
LiveLeakEmbedIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?liveleak\.com/ll_embed\?.*?\b(?P[if])=(?P[\w_]+)' + + # See generic.py for actual test cases + _TESTS = [{ + 'url': 'https://www.liveleak.com/ll_embed?i=874_1459135191', + 'only_matching': True, + }, { + 'url': 'https://www.liveleak.com/ll_embed?f=ab065df993c1', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + kind, video_id = mobj.group('kind', 'id') + + if kind == 'f': + webpage = self._download_webpage(url, video_id) + liveleak_url = self._search_regex( + r'logourl\s*:\s*(?P[\'"])(?P%s)(?P=q1)' % LiveLeakIE._VALID_URL, + webpage, 'LiveLeak URL', group='url') + elif kind == 'i': + liveleak_url = 'http://www.liveleak.com/view?i=%s' % video_id + + return self.url_result(liveleak_url, ie=LiveLeakIE.ie_key()) -- cgit 1.4.1 From 503115540d8f135dc944ae48e40ba78f36238867 Mon Sep 17 00:00:00 2001 From: dubber0 Date: Sat, 22 Jul 2017 21:32:51 +0200 Subject: [aliexpress:live] Add extractor --- youtube_dl/extractor/aliexpress.py | 40 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 41 insertions(+) create mode 100644 youtube_dl/extractor/aliexpress.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/aliexpress.py b/youtube_dl/extractor/aliexpress.py new file mode 100644 index 000000000..3997213f8 --- /dev/null +++ b/youtube_dl/extractor/aliexpress.py @@ -0,0 +1,40 @@ +# coding: utf-8 +from __future__ import unicode_literals + + +import re + +from .common import InfoExtractor +from ..utils import try_get, float_or_none +from ..compat import compat_str + + +class AliExpressLiveIE(InfoExtractor): + + _VALID_URL = r'https?://live\.aliexpress\.com/live/(?P[0-9]{16})' + _TEST = { + 'url': 'https://live.aliexpress.com/live/2800002704436634', + 'md5': '7ac2bc46afdd18f0b45a0a340fc47ffe', + 'info_dict': { + 'id': '2800002704436634', + 'ext': 'm3u8', + 'title': 'CASIMA7.22', + 'uploader': 'CASIMA Official Store', + 'upload_date': '20170714', + 'timestamp': 1500027138, + }, + } + + def _real_extract(self, url): + video_id = self._match_id(url) + page = self._download_webpage(url, video_id) + run_params_json = self._search_regex(r'runParams = (.+)[\s+]var myCtl', page, 'runParams', flags=re.DOTALL) + run_params = self._parse_json(run_params_json, video_id) + + return { + 'id': video_id, + 'title': run_params['title'], + 'url': run_params['replyStreamUrl'], + 'uploader': try_get(run_params, lambda x: x['followBar']['name'], compat_str), + 'timestamp': float_or_none(try_get(run_params, lambda x: x['followBar']['createTime']) / 1000), + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 17048fd6e..d335f9fff 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -45,6 +45,7 @@ from .anvato import AnvatoIE from .anysex import AnySexIE from .aol import AolIE from .allocine import AllocineIE +from .aliexpress import AliExpressLiveIE from .aparat import AparatIE from .appleconnect import AppleConnectIE from .appletrailers import ( -- cgit 1.4.1 From e9b865267aaa90e3b9e1b0468d20a4df31e13393 Mon Sep 17 00:00:00 2001 From: John D Date: Wed, 30 Aug 2017 00:14:43 -0700 Subject: [manyvids] Add support for preview videos (closes #14053) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/manyvids.py | 36 ++++++++++++++++++++++++++++++++++++ 2 files changed, 37 insertions(+) create mode 100644 youtube_dl/extractor/manyvids.py 
(limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d335f9fff..46a11f3ef 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -564,6 +564,7 @@ from .mangomolo import ( MangomoloVideoIE, MangomoloLiveIE, ) +from .manyvids import ManyVidsIE from .matchtv import MatchTVIE from .mdr import MDRIE from .mediaset import MediasetIE diff --git a/youtube_dl/extractor/manyvids.py b/youtube_dl/extractor/manyvids.py new file mode 100644 index 000000000..ea739ce3f --- /dev/null +++ b/youtube_dl/extractor/manyvids.py @@ -0,0 +1,36 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..compat import compat_urllib_parse_unquote + + +class ManyVidsIE(InfoExtractor): + _VALID_URL = r'https?://www.manyvids\.com/Video/(?P[0-9]+)' + _TEST = { + 'url': 'https://www.manyvids.com/Video/133957/everthing-about-me/', + 'md5': '03f11bb21c52dd12a05be21a5c7dcc97', + 'info_dict': { + 'id': '133957', + 'ext': 'mp4', + 'title': 'everthing about me', + + } + } + + def _real_extract(self, url): + formats = [] + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + video_url = compat_urllib_parse_unquote(self._search_regex( + r'data-video-filepath=\"(.+?)\"', webpage, 'video URL', default='')) + + title = self._html_search_regex(r']+class="m-a-0"[^>]*>([^<]+)', webpage, 'title') + formats.append({ + 'url': video_url + }) + return { + 'id': video_id, + 'title': title, + 'formats': formats, + } -- cgit 1.4.1 From debed8d759e74507371758d2344ce5afe5e237c2 Mon Sep 17 00:00:00 2001 From: luceatnobis Date: Tue, 4 Jul 2017 11:26:02 +0200 Subject: [rutube:playlist] Add extractor (closes #13534) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/rutube.py | 84 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 85 insertions(+) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 46a11f3ef..aefadc56f 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -899,6 +899,7 @@ from .rutube import ( RutubeEmbedIE, RutubeMovieIE, RutubePersonIE, + RutubePlaylistIE, ) from .rutv import RUTVIE from .ruutu import RuutuIE diff --git a/youtube_dl/extractor/rutube.py b/youtube_dl/extractor/rutube.py index 889fa7628..a6b17c0ef 100644 --- a/youtube_dl/extractor/rutube.py +++ b/youtube_dl/extractor/rutube.py @@ -7,10 +7,14 @@ import itertools from .common import InfoExtractor from ..compat import ( compat_str, + compat_parse_qs, + compat_urllib_parse_urlparse, ) from ..utils import ( determine_ext, unified_strdate, + try_get, + int_or_none, ) @@ -42,8 +46,24 @@ class RutubeIE(InfoExtractor): }, { 'url': 'http://rutube.ru/embed/a10e53b86e8f349080f718582ce4c661', 'only_matching': True, + }, { + 'url': 'http://rutube.ru/video/3eac3b4561676c17df9132a9a1e62e3e/?pl_id=4252', + 'only_matching': True, }] + @classmethod + def suitable(cls, url): + parts = compat_urllib_parse_urlparse(url) + params = compat_parse_qs(parts.query) + + # see if URL without parameters is OK + res = super(RutubeIE, cls).suitable(url) + + if params: # we only allow pl_id parameter in the url + res = res and 'pl_id' in params and len(params) == 1 + + return res + @staticmethod def _extract_urls(webpage): return [mobj.group('url') for mobj in re.finditer( @@ -193,3 +213,67 @@ class RutubePersonIE(RutubeChannelIE): }] _PAGE_TEMPLATE = 
'http://rutube.ru/api/video/person/%s/?page=%s&format=json' + + +class RutubePlaylistIE(InfoExtractor): + IE_NAME = 'rutube:playlist' + IE_DESC = 'Rutube playlists' + _TESTS = [{ + 'url': 'https://rutube.ru/video/10b3a03fc01d5bbcc632a2f3514e8aab/?pl_id=4252&pl_type=source', + 'info_dict': { + 'id': '4252', + }, + 'playlist_count': 25, + }] + + _VALID_URL = r'https?://rutube\.ru/(?:video|(?:play/)?embed)/[\da-z]{32}/\?(?:.+)?pl_id=(?P\d+)' + _PAGE_TEMPLATE = 'http://rutube.ru/api/playlist/source/%s/?page=%s' + + @staticmethod + def suitable(url): + params = compat_parse_qs(compat_urllib_parse_urlparse(url).query) + return params.get('pl_id') and int_or_none(params['pl_id'][0]) \ + and params.get('pl_type') + + def _real_extract(self, url): + playlist_id = self._match_id(url) + return self._extract_playlist(playlist_id) + + def _extract_playlist(self, playlist_id): + entries = [] + for pagenum in itertools.count(1): + page_url = self._PAGE_TEMPLATE % (playlist_id, pagenum) + + # download_json will sent an accept: application/xml header + page = self._download_json(page_url, playlist_id, + "Downloading metadata for page %s" % pagenum, + headers={'Accept': 'application/json'}) + + if not page['results']: + break + + results = page['results'] + for result in results: + entry = self.url_result(result.get('video_url'), 'Rutube') + category = try_get(result, lambda x: x['category']['name']) + entry.update({ + 'id': result.get('id'), + 'uploader': try_get(result, lambda x: x['author']['name']), + 'uploader_id': try_get(result, lambda x: x['author']['id']), + 'upload_date': unified_strdate(result.get('created_ts')), + 'title': result.get('title'), + 'description': result.get('description'), + 'thumbnail': result.get('thumbnail_url'), + 'duration': int_or_none(result.get('duration')), + 'category': [category] if category else None, + 'age_limit': 18 if result.get('is_adult') else 0, + 'view_count': int_or_none(result.get('hits')), + 'is_live': result.get('is_livestream'), + 'webpage_url': result.get('video_url'), + }) + entries.append(entry) + + if page['has_next'] is False: + break + + return self.playlist_result(entries, playlist_id, page['name']) -- cgit 1.4.1 From 0732a90579091ad60b124fd693bdda8ee526e305 Mon Sep 17 00:00:00 2001 From: Philipp Hagemeister Date: Thu, 14 Sep 2017 20:37:46 +0200 Subject: [orf] Add new extractor for f4m stories --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/orf.py | 114 +++++++++++++++++++++++++++++++++++-- 2 files changed, 111 insertions(+), 4 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index aefadc56f..a3a97e940 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -768,6 +768,7 @@ from .ora import OraTVIE from .orf import ( ORFTVthekIE, ORFFM4IE, + ORFFM4StoryIE, ORFOE1IE, ORFIPTVIE, ) diff --git a/youtube_dl/extractor/orf.py b/youtube_dl/extractor/orf.py index cc296eabd..74fe8017e 100644 --- a/youtube_dl/extractor/orf.py +++ b/youtube_dl/extractor/orf.py @@ -6,14 +6,15 @@ import re from .common import InfoExtractor from ..compat import compat_str from ..utils import ( + determine_ext, + float_or_none, HEADRequest, - unified_strdate, - strip_jsonp, int_or_none, - float_or_none, - determine_ext, + orderedSet, remove_end, + strip_jsonp, unescapeHTML, + unified_strdate, ) @@ -307,3 +308,108 @@ class ORFIPTVIE(InfoExtractor): 'upload_date': upload_date, 'formats': formats, } + + +class 
ORFFM4StoryIE(InfoExtractor): + IE_NAME = 'orf:fm4:story' + IE_DESC = 'fm4.orf.at stories' + _VALID_URL = r'https?://fm4\.orf\.at/stories/(?P\d+)' + + _TEST = { + 'url': 'http://fm4.orf.at/stories/2865738/', + 'playlist': [{ + 'md5': 'e1c2c706c45c7b34cf478bbf409907ca', + 'info_dict': { + 'id': '547792', + 'ext': 'flv', + 'title': 'Manu Delago und Inner Tongue live', + 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', + 'duration': 1748.52, + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20170913', + }, + }, { + 'md5': 'c6dd2179731f86f4f55a7b49899d515f', + 'info_dict': { + 'id': '547798', + 'ext': 'flv', + 'title': 'Manu Delago und Inner Tongue live (2)', + 'duration': 1504.08, + 'thumbnail': r're:^https?://.*\.jpg$', + 'upload_date': '20170913', + 'description': 'Manu Delago und Inner Tongue haben bei der FM4 Soundpark Session live alles gegeben. Hier gibt es Fotos und die gesamte Session als Video.', + }, + }], + } + + def _real_extract(self, url): + story_id = self._match_id(url) + webpage = self._download_webpage(url, story_id) + + entries = [] + all_ids = orderedSet(re.findall(r'data-video(?:id)?="(\d+)"', webpage)) + for idx, video_id in enumerate(all_ids): + data = self._download_json( + 'http://bits.orf.at/filehandler/static-api/json/current/data.json?file=%s' % video_id, + video_id)[0] + + duration = float_or_none(data['duration'], 1000) + + video = data['sources']['q8c'] + load_balancer_url = video['loadBalancerUrl'] + abr = int_or_none(video.get('audioBitrate')) + vbr = int_or_none(video.get('bitrate')) + fps = int_or_none(video.get('videoFps')) + width = int_or_none(video.get('videoWidth')) + height = int_or_none(video.get('videoHeight')) + thumbnail = video.get('preview') + + rendition = self._download_json( + load_balancer_url, video_id, transform_source=strip_jsonp) + + f = { + 'abr': abr, + 'vbr': vbr, + 'fps': fps, + 'width': width, + 'height': height, + } + + formats = [] + for format_id, format_url in rendition['redirect'].items(): + if format_id == 'rtmp': + ff = f.copy() + ff.update({ + 'url': format_url, + 'format_id': format_id, + }) + formats.append(ff) + elif determine_ext(format_url) == 'f4m': + formats.extend(self._extract_f4m_formats( + format_url, video_id, f4m_id=format_id)) + elif determine_ext(format_url) == 'm3u8': + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', m3u8_id=format_id)) + else: + continue + self._sort_formats(formats) + + title = remove_end(self._og_search_title(webpage), ' - fm4.ORF.at') + if idx >= 1: + # Titles are duplicates, make them unique + title += ' (' + str(idx + 1) + ')' + description = self._og_search_description(webpage) + upload_date = unified_strdate(self._html_search_meta( + 'dc.date', webpage, 'upload date')) + + entries.append({ + 'id': video_id, + 'title': title, + 'description': description, + 'duration': duration, + 'thumbnail': thumbnail, + 'upload_date': upload_date, + 'formats': formats, + }) + + return self.playlist_result(entries) -- cgit 1.4.1 From 9c2a17f2ce7b2b9dc45b603be413a943f6637498 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Sun, 17 Sep 2017 22:19:57 +0700 Subject: [popcorntv] Add extractor (closes #5914, closes #14211) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/popcorntv.py | 78 ++++++++++++++++++++++++++++++++++++++ 2 files changed, 79 insertions(+) create mode 100644 youtube_dl/extractor/popcorntv.py (limited to 
'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index a3a97e940..ab95c8575 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -808,6 +808,7 @@ from .polskieradio import ( PolskieRadioIE, PolskieRadioCategoryIE, ) +from .popcorntv import PopcornTVIE from .porn91 import Porn91IE from .porncom import PornComIE from .pornflip import PornFlipIE diff --git a/youtube_dl/extractor/popcorntv.py b/youtube_dl/extractor/popcorntv.py new file mode 100644 index 000000000..ac901f426 --- /dev/null +++ b/youtube_dl/extractor/popcorntv.py @@ -0,0 +1,78 @@ +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor +from ..utils import ( + extract_attributes, + int_or_none, + unified_timestamp, +) + + +class PopcornTVIE(InfoExtractor): + _VALID_URL = r'https?://[^/]+\.popcorntv\.it/guarda/(?P[^/]+)/(?P\d+)' + _TESTS = [{ + 'url': 'https://animemanga.popcorntv.it/guarda/food-wars-battaglie-culinarie-episodio-01/9183', + 'md5': '47d65a48d147caf692ab8562fe630b45', + 'info_dict': { + 'id': '9183', + 'display_id': 'food-wars-battaglie-culinarie-episodio-01', + 'ext': 'mp4', + 'title': 'Food Wars, Battaglie Culinarie | Episodio 01', + 'description': 'md5:b8bea378faae4651d3b34c6e112463d0', + 'thumbnail': r're:^https?://.*\.jpg$', + 'timestamp': 1497610857, + 'upload_date': '20170616', + 'duration': 1440, + 'view_count': int, + }, + }, { + 'url': 'https://cinema.popcorntv.it/guarda/smash-cut/10433', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + display_id, video_id = mobj.group('display_id', 'id') + + webpage = self._download_webpage(url, display_id) + + m3u8_url = extract_attributes( + self._search_regex( + r'(]+itemprop=["\'](?:content|embed)Url[^>]*>)', + webpage, 'content' + ))['href'] + + formats = self._extract_m3u8_formats( + m3u8_url, display_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id='hls') + + title = self._search_regex( + r']+itemprop=["\']name[^>]*>([^<]+)', webpage, + 'title', default=None) or self._og_search_title(webpage) + + description = self._html_search_regex( + r'(?s)]+itemprop=["\']description[^>]*>(.+?)', + webpage, 'description', fatal=False) + thumbnail = self._og_search_thumbnail(webpage) + timestamp = unified_timestamp(self._html_search_meta( + 'uploadDate', webpage, 'timestamp')) + print(self._html_search_meta( + 'duration', webpage)) + duration = int_or_none(self._html_search_meta( + 'duration', webpage), invscale=60) + view_count = int_or_none(self._html_search_meta( + 'interactionCount', webpage, 'view count')) + + return { + 'id': video_id, + 'display_id': display_id, + 'title': title, + 'description': description, + 'thumbnail': thumbnail, + 'timestamp': timestamp, + 'duration': duration, + 'view_count': view_count, + 'formats': formats, + } -- cgit 1.4.1 From 13de91c9e92bd831fee38fddbdabce7f6e82ef91 Mon Sep 17 00:00:00 2001 From: Dan Weber Date: Tue, 12 Sep 2017 22:52:54 -0400 Subject: [americastestkitchen] Add extractor (closes #10764) --- youtube_dl/extractor/americastestkitchen.py | 85 +++++++++++++++++++++++++++++ youtube_dl/extractor/extractors.py | 1 + 2 files changed, 86 insertions(+) create mode 100755 youtube_dl/extractor/americastestkitchen.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/americastestkitchen.py b/youtube_dl/extractor/americastestkitchen.py new file mode 100755 index 000000000..f231e7f6e --- /dev/null +++ 
b/youtube_dl/extractor/americastestkitchen.py @@ -0,0 +1,85 @@ +# coding: utf-8 +from __future__ import unicode_literals + +import re + +from .common import InfoExtractor + + +class AmericasTestKitchenIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?americastestkitchen\.com/episode/(?P\d+)' + _TESTS = [{ + 'url': + 'https://www.americastestkitchen.com/episode/548-summer-dinner-party', + 'md5': 'b861c3e365ac38ad319cfd509c30577f', + 'info_dict': { + 'id': '1_5g5zua6e', + 'title': 'atk_s17_e24.mp4', + 'ext': 'mp4', + 'description': '
Host Julia Collin Davison goes into the test kitchen with test cook Dan Souza to learn how to make the ultimate Grill-Roasted Beef Tenderloin. Next, equipment expert Adam Ried reviews gas grills in the Equipment Corner. Then, gadget guru Lisa McManus uncovers the best quirky gadgets. Finally, test cook Erin McMurrer shows host Bridget Lancaster how to make an elegant Pear-Walnut Upside-Down Cake.
', + 'timestamp': 1497285541, + 'upload_date': '20170612', + 'uploader_id': 'roger.metcalf@americastestkitchen.com', + 'release_date': '2017-06-17', + 'thumbnail': 'http://d3cizcpymoenau.cloudfront.net/images/35973/e24-tenderloin-16.jpg', + 'episode_number': 24, + 'episode': 'Summer Dinner Party', + 'episode_id': '548-summer-dinner-party', + 'season_number': 17 + }, + 'params': { + # m3u8 download + 'skip_download': True, + }, + }, { + 'url': + 'https://www.americastestkitchen.com/episode/546-a-spanish-affair', + 'only_matching': + True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + + partner_id = self._search_regex( + r'partner_id/(?P\d+)', + webpage, + 'partner_id', + group='partner_id') + + video_data = self._parse_json( + self._search_regex( + r'window\.__INITIAL_STATE__\s*=\s*({.+?});\s*', + webpage, 'initial context'), + video_id) + + episode_data = video_data['episodeDetail']['content']['data'] + episode_content_meta = episode_data['full_video'] + external_id = episode_content_meta['external_id'] + + # photo data + photo_data = episode_content_meta.get('photo') + thumbnail = photo_data.get('image_url') if photo_data else None + + # meta + release_date = episode_data.get('aired_at') + description = episode_content_meta.get('description') + episode_number = int(episode_content_meta.get('episode_number')) + episode = episode_content_meta.get('title') + episode_id = episode_content_meta.get('episode_slug') + season_number = int(episode_content_meta.get('season_number')) + + return { + '_type': 'url_transparent', + 'url': 'kaltura:%s:%s' % (partner_id, external_id), + 'ie_key': 'Kaltura', + 'id': video_id, + 'release_date': release_date, + 'thumbnail': thumbnail, + 'description': description, + 'episode_number': episode_number, + 'episode': episode, + 'episode_id': episode_id, + 'season_number': season_number + } diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index ab95c8575..585300500 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -39,6 +39,7 @@ from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE from .alphaporno import AlphaPornoIE from .amcnetworks import AMCNetworksIE +from .americastestkitchen import AmericasTestKitchenIE from .animeondemand import AnimeOnDemandIE from .anitube import AnitubeIE from .anvato import AnvatoIE -- cgit 1.4.1 From 1c22d7a7f30917abfd2b7495f7bd02d51cb8528a Mon Sep 17 00:00:00 2001 From: Namnamseo <0201ssw+github@gmail.com> Date: Thu, 24 Aug 2017 11:32:24 +0900 Subject: [kakao] Add extractor (closes #12298) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/kakao.py | 140 +++++++++++++++++++++++++++++++++++++ 2 files changed, 141 insertions(+) create mode 100644 youtube_dl/extractor/kakao.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 585300500..4232a4fef 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -483,6 +483,7 @@ from .jove import JoveIE from .joj import JojIE from .jwplatform import JWPlatformIE from .jpopsukitv import JpopsukiIE +from .kakao import KakaoIE from .kaltura import KalturaIE from .kamcord import KamcordIE from .kanalplay import KanalPlayIE diff --git a/youtube_dl/extractor/kakao.py b/youtube_dl/extractor/kakao.py new file mode 100644 index 000000000..0caa41e9e --- /dev/null +++ 
b/youtube_dl/extractor/kakao.py @@ -0,0 +1,140 @@ +# coding: utf-8 + +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ( + int_or_none, + compat_str, + unified_timestamp, +) + + +class KakaoIE(InfoExtractor): + _VALID_URL = r'https?://tv.kakao.com/channel/(?P\d+)/cliplink/(?P\d+)' + IE_NAME = 'kakao.com' + + _TESTS = [{ + 'url': 'http://tv.kakao.com/channel/2671005/cliplink/301965083', + 'md5': '702b2fbdeb51ad82f5c904e8c0766340', + 'info_dict': { + 'id': '301965083', + 'ext': 'mp4', + 'title': '乃木坂46 バナナマン 「3期生紹介コーナーが始動!顔高低差GPも!」 『乃木坂工事中』', + 'uploader_id': 2671005, + 'uploader': '그랑그랑이', + 'timestamp': 1488160199, + 'upload_date': '20170227', + } + }, { + 'url': 'http://tv.kakao.com/channel/2653210/cliplink/300103180', + 'md5': 'a8917742069a4dd442516b86e7d66529', + 'info_dict': { + 'id': '300103180', + 'ext': 'mp4', + 'description': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)\r\n\r\n[쇼! 음악중심] 20160611, 507회', + 'title': '러블리즈 - Destiny (나의 지구) (Lovelyz - Destiny)', + 'uploader_id': 2653210, + 'uploader': '쇼 음악중심', + 'timestamp': 1485684628, + 'upload_date': '20170129', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + player_url = 'http://tv.kakao.com/embed/player/cliplink/' + video_id + \ + '?service=kakao_tv&autoplay=1&profile=HIGH&wmode=transparent' + player_header = {'Referer': player_url} + + impress = self._download_json( + 'http://tv.kakao.com/api/v1/ft/cliplinks/%s/impress' % video_id, + video_id, 'Downloading video info', + query={ + 'player': 'monet_html5', + 'referer': url, + 'uuid': '', + 'service': 'kakao_tv', + 'section': '', + 'dteType': 'PC', + 'fields': 'clipLink,clip,channel,hasPlusFriend,-service,-tagList' + }, headers=player_header) + + clipLink = impress['clipLink'] + clip = clipLink['clip'] + + video_info = { + 'id': video_id, + 'title': clip['title'], + 'description': clip.get('description'), + 'uploader': clipLink.get('channel', {}).get('name'), + 'uploader_id': clipLink.get('channelId'), + 'duration': int_or_none(clip.get('duration')), + 'view_count': int_or_none(clip.get('playCount')), + 'like_count': int_or_none(clip.get('likeCount')), + 'comment_count': int_or_none(clip.get('commentCount')), + } + + tid = impress.get('tid', '') + raw = self._download_json( + 'http://tv.kakao.com/api/v1/ft/cliplinks/%s/raw' % video_id, + video_id, 'Downloading video formats info', + query={ + 'player': 'monet_html5', + 'referer': url, + 'uuid': '', + 'service': 'kakao_tv', + 'section': '', + 'tid': tid, + 'profile': 'HIGH', + 'dteType': 'PC', + }, headers=player_header, fatal=False) + + formats = [] + for fmt in raw.get('outputList', []): + try: + profile_name = fmt['profile'] + fmt_url_json = self._download_json( + 'http://tv.kakao.com/api/v1/ft/cliplinks/%s/raw/videolocation' % video_id, + video_id, 'Downloading video URL for profile %s' % profile_name, + query={ + 'service': 'kakao_tv', + 'section': '', + 'tid': tid, + 'profile': profile_name + }, headers=player_header, fatal=False) + + if fmt_url_json is None: + continue + + fmt_url = fmt_url_json['url'] + formats.append({ + 'url': fmt_url, + 'format_id': profile_name, + 'width': int_or_none(fmt.get('width')), + 'height': int_or_none(fmt.get('height')), + 'format_note': fmt.get('label'), + 'filesize': int_or_none(fmt.get('filesize')) + }) + except KeyError: + pass + + self._sort_formats(formats) + video_info['formats'] = formats + + top_thumbnail = clip.get('thumbnailUrl') + thumbs = [] + for thumb in clip.get('clipChapterThumbnailList', []): + 
thumbs.append({ + 'url': thumb.get('thumbnailUrl'), + 'id': compat_str(thumb.get('timeInSec')), + 'preference': -1 if thumb.get('isDefault') else 0 + }) + video_info['thumbnail'] = top_thumbnail + video_info['thumbnails'] = thumbs + + upload_date = unified_timestamp(clipLink.get('createTime')) + video_info['timestamp'] = upload_date + + return video_info -- cgit 1.4.1 From 117589dfa2ac32566dc705ee7e712139105e6dd8 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Mon, 2 Oct 2017 04:14:36 +0700 Subject: [canvas] Generalize mediazone.vrt.be extractor and rework canvas and een --- youtube_dl/extractor/canvas.py | 142 +++++++++++++++++++++++++------------ youtube_dl/extractor/extractors.py | 5 +- 2 files changed, 99 insertions(+), 48 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/canvas.py b/youtube_dl/extractor/canvas.py index aada02917..6899f8431 100644 --- a/youtube_dl/extractor/canvas.py +++ b/youtube_dl/extractor/canvas.py @@ -3,24 +3,104 @@ from __future__ import unicode_literals import re from .common import InfoExtractor -from ..utils import float_or_none +from ..utils import ( + float_or_none, + strip_or_none, +) class CanvasIE(InfoExtractor): + _VALID_URL = r'https?://mediazone\.vrt\.be/api/v1/(?Pcanvas|een|ketnet)/assets/(?Pm[dz]-ast-[^/?#&]+)' + _TESTS = [{ + 'url': 'https://mediazone.vrt.be/api/v1/ketnet/assets/md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475', + 'md5': '90139b746a0a9bd7bb631283f6e2a64e', + 'info_dict': { + 'id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475', + 'display_id': 'md-ast-4ac54990-ce66-4d00-a8ca-9eac86f4c475', + 'ext': 'flv', + 'title': 'Nachtwacht: De Greystook', + 'description': 'md5:1db3f5dc4c7109c821261e7512975be7', + 'thumbnail': r're:^https?://.*\.jpg$', + 'duration': 1468.03, + }, + 'expected_warnings': ['is not a supported codec', 'Unknown MIME type'], + }, { + 'url': 'https://mediazone.vrt.be/api/v1/canvas/assets/mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e', + 'only_matching': True, + }] + + def _real_extract(self, url): + mobj = re.match(self._VALID_URL, url) + site_id, video_id = mobj.group('site_id'), mobj.group('id') + + data = self._download_json( + 'https://mediazone.vrt.be/api/v1/%s/assets/%s' + % (site_id, video_id), video_id) + + title = data['title'] + description = data.get('description') + + formats = [] + for target in data['targetUrls']: + format_url, format_type = target.get('url'), target.get('type') + if not format_url or not format_type: + continue + if format_type == 'HLS': + formats.extend(self._extract_m3u8_formats( + format_url, video_id, 'mp4', entry_protocol='m3u8_native', + m3u8_id=format_type, fatal=False)) + elif format_type == 'HDS': + formats.extend(self._extract_f4m_formats( + format_url, video_id, f4m_id=format_type, fatal=False)) + elif format_type == 'MPEG_DASH': + formats.extend(self._extract_mpd_formats( + format_url, video_id, mpd_id=format_type, fatal=False)) + elif format_type == 'HSS': + formats.extend(self._extract_ism_formats( + format_url, video_id, ism_id='mss', fatal=False)) + else: + formats.append({ + 'format_id': format_type, + 'url': format_url, + }) + self._sort_formats(formats) + + subtitles = {} + subtitle_urls = data.get('subtitleUrls') + if isinstance(subtitle_urls, list): + for subtitle in subtitle_urls: + subtitle_url = subtitle.get('url') + if subtitle_url and subtitle.get('type') == 'CLOSED': + subtitles.setdefault('nl', []).append({'url': subtitle_url}) + + return { + 'id': video_id, + 'display_id': video_id, + 'title': title, + 'description': 
description, + 'formats': formats, + 'duration': float_or_none(data.get('duration'), 1000), + 'thumbnail': data.get('posterImageUrl'), + 'subtitles': subtitles, + } + + +class CanvasEenIE(InfoExtractor): IE_DESC = 'canvas.be and een.be' _VALID_URL = r'https?://(?:www\.)?(?Pcanvas|een)\.be/(?:[^/]+/)*(?P[^/?#&]+)' _TESTS = [{ 'url': 'http://www.canvas.be/video/de-afspraak/najaar-2015/de-afspraak-veilt-voor-de-warmste-week', - 'md5': 'ea838375a547ac787d4064d8c7860a6c', + 'md5': 'ed66976748d12350b118455979cca293', 'info_dict': { 'id': 'mz-ast-5e5f90b6-2d72-4c40-82c2-e134f884e93e', 'display_id': 'de-afspraak-veilt-voor-de-warmste-week', - 'ext': 'mp4', + 'ext': 'flv', 'title': 'De afspraak veilt voor de Warmste Week', 'description': 'md5:24cb860c320dc2be7358e0e5aa317ba6', 'thumbnail': r're:^https?://.*\.jpg$', 'duration': 49.02, - } + }, + 'expected_warnings': ['is not a supported codec'], }, { # with subtitles 'url': 'http://www.canvas.be/video/panorama/2016/pieter-0167', @@ -40,7 +120,8 @@ class CanvasIE(InfoExtractor): }, 'params': { 'skip_download': True, - } + }, + 'skip': 'Pagina niet gevonden', }, { 'url': 'https://www.een.be/sorry-voor-alles/herbekijk-sorry-voor-alles', 'info_dict': { @@ -54,7 +135,8 @@ class CanvasIE(InfoExtractor): }, 'params': { 'skip_download': True, - } + }, + 'skip': 'Episode no longer available', }, { 'url': 'https://www.canvas.be/check-point/najaar-2016/de-politie-uw-vriend', 'only_matching': True, @@ -66,55 +148,21 @@ class CanvasIE(InfoExtractor): webpage = self._download_webpage(url, display_id) - title = (self._search_regex( + title = strip_or_none(self._search_regex( r']+class="video__body__header__title"[^>]*>(.+?)', webpage, 'title', default=None) or self._og_search_title( - webpage)).strip() + webpage, default=None)) video_id = self._html_search_regex( - r'data-video=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video id', group='id') - - data = self._download_json( - 'https://mediazone.vrt.be/api/v1/%s/assets/%s' - % (site_id, video_id), display_id) - - formats = [] - for target in data['targetUrls']: - format_url, format_type = target.get('url'), target.get('type') - if not format_url or not format_type: - continue - if format_type == 'HLS': - formats.extend(self._extract_m3u8_formats( - format_url, display_id, entry_protocol='m3u8_native', - ext='mp4', preference=0, fatal=False, m3u8_id=format_type)) - elif format_type == 'HDS': - formats.extend(self._extract_f4m_formats( - format_url, display_id, f4m_id=format_type, fatal=False)) - elif format_type == 'MPEG_DASH': - formats.extend(self._extract_mpd_formats( - format_url, display_id, mpd_id=format_type, fatal=False)) - else: - formats.append({ - 'format_id': format_type, - 'url': format_url, - }) - self._sort_formats(formats) - - subtitles = {} - subtitle_urls = data.get('subtitleUrls') - if isinstance(subtitle_urls, list): - for subtitle in subtitle_urls: - subtitle_url = subtitle.get('url') - if subtitle_url and subtitle.get('type') == 'CLOSED': - subtitles.setdefault('nl', []).append({'url': subtitle_url}) + r'data-video=(["\'])(?P(?:(?!\1).)+)\1', webpage, 'video id', + group='id') return { + '_type': 'url_transparent', + 'url': 'https://mediazone.vrt.be/api/v1/%s/assets/%s' % (site_id, video_id), + 'ie_key': CanvasIE.ie_key(), 'id': video_id, 'display_id': display_id, 'title': title, 'description': self._og_search_description(webpage), - 'formats': formats, - 'duration': float_or_none(data.get('duration'), 1000), - 'thumbnail': data.get('posterImageUrl'), - 'subtitles': subtitles, } diff --git 
a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 4232a4fef..24e9acda6 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -150,7 +150,10 @@ from .camdemy import ( from .camwithher import CamWithHerIE from .canalplus import CanalplusIE from .canalc2 import Canalc2IE -from .canvas import CanvasIE +from .canvas import ( + CanvasIE, + CanvasEenIE, +) from .carambatv import ( CarambaTVIE, CarambaTVPageIE, -- cgit 1.4.1 From d0f2d6411406a35c9593bbb85375c2d7f8300c77 Mon Sep 17 00:00:00 2001 From: Jakub Wilk Date: Tue, 10 Oct 2017 18:45:10 +0200 Subject: [slideslive] Add extractor (closes #2680) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/slideslive.py | 34 ++++++++++++++++++++++++++++++++++ 2 files changed, 35 insertions(+) create mode 100644 youtube_dl/extractor/slideslive.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 24e9acda6..d0f71aecd 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -943,6 +943,7 @@ from .skynewsarabia import ( ) from .skysports import SkySportsIE from .slideshare import SlideshareIE +from .slideslive import SlidesLiveIE from .slutload import SlutloadIE from .smotri import ( SmotriIE, diff --git a/youtube_dl/extractor/slideslive.py b/youtube_dl/extractor/slideslive.py new file mode 100644 index 000000000..104576033 --- /dev/null +++ b/youtube_dl/extractor/slideslive.py @@ -0,0 +1,34 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from ..utils import ExtractorError + + +class SlidesLiveIE(InfoExtractor): + _VALID_URL = r'https?://slideslive\.com/(?P[0-9]+)' + _TESTS = [{ + 'url': 'https://slideslive.com/38902413/gcc-ia16-backend', + 'md5': 'b29fcd6c6952d0c79c5079b0e7a07e6f', + 'info_dict': { + 'id': 'LMtgR8ba0b0', + 'ext': 'mp4', + 'title': '38902413: external video', + 'description': '3890241320170925-9-1yd6ech.mp4', + 'uploader': 'SlidesLive Administrator', + 'uploader_id': 'UC62SdArr41t_-_fX40QCLRw', + 'upload_date': '20170925', + } + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + video_data = self._download_json( + url, video_id, headers={'Accept': 'application/json'}) + service_name = video_data['video_service_name'] + if service_name == 'YOUTUBE': + yt_video_id = video_data['video_service_id'] + return self.url_result(yt_video_id, 'Youtube', video_id=yt_video_id) + else: + raise ExtractorError( + 'Unsupported service name: {0}'.format(service_name), expected=True) -- cgit 1.4.1 From 04af3aca049588b6b3d4d4b57ee47224fdeee90f Mon Sep 17 00:00:00 2001 From: Khang Nguyen Date: Thu, 5 Oct 2017 21:37:18 +0700 Subject: Remove YoutubeSharedVideoIE https://github.com/rg3/youtube-dl/issues/14303 --- youtube_dl/extractor/extractors.py | 1 - youtube_dl/extractor/youtube.py | 33 --------------------------------- 2 files changed, 34 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d0f71aecd..d96eafbc3 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -1346,7 +1346,6 @@ from .youtube import ( YoutubeSearchDateIE, YoutubeSearchIE, YoutubeSearchURLIE, - YoutubeSharedVideoIE, YoutubeShowIE, YoutubeSubscriptionsIE, YoutubeTruncatedIDIE, diff --git a/youtube_dl/extractor/youtube.py b/youtube_dl/extractor/youtube.py index 54f5d7279..6e2d57d6a 
100644 --- a/youtube_dl/extractor/youtube.py +++ b/youtube_dl/extractor/youtube.py @@ -2040,39 +2040,6 @@ class YoutubeIE(YoutubeBaseInfoExtractor): } -class YoutubeSharedVideoIE(InfoExtractor): - _VALID_URL = r'(?:https?:)?//(?:www\.)?youtube\.com/shared\?.*\bci=(?P[0-9A-Za-z_-]{11})' - IE_NAME = 'youtube:shared' - - _TEST = { - 'url': 'https://www.youtube.com/shared?ci=1nEzmT-M4fU', - 'info_dict': { - 'id': 'uPDB5I9wfp8', - 'ext': 'webm', - 'title': 'Pocoyo: 90 minutos de episódios completos Português para crianças - PARTE 3', - 'description': 'md5:d9e4d9346a2dfff4c7dc4c8cec0f546d', - 'upload_date': '20160219', - 'uploader': 'Pocoyo - Português (BR)', - 'uploader_id': 'PocoyoBrazil', - }, - 'add_ie': ['Youtube'], - 'params': { - # There are already too many Youtube downloads - 'skip_download': True, - }, - } - - def _real_extract(self, url): - video_id = self._match_id(url) - - webpage = self._download_webpage(url, video_id) - - real_video_id = self._html_search_meta( - 'videoId', webpage, 'YouTube video id', fatal=True) - - return self.url_result(real_video_id, YoutubeIE.ie_key()) - - class YoutubePlaylistIE(YoutubePlaylistBaseInfoExtractor): IE_DESC = 'YouTube.com playlists' _VALID_URL = r"""(?x)(?: -- cgit 1.4.1 From cdab1df91242fb617b09a31c023822ef31ea37b8 Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Wed, 11 Oct 2017 10:04:46 +0000 Subject: [afreecatv] remove AfreecaTVGlobalIE the website now show this message > Global AfreecaTV will be merged and integrated on July 20th, 2017. Every user around the world are now able to interact with one another on www.afreecatv.com! --- youtube_dl/extractor/afreecatv.py | 104 ------------------------------------- youtube_dl/extractor/extractors.py | 5 +- 2 files changed, 1 insertion(+), 108 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/afreecatv.py b/youtube_dl/extractor/afreecatv.py index 2c58f4617..e6513c7a4 100644 --- a/youtube_dl/extractor/afreecatv.py +++ b/youtube_dl/extractor/afreecatv.py @@ -271,107 +271,3 @@ class AfreecaTVIE(InfoExtractor): }) return info - - -class AfreecaTVGlobalIE(AfreecaTVIE): - IE_NAME = 'afreecatv:global' - _VALID_URL = r'https?://(?:www\.)?afreeca\.tv/(?P\d+)(?:/v/(?P\d+))?' 
- _TESTS = [{ - 'url': 'http://afreeca.tv/36853014/v/58301', - 'info_dict': { - 'id': '58301', - 'title': 'tryhard top100', - 'uploader_id': '36853014', - 'uploader': 'makgi Hearthstone Live!', - }, - 'playlist_count': 3, - }] - - def _real_extract(self, url): - channel_id, video_id = re.match(self._VALID_URL, url).groups() - video_type = 'video' if video_id else 'live' - query = { - 'pt': 'view', - 'bid': channel_id, - } - if video_id: - query['vno'] = video_id - video_data = self._download_json( - 'http://api.afreeca.tv/%s/view_%s.php' % (video_type, video_type), - video_id or channel_id, query=query)['channel'] - - if video_data.get('result') != 1: - raise ExtractorError('%s said: %s' % (self.IE_NAME, video_data['remsg'])) - - title = video_data['title'] - - info = { - 'thumbnail': video_data.get('thumb'), - 'view_count': int_or_none(video_data.get('vcnt')), - 'age_limit': int_or_none(video_data.get('grade')), - 'uploader_id': channel_id, - 'uploader': video_data.get('cname'), - } - - if video_id: - entries = [] - for i, f in enumerate(video_data.get('flist', [])): - video_key = self.parse_video_key(f.get('key', '')) - f_url = f.get('file') - if not video_key or not f_url: - continue - entries.append({ - 'id': '%s_%s' % (video_id, video_key.get('part', i + 1)), - 'title': title, - 'upload_date': video_key.get('upload_date'), - 'duration': int_or_none(f.get('length')), - 'url': f_url, - 'protocol': 'm3u8_native', - 'ext': 'mp4', - }) - - info.update({ - 'id': video_id, - 'title': title, - 'duration': int_or_none(video_data.get('length')), - }) - if len(entries) > 1: - info['_type'] = 'multi_video' - info['entries'] = entries - elif len(entries) == 1: - i = entries[0].copy() - i.update(info) - info = i - else: - formats = [] - for s in video_data.get('strm', []): - s_url = s.get('purl') - if not s_url: - continue - stype = s.get('stype') - if stype == 'HLS': - formats.extend(self._extract_m3u8_formats( - s_url, channel_id, 'mp4', m3u8_id=stype, fatal=False)) - elif stype == 'RTMP': - format_id = [stype] - label = s.get('label') - if label: - format_id.append(label) - formats.append({ - 'format_id': '-'.join(format_id), - 'url': s_url, - 'tbr': int_or_none(s.get('bps')), - 'height': int_or_none(s.get('brt')), - 'ext': 'flv', - 'rtmp_live': True, - }) - self._sort_formats(formats) - - info.update({ - 'id': channel_id, - 'title': self._live_title(title), - 'is_live': True, - 'formats': formats, - }) - - return info diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index d96eafbc3..a363d95bf 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -31,10 +31,7 @@ from .aenetworks import ( AENetworksIE, HistoryTopicIE, ) -from .afreecatv import ( - AfreecaTVIE, - AfreecaTVGlobalIE, -) +from .afreecatv import AfreecaTVIE from .airmozilla import AirMozillaIE from .aljazeera import AlJazeeraIE from .alphaporno import AlphaPornoIE -- cgit 1.4.1 From 9e38dbb19ca6874c7350647ea2883d5bbb3b50a1 Mon Sep 17 00:00:00 2001 From: Remita Amine Date: Wed, 11 Oct 2017 15:50:00 +0000 Subject: [voxmedia] add support for recode.net(fixes #14173) --- youtube_dl/extractor/extractors.py | 5 ++- youtube_dl/extractor/voxmedia.py | 66 ++++++++++++++++++++++++++++++-------- 2 files changed, 57 insertions(+), 14 deletions(-) (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index a363d95bf..5629c7623 100644 --- a/youtube_dl/extractor/extractors.py +++ 
b/youtube_dl/extractor/extractors.py @@ -1244,7 +1244,10 @@ from .vodpl import VODPlIE from .vodplatform import VODPlatformIE from .voicerepublic import VoiceRepublicIE from .voot import VootIE -from .voxmedia import VoxMediaIE +from .voxmedia import ( + VoxMediaVolumeIE, + VoxMediaIE, +) from .vporn import VpornIE from .vrt import VRTIE from .vrak import VrakIE diff --git a/youtube_dl/extractor/voxmedia.py b/youtube_dl/extractor/voxmedia.py index f8e331493..c7a0a88fe 100644 --- a/youtube_dl/extractor/voxmedia.py +++ b/youtube_dl/extractor/voxmedia.py @@ -2,11 +2,44 @@ from __future__ import unicode_literals from .common import InfoExtractor +from .once import OnceIE from ..compat import compat_urllib_parse_unquote +from ..utils import ExtractorError + + +class VoxMediaVolumeIE(OnceIE): + _VALID_URL = r'https?://volume\.vox-cdn\.com/embed/(?P[0-9a-f]{9})' + + def _real_extract(self, url): + video_id = self._match_id(url) + webpage = self._download_webpage(url, video_id) + video_data = self._parse_json(self._search_regex( + r'Volume\.createVideo\(({.+})\s*,\s*{.*}\s*,\s*\[.*\]\s*,\s*{.*}\);', webpage, 'video data'), video_id) + for provider_video_type in ('ooyala', 'youtube', 'brightcove'): + provider_video_id = video_data.get('%s_id' % provider_video_type) + if not provider_video_id: + continue + info = { + 'id': video_id, + 'title': video_data.get('title_short'), + 'description': video_data.get('description_long') or video_data.get('description_short'), + 'thumbnail': video_data.get('brightcove_thumbnail') + } + if provider_video_type == 'brightcove': + info['formats'] = self._extract_once_formats(provider_video_id) + self._sort_formats(info['formats']) + else: + info.update({ + '_type': 'url_transparent', + 'url': provider_video_id if provider_video_type == 'youtube' else '%s:%s' % (provider_video_type, provider_video_id), + 'ie_key': provider_video_type.capitalize(), + }) + return info + raise ExtractorError('Unable to find provider video id') class VoxMediaIE(InfoExtractor): - _VALID_URL = r'https?://(?:www\.)?(?:theverge|vox|sbnation|eater|polygon|curbed|racked)\.com/(?:[^/]+/)*(?P[^/?]+)' + _VALID_URL = r'https?://(?:www\.)?(?:(?:theverge|vox|sbnation|eater|polygon|curbed|racked)\.com|recode\.net)/(?:[^/]+/)*(?P[^/?]+)' _TESTS = [{ 'url': 'http://www.theverge.com/2014/6/27/5849272/material-world-how-google-discovered-what-software-is-made-of', 'info_dict': { @@ -31,6 +64,7 @@ class VoxMediaIE(InfoExtractor): 'description': 'md5:87a51fe95ff8cea8b5bdb9ac7ae6a6af', }, 'add_ie': ['Ooyala'], + 'skip': 'Video Not Found', }, { # volume embed 'url': 'http://www.vox.com/2016/3/31/11336640/mississippi-lgbt-religious-freedom-bill', @@ -84,6 +118,17 @@ class VoxMediaIE(InfoExtractor): 'description': 'md5:e02d56b026d51aa32c010676765a690d', }, }], + }, { + # volume embed, Brightcove Once + 'url': 'https://www.recode.net/2014/6/17/11628066/post-post-pc-ceo-the-full-code-conference-video-of-microsofts-satya', + 'md5': '01571a896281f77dc06e084138987ea2', + 'info_dict': { + 'id': '1231c973d', + 'ext': 'mp4', + 'title': 'Post-Post-PC CEO: The Full Code Conference Video of Microsoft\'s Satya Nadella', + 'description': 'The longtime veteran was chosen earlier this year as the software giant\'s third leader in its history.', + }, + 'add_ie': ['VoxMediaVolume'], }] def _real_extract(self, url): @@ -91,9 +136,14 @@ class VoxMediaIE(InfoExtractor): webpage = compat_urllib_parse_unquote(self._download_webpage(url, display_id)) def create_entry(provider_video_id, provider_video_type, title=None, 
description=None): + video_url = { + 'youtube': '%s', + 'ooyala': 'ooyala:%s', + 'volume': 'http://volume.vox-cdn.com/embed/%s', + }[provider_video_type] % provider_video_id return { '_type': 'url_transparent', - 'url': provider_video_id if provider_video_type == 'youtube' else '%s:%s' % (provider_video_type, provider_video_id), + 'url': video_url, 'title': title or self._og_search_title(webpage), 'description': description or self._og_search_description(webpage), } @@ -124,17 +174,7 @@ class VoxMediaIE(InfoExtractor): volume_uuid = self._search_regex( r'data-volume-uuid="([^"]+)"', webpage, 'volume uuid', default=None) if volume_uuid: - volume_webpage = self._download_webpage( - 'http://volume.vox-cdn.com/embed/%s' % volume_uuid, volume_uuid) - video_data = self._parse_json(self._search_regex( - r'Volume\.createVideo\(({.+})\s*,\s*{.*}\s*,\s*\[.*\]\s*,\s*{.*}\);', volume_webpage, 'video data'), volume_uuid) - for provider_video_type in ('ooyala', 'youtube'): - provider_video_id = video_data.get('%s_id' % provider_video_type) - if provider_video_id: - description = video_data.get('description_long') or video_data.get('description_short') - entries.append(create_entry( - provider_video_id, provider_video_type, video_data.get('title_short'), description)) - break + entries.append(create_entry(volume_uuid, 'volume')) if len(entries) == 1: return entries[0] -- cgit 1.4.1 From ff3f1a62f087332fa6409b5cbc39871d49e74f37 Mon Sep 17 00:00:00 2001 From: Sergey M․ Date: Thu, 12 Oct 2017 00:44:13 +0700 Subject: [funk] Add extractor (closes #14464) --- youtube_dl/extractor/extractors.py | 1 + youtube_dl/extractor/funk.py | 43 ++++++++++++++++++++++++++++++++++++++ youtube_dl/extractor/generic.py | 16 -------------- 3 files changed, 44 insertions(+), 16 deletions(-) create mode 100644 youtube_dl/extractor/funk.py (limited to 'youtube_dl/extractor/extractors.py') diff --git a/youtube_dl/extractor/extractors.py b/youtube_dl/extractor/extractors.py index 5629c7623..ecb33bc9e 100644 --- a/youtube_dl/extractor/extractors.py +++ b/youtube_dl/extractor/extractors.py @@ -381,6 +381,7 @@ from .freesound import FreesoundIE from .freespeech import FreespeechIE from .freshlive import FreshLiveIE from .funimation import FunimationIE +from .funk import FunkIE from .funnyordie import FunnyOrDieIE from .fusion import FusionIE from .fxnetworks import FXNetworksIE diff --git a/youtube_dl/extractor/funk.py b/youtube_dl/extractor/funk.py new file mode 100644 index 000000000..ce5c67fbb --- /dev/null +++ b/youtube_dl/extractor/funk.py @@ -0,0 +1,43 @@ +# coding: utf-8 +from __future__ import unicode_literals + +from .common import InfoExtractor +from .nexx import NexxIE +from ..utils import extract_attributes + + +class FunkIE(InfoExtractor): + _VALID_URL = r'https?://(?:www\.)?funk\.net/(?:mix|channel)/(?:[^/]+/)*(?P[^?/#]+)' + _TESTS = [{ + 'url': 'https://www.funk.net/mix/59d65d935f8b160001828b5b/0/59d517e741dca10001252574/', + 'md5': '4d40974481fa3475f8bccfd20c5361f8', + 'info_dict': { + 'id': '716599', + 'ext': 'mp4', + 'title': 'Neue Rechte Welle', + 'description': 'md5:a30a53f740ffb6bfd535314c2cc5fb69', + 'timestamp': 1501337639, + 'upload_date': '20170729', + }, + 'params': { + 'format': 'bestvideo', + 'skip_download': True, + }, + }, { + 'url': 'https://www.funk.net/channel/59d5149841dca100012511e3/0/59d52049999264000182e79d/', + 'only_matching': True, + }] + + def _real_extract(self, url): + video_id = self._match_id(url) + + webpage = self._download_webpage(url, video_id) + + domain_id = 
NexxIE._extract_domain_id(webpage) or '741' + nexx_id = extract_attributes(self._search_regex( + r'(]id=["\']mediaplayer-funk[^>]+>)', + webpage, 'media player'))['data-id'] + + return self.url_result( + 'nexx:%s:%s' % (domain_id, nexx_id), ie=NexxIE.ie_key(), + video_id=nexx_id) diff --git a/youtube_dl/extractor/generic.py b/youtube_dl/extractor/generic.py index 6dab4c7f4..39630b6f6 100644 --- a/youtube_dl/extractor/generic.py +++ b/youtube_dl/extractor/generic.py @@ -1613,22 +1613,6 @@ class GenericIE(InfoExtractor): }, 'add_ie': ['BrightcoveLegacy'], }, - # Nexx embed - { - 'url': 'https://www.funk.net/serien/5940e15073f6120001657956/items/593efbb173f6120001657503', - 'info_dict': { - 'id': '247746', - 'ext': 'mp4', - 'title': "Yesterday's Jam (OV)", - 'description': 'md5:09bc0984723fed34e2581624a84e05f0', - 'timestamp': 1492594816, - 'upload_date': '20170419', - }, - 'params': { - 'format': 'bestvideo', - 'skip_download': True, - }, - }, # Facebook