author    Philipp Hagemeister <phihag@phihag.de>    2014-08-28 00:58:24 +0200
committer Philipp Hagemeister <phihag@phihag.de>    2014-08-28 00:58:24 +0200
commit    22a6f15061127045f4d6ae1ff4efc922fa372cc2 (patch)
tree      f0df2ab32f301b3c66b6d9d147e49fe177e27a55 /youtube_dl/extractor/ustream.py
parent    259454525f9fe41947e6e05882336b7196fc8fce (diff)
Move playlist tests to extractors.
From now on, test_download will run these tests, so we benefit not only from its networking setup but also from the other checks (for example, test_all_urls catching problems with _VALID_URLs).
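
For illustration, here is a minimal sketch of the convention this commit adopts (the extractor name, URL, and counts below are hypothetical, not taken from this diff): the playlist test is declared on the extractor itself via _TEST, and test_download is expected to check 'playlist_mincount' against the number of extracted entries rather than downloading every video.

from .common import InfoExtractor

class ExampleChannelIE(InfoExtractor):
    # Hypothetical channel extractor, for illustration only.
    _VALID_URL = r'https?://example\.com/channel/(?P<id>[^/]+)'
    _TEST = {
        'url': 'http://example.com/channel/some-channel',
        'info_dict': {
            'id': '12345',
        },
        # The extracted playlist must yield at least this many entries
        # for the test to pass.
        'playlist_mincount': 10,
    }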
Diffstat (limited to 'youtube_dl/extractor/ustream.py')
-rw-r--r--  youtube_dl/extractor/ustream.py  27
1 file changed, 21 insertions(+), 6 deletions(-)
diff --git a/youtube_dl/extractor/ustream.py b/youtube_dl/extractor/ustream.py
index 488b10df9..816b101eb 100644
--- a/youtube_dl/extractor/ustream.py
+++ b/youtube_dl/extractor/ustream.py
@@ -68,21 +68,36 @@ class UstreamIE(InfoExtractor):
 class UstreamChannelIE(InfoExtractor):
     _VALID_URL = r'https?://www\.ustream\.tv/channel/(?P<slug>.+)'
     IE_NAME = 'ustream:channel'
+    _TEST = {
+        'url': 'http://www.ustream.tv/channel/channeljapan',
+        'info_dict': {
+            'id': '10874166',
+        },
+        'playlist_mincount': 54,
+    }
 
     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        slug = m.group('slug')
-        webpage = self._download_webpage(url, slug)
+        display_id = m.group('slug')
+        webpage = self._download_webpage(url, display_id)
         channel_id = get_meta_content('ustream:channel_id', webpage)
 
         BASE = 'http://www.ustream.tv'
         next_url = '/ajax/socialstream/videos/%s/1.json' % channel_id
         video_ids = []
         while next_url:
-            reply = json.loads(self._download_webpage(compat_urlparse.urljoin(BASE, next_url), channel_id))
+            reply = self._download_json(
+                compat_urlparse.urljoin(BASE, next_url), display_id,
+                note='Downloading video information (next: %d)' % (len(video_ids) + 1))
             video_ids.extend(re.findall(r'data-content-id="(\d.*)"', reply['data']))
             next_url = reply['nextUrl']
 
-        urls = ['http://www.ustream.tv/recorded/' + vid for vid in video_ids]
-        url_entries = [self.url_result(eurl, 'Ustream') for eurl in urls]
-        return self.playlist_result(url_entries, channel_id)
+        entries = [
+            self.url_result('http://www.ustream.tv/recorded/' + vid, 'Ustream')
+            for vid in video_ids]
+        return {
+            '_type': 'playlist',
+            'id': channel_id,
+            'display_id': display_id,
+            'entries': entries,
+        }
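
For context, a rough sketch (not part of this commit) of the equivalent return using the InfoExtractor helper; the diff builds the playlist dict by hand, presumably because playlist_result() offers no way to attach the display_id:

# Sketch only (not from this diff): inside _real_extract, the same playlist
# could be returned via the helper, at the cost of dropping display_id,
# which playlist_result() does not accept.
return self.playlist_result(entries, playlist_id=channel_id)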