summary refs log tree commit diff
diff options
context:
space:
mode:
authorPhilipp Hagemeister <phihag@phihag.de>2014-07-25 11:46:51 +0200
committerPhilipp Hagemeister <phihag@phihag.de>2014-07-25 11:46:53 +0200
commit3182f3e2dc62cf918fa12f578e2bbfc952862320 (patch)
treea3a209d58626a6f36a37f1d958490c8285620275
parentcbf915f3f63a85e090d561fe4de218b55cb7fa59 (diff)
downloadyoutube-dl-3182f3e2dc62cf918fa12f578e2bbfc952862320.tar.gz
youtube-dl-3182f3e2dc62cf918fa12f578e2bbfc952862320.tar.xz
youtube-dl-3182f3e2dc62cf918fa12f578e2bbfc952862320.zip
[justin.tv] Fix page reporting (#3352)
youtube-dl -j http://www.twitch.tv/fang_i3anger still fails though.
-rw-r--r--youtube_dl/extractor/justintv.py17
1 file changed, 9 insertions, 8 deletions
diff --git a/youtube_dl/extractor/justintv.py b/youtube_dl/extractor/justintv.py
index 7083db12e..27017e89f 100644
--- a/youtube_dl/extractor/justintv.py
+++ b/youtube_dl/extractor/justintv.py
@@ -1,5 +1,6 @@
 from __future__ import unicode_literals
 
+import itertools
 import json
 import os
 import re
@@ -43,10 +44,11 @@ class JustinTVIE(InfoExtractor):
     }
 
     # Return count of items, list of *valid* items
-    def _parse_page(self, url, video_id):
-        info_json = self._download_webpage(url, video_id,
-                                           'Downloading video info JSON',
-                                           'unable to download video info JSON')
+    def _parse_page(self, url, video_id, counter):
+        info_json = self._download_webpage(
+            url, video_id,
+            'Downloading video info JSON on page %d' % counter,
+            'Unable to download video info JSON %d' % counter)
 
         response = json.loads(info_json)
         if type(response) != list:
@@ -138,11 +140,10 @@ class JustinTVIE(InfoExtractor):
         entries = []
         offset = 0
         limit = self._JUSTIN_PAGE_LIMIT
-        while True:
-            if paged:
-                self.report_download_page(video_id, offset)
+        for counter in itertools.count(1):
             page_url = api + ('?offset=%d&limit=%d' % (offset, limit))
-            page_count, page_info = self._parse_page(page_url, video_id)
+            page_count, page_info = self._parse_page(
+                page_url, video_id, counter)
             entries.extend(page_info)
             if not paged or page_count != limit:
                 break