@@ -1290,7 +1290,6 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
     def _extract_playlist(self, playlist_id):
         url = self._TEMPLATE_URL % playlist_id
         page = self._download_webpage(url, playlist_id)
-        more_widget_html = content_html = page
 
         for match in re.findall(r'<div class="yt-alert-message">([^<]+)</div>', page):
             match = match.strip()
@@ -1310,36 +1309,36 @@ class YoutubePlaylistIE(YoutubeBaseInfoExtractor):
                 self.report_warning('Youtube gives an alert message: ' + match)
 
         # Extract the video ids from the playlist pages
-        ids = []
-
-        for page_num in itertools.count(1):
-            matches = re.finditer(self._VIDEO_RE, content_html)
-            # We remove the duplicates and the link with index 0
-            # (it's not the first video of the playlist)
-            new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
-            ids.extend(new_ids)
-
-            mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
-            if not mobj:
-                break
-
-            more = self._download_json(
-                'https://youtube.com/%s' % mobj.group('more'), playlist_id,
-                'Downloading page #%s' % page_num,
-                transform_source=uppercase_escape)
-            content_html = more['content_html']
-            if not content_html.strip():
-                # Some webpages show a "Load more" button but they don't
-                # have more videos
-                break
-            more_widget_html = more['load_more_widget_html']
+        def _entries():
+            more_widget_html = content_html = page
+            for page_num in itertools.count(1):
+                matches = re.finditer(self._VIDEO_RE, content_html)
+                # We remove the duplicates and the link with index 0
+                # (it's not the first video of the playlist)
+                new_ids = orderedSet(m.group('id') for m in matches if m.group('index') != '0')
+                for vid_id in new_ids:
+                    yield self.url_result(vid_id, 'Youtube', video_id=vid_id)
+
+                mobj = re.search(r'data-uix-load-more-href="/?(?P<more>[^"]+)"', more_widget_html)
+                if not mobj:
+                    break
+
+                more = self._download_json(
+                    'https://youtube.com/%s' % mobj.group('more'), playlist_id,
+                    'Downloading page #%s' % page_num,
+                    transform_source=uppercase_escape)
+                content_html = more['content_html']
+                if not content_html.strip():
+                    # Some webpages show a "Load more" button but they don't
+                    # have more videos
+                    break
+                more_widget_html = more['load_more_widget_html']
 
         playlist_title = self._html_search_regex(
             r'(?s)<h1 class="pl-header-title[^"]*">\s*(.*?)\s*</h1>',
             page, 'title')
 
-        url_results = self._ids_to_results(ids)
-        return self.playlist_result(url_results, playlist_id, playlist_title)
+        return self.playlist_result(_entries(), playlist_id, playlist_title)
 
     def _real_extract(self, url):
         # Extract playlist id
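For context on why the change matters: the old code built the full ids list up front, so every "Load more" page was downloaded even when the caller only needed the first few entries. The generator returned by _entries() defers each _download_json call until the consumer actually asks for more results. The snippet below is a minimal, self-contained sketch of that lazy-pagination pattern; fetch_page, the three-page fake paginator, and the printed ids are hypothetical stand-ins for illustration, not youtube-dl code.

import itertools

# Hypothetical stand-in for a network call; in the real extractor this is
# self._download_json() fetching the "Load more" continuation.
def fetch_page(page_num):
    print('downloading page #%d' % page_num)
    # Pretend each page holds two video ids and that page 3 is the last one.
    ids = ['vid%d_%d' % (page_num, i) for i in (1, 2)]
    has_more = page_num < 3
    return ids, has_more

def entries():
    # Same shape as the _entries() generator in the diff: fetch a page,
    # yield its results, and only then decide whether to fetch the next one.
    for page_num in itertools.count(1):
        ids, has_more = fetch_page(page_num)
        for vid_id in ids:
            yield vid_id
        if not has_more:
            break

# Consuming only the first three entries downloads pages 1 and 2 and never
# touches page 3, which is the point of returning a generator instead of a
# prebuilt list.
print(list(itertools.islice(entries(), 3)))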