From 056d857571158264aefb8d9f7d47c0dad768be63 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 11:26:38 +0100
Subject: [PATCH 1/6] refactor YouTube subtitles code, it was ugly (my bad)

---
 youtube_dl/InfoExtractors.py | 60 +++++++++++++++++++-----------------
 youtube_dl/utils.py          |  8 -----
 2 files changed, 31 insertions(+), 37 deletions(-)

diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index cf5b51bd8..aa4a6500b 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -219,6 +219,34 @@ class YoutubeIE(InfoExtractor):
             srt += caption + '\n\n'
         return srt
 
+    def _extract_subtitles(self, video_id):
+        self.report_video_subtitles_download(video_id)
+        request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
+        try:
+            srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
+        srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
+        if not srt_lang_list:
+            return (u'WARNING: video has no closed captions', None)
+        if self._downloader.params.get('subtitleslang', False):
+            srt_lang = self._downloader.params.get('subtitleslang')
+        elif 'en' in srt_lang_list:
+            srt_lang = 'en'
+        else:
+            srt_lang = srt_lang_list.keys()[0]
+        if not srt_lang in srt_lang_list:
+            return (u'WARNING: no closed captions found in the specified language', None)
+        request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
+        try:
+            srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
+        except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
+            return (u'WARNING: unable to download video subtitles: %s' % compat_str(err), None)
+        if not srt_xml:
+            return (u'WARNING: unable to download video subtitles', None)
+        return (None, self._closed_captions_xml_to_srt(srt_xml))
+
     def _print_formats(self, formats):
         print('Available formats:')
         for x in formats:
@@ -395,35 +423,9 @@ class YoutubeIE(InfoExtractor):
         # closed captions
         video_subtitles = None
         if self._downloader.params.get('writesubtitles', False):
-            try:
-                self.report_video_subtitles_download(video_id)
-                request = compat_urllib_request.Request('http://video.google.com/timedtext?hl=en&type=list&v=%s' % video_id)
-                try:
-                    srt_list = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                srt_lang_list = re.findall(r'name="([^"]*)"[^>]+lang_code="([\w\-]+)"', srt_list)
-                srt_lang_list = dict((l[1], l[0]) for l in srt_lang_list)
-                if not srt_lang_list:
-                    raise Trouble(u'WARNING: video has no closed captions')
-                if self._downloader.params.get('subtitleslang', False):
-                    srt_lang = self._downloader.params.get('subtitleslang')
-                elif 'en' in srt_lang_list:
-                    srt_lang = 'en'
-                else:
-                    srt_lang = srt_lang_list.keys()[0]
-                if not srt_lang in srt_lang_list:
-                    raise Trouble(u'WARNING: no closed captions found in the specified language')
-                request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
-                try:
-                    srt_xml = compat_urllib_request.urlopen(request).read().decode('utf-8')
-                except (compat_urllib_error.URLError, compat_http_client.HTTPException, socket.error) as err:
-                    raise Trouble(u'WARNING: unable to download video subtitles: %s' % compat_str(err))
-                if not srt_xml:
-                    raise Trouble(u'WARNING: unable to download video subtitles')
-                video_subtitles = self._closed_captions_xml_to_srt(srt_xml)
-            except Trouble as trouble:
-                self._downloader.trouble(str(trouble))
+            (srt_error, video_subtitles) = self._extract_subtitles(video_id)
+            if srt_error:
+                self._downloader.trouble(srt_error)
 
         if 'length_seconds' not in video_info:
             self._downloader.trouble(u'WARNING: unable to extract video duration')
diff --git a/youtube_dl/utils.py b/youtube_dl/utils.py
index a5196b0ae..c18c9beed 100644
--- a/youtube_dl/utils.py
+++ b/youtube_dl/utils.py
@@ -465,14 +465,6 @@ class ContentTooShortError(Exception):
         self.downloaded = downloaded
         self.expected = expected
 
-
-class Trouble(Exception):
-    """Trouble helper exception
-
-    This is an exception to be handled with
-    FileDownloader.trouble
-    """
-
 class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
     """Handler for HTTP requests and responses.

From 0eaf520d7758d57c61afe1832c1db9a4fb2ccc88 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 14:14:43 +0100
Subject: [PATCH 2/6] add info_dict testing to test_download

---
 test/test_download.py | 18 +++++++++++++++++-
 test/tests.json       |  8 +++++++-
 2 files changed, 24 insertions(+), 2 deletions(-)

diff --git a/test/test_download.py b/test/test_download.py
index bce0e4fcd..9a6d4d604 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -7,6 +7,7 @@ import json
 import unittest
 import sys
 import socket
+import hashlib
 
 # Allow direct execution
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -28,8 +29,12 @@ socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
-        youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
         self.to_stderr = self.to_screen
+        self.processed_info_dicts = []
+        return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
+    def process_info(self, info_dict):
+        self.processed_info_dicts.append(info_dict)
+        return youtube_dl.FileDownloader.process_info(self, info_dict)
 
 def _file_md5(fn):
     with open(fn, 'rb') as f:
@@ -40,6 +45,7 @@ with io.open(DEF_FILE, encoding='utf-8') as deff:
 with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
     parameters = json.load(pf)
 
+
 class TestDownload(unittest.TestCase):
     def setUp(self):
         self.parameters = parameters
@@ -68,18 +74,28 @@ def generator(test_case):
         if 'skip' in test_case:
             print('Skipping: {0}'.format(test_case['skip']))
             return
+
         params = dict(self.parameters) # Duplicate it locally
         for p in test_case.get('params', {}):
             params[p] = test_case['params'][p]
+
         fd = FileDownloader(params)
         fd.add_info_extractor(ie())
         for ien in test_case.get('add_ie', []):
            fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
         fd.download([test_case['url']])
+
         self.assertTrue(os.path.exists(test_case['file']))
         if 'md5' in test_case:
             md5_for_file = _file_md5(test_case['file'])
             self.assertEqual(md5_for_file, test_case['md5'])
+        info_dict = fd.processed_info_dicts[0]
+        for (info_element, value) in test_case.get('info_dict', {}).items():
+            if value.startswith('md5:'):
+                md5_info_value = hashlib.md5(info_dict[info_element]).hexdigest()
+                self.assertEqual(value[3:], md5_info_value)
+            else:
+                self.assertEqual(value, info_dict[info_element])
 
     return test_template
diff --git a/test/tests.json b/test/tests.json
index 5c4cf51bf..4f8f68f38 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -2,7 +2,13 @@
   {
     "name": "Youtube",
     "url": "http://www.youtube.com/watch?v=BaW_jenozKc",
-    "file": "BaW_jenozKc.mp4"
+    "file": "BaW_jenozKc.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video \"'/\\ä↭𝕐",
+      "uploader": "Philipp Hagemeister",
+      "upload_date": "20121002",
+      "description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
+    }
   },
   {
     "name": "Dailymotion",

From 1a2c3c0f3ee8f91d650a2a252d2795ea88203ca0 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 14:18:23 +0100
Subject: [PATCH 3/6] some py3 fixes, both needed and recommended; we should pass 2to3 as cleanly as possible now

---
 youtube_dl/FileDownloader.py |  7 ++-----
 youtube_dl/InfoExtractors.py | 10 +++++-----
 youtube_dl/__init__.py       |  4 ++--
 3 files changed, 9 insertions(+), 12 deletions(-)

diff --git a/youtube_dl/FileDownloader.py b/youtube_dl/FileDownloader.py
index a861086c3..d9a4ecd3a 100644
--- a/youtube_dl/FileDownloader.py
+++ b/youtube_dl/FileDownloader.py
@@ -433,11 +433,8 @@ class FileDownloader(object):
             try:
                 srtfn = filename.rsplit('.', 1)[0] + u'.srt'
                 self.report_writesubtitles(srtfn)
-                srtfile = open(encodeFilename(srtfn), 'wb')
-                try:
-                    srtfile.write(info_dict['subtitles'].encode('utf-8'))
-                finally:
-                    srtfile.close()
+                with io.open(encodeFilename(srtfn), 'w', encoding='utf-8') as srtfile:
+                    srtfile.write(info_dict['subtitles'])
             except (OSError, IOError):
                 self.trouble(u'ERROR: Cannot write subtitles file ' + descfn)
                 return
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index aa4a6500b..6201ccad7 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -235,7 +235,7 @@ class YoutubeIE(InfoExtractor):
         elif 'en' in srt_lang_list:
             srt_lang = 'en'
         else:
-            srt_lang = srt_lang_list.keys()[0]
+            srt_lang = list(srt_lang_list.keys())[0]
         if not srt_lang in srt_lang_list:
             return (u'WARNING: no closed captions found in the specified language', None)
         request = compat_urllib_request.Request('http://www.youtube.com/api/timedtext?lang=%s&name=%s&v=%s' % (srt_lang, srt_lang_list[srt_lang], video_id))
@@ -445,7 +445,7 @@ class YoutubeIE(InfoExtractor):
         elif 'url_encoded_fmt_stream_map' in video_info and len(video_info['url_encoded_fmt_stream_map']) >= 1:
             url_data_strs = video_info['url_encoded_fmt_stream_map'][0].split(',')
             url_data = [compat_parse_qs(uds) for uds in url_data_strs]
-            url_data = filter(lambda ud: 'itag' in ud and 'url' in ud, url_data)
+            url_data = [ud for ud in url_data if 'itag' in ud and 'url' in ud]
             url_map = dict((ud['itag'][0], ud['url'][0] + '&signature=' + ud['sig'][0]) for ud in url_data)
 
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2115,7 +2115,7 @@ class FacebookIE(InfoExtractor):
         video_description = video_info.get('description', 'No description available.')
 
         url_map = video_info['video_urls']
-        if len(url_map.keys()) > 0:
+        if len(list(url_map.keys())) > 0:
             # Decide which formats to download
             req_format = self._downloader.params.get('format', None)
             format_limit = self._downloader.params.get('format_limit', None)
@@ -2975,7 +2975,7 @@ class MixcloudIE(InfoExtractor):
                 if file_url is not None:
                     break # got it!
             else:
-                if req_format not in formats.keys():
+                if req_format not in list(formats.keys()):
                     self._downloader.trouble(u'ERROR: format is not available')
                     return
@@ -3274,7 +3274,7 @@ class YoukuIE(InfoExtractor):
         seed = config['data'][0]['seed']
 
         format = self._downloader.params.get('format', None)
-        supported_format = config['data'][0]['streamfileids'].keys()
+        supported_format = list(config['data'][0]['streamfileids'].keys())
 
         if format is None or format == 'best':
             if 'hd2' in supported_format:
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index efa8b813f..9b25ab3a2 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -453,8 +453,8 @@ def _real_main():
     if opts.list_extractors:
         for ie in extractors:
             print(ie.IE_NAME + (' (CURRENTLY BROKEN)' if not ie._WORKING else ''))
-            matchedUrls = filter(lambda url: ie.suitable(url), all_urls)
-            all_urls = filter(lambda url: url not in matchedUrls, all_urls)
+            matchedUrls = [url for url in all_urls if ie.suitable(url)]
+            all_urls = [url for url in all_urls if url not in matchedUrls]
             for mu in matchedUrls:
                 print(u' ' + mu)
         sys.exit(0)

From 77c4beab8a0c13e158bad8af4e014d57766f1940 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 16:28:16 +0100
Subject: [PATCH 4/6] new info_dict field: uploader_id

---
 README.md                    | 19 ++++++++++---------
 test/tests.json              |  1 +
 youtube_dl/InfoExtractors.py | 18 +++++++++++++++---
 youtube_dl/__init__.py       |  2 +-
 4 files changed, 27 insertions(+), 13 deletions(-)

diff --git a/README.md b/README.md
index 2ca021125..9dffd99ab 100644
--- a/README.md
+++ b/README.md
@@ -46,15 +46,16 @@ which means you can modify it, redistribute it or use it however you like.
     -A, --auto-number          number downloaded files starting from 00000
     -o, --output TEMPLATE      output filename template. Use %(title)s to get
                                the title, %(uploader)s for the uploader name,
-                               %(autonumber)s to get an automatically incremented
-                               number, %(ext)s for the filename extension,
-                               %(upload_date)s for the upload date (YYYYMMDD),
-                               %(extractor)s for the provider (youtube, metacafe,
-                               etc), %(id)s for the video id and %% for a literal
-                               percent. Use - to output to stdout. Can also be
-                               used to download to a different directory, for
-                               example with -o '/my/downloads/%(uploader)s/%(title
-                               )s-%(id)s.%(ext)s' .
+                               %(uploader_id)s for the uploader nickname if
+                               different, %(autonumber)s to get an automatically
+                               incremented number, %(ext)s for the filename
+                               extension, %(upload_date)s for the upload date
+                               (YYYYMMDD), %(extractor)s for the provider
+                               (youtube, metacafe, etc), %(id)s for the video id
+                               and %% for a literal percent. Use - to output to
+                               stdout. Can also be used to download to a different
+                               directory, for example with -o '/my/downloads/%(upl
+                               oader)s/%(title)s-%(id)s.%(ext)s' .
     --restrict-filenames       Restrict filenames to only ASCII characters, and
                                avoid "&" and spaces in filenames
     -a, --batch-file FILE      file containing URLs to download ('-' for stdin)
diff --git a/test/tests.json b/test/tests.json
index 4f8f68f38..83afda985 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -6,6 +6,7 @@
     "info_dict": {
       "title": "youtube-dl test video \"'/\\ä↭𝕐",
       "uploader": "Philipp Hagemeister",
+      "uploader_id": "phihag",
       "upload_date": "20121002",
       "description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
     }
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 6201ccad7..3eb070d4a 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -32,7 +32,7 @@ class InfoExtractor(object):
     id: Video identifier.
     url: Final video URL.
-    uploader: Nickname of the video uploader, unescaped.
+    uploader: Full name of the video uploader, unescaped.
     upload_date: Video upload date (YYYYMMDD).
     title: Video title, unescaped.
     ext: Video filename extension.
@@ -42,6 +42,7 @@ class InfoExtractor(object):
     format: The video format, defaults to ext (used for --get-format)
     thumbnail: Full URL to a video thumbnail image.
     description: One-line video description.
+    uploader_id: Nickname or id of the video uploader.
     player_url: SWF Player URL (used for rtmpdump).
     subtitles: The .srt file contents.
     urlhandle: [internal] The urlHandle to be used to download the file,
@@ -384,10 +385,18 @@ class YoutubeIE(InfoExtractor):
 
         # uploader
         if 'author' not in video_info:
-            self._downloader.trouble(u'ERROR: unable to extract uploader nickname')
+            self._downloader.trouble(u'ERROR: unable to extract uploader name')
             return
         video_uploader = compat_urllib_parse.unquote_plus(video_info['author'][0])
 
+        # uploader_id
+        video_uploader_id = None
+        mobj = re.search(r'', video_webpage)
+        if mobj is not None:
+            video_uploader_id = mobj.group(1)
+        else:
+            self._downloader.trouble(u'WARNING: unable to extract uploader nickname')
+
         # title
         if 'title' not in video_info:
             self._downloader.trouble(u'ERROR: unable to extract video title')
@@ -495,6 +504,7 @@ class YoutubeIE(InfoExtractor):
             'id': video_id,
             'url': video_real_url,
             'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
             'upload_date': upload_date,
             'title': video_title,
             'ext': video_extension,
@@ -994,8 +1004,9 @@ class VimeoIE(InfoExtractor):
         # Extract title
         video_title = config["video"]["title"]
 
-        # Extract uploader
+        # Extract uploader and uploader_id
         video_uploader = config["video"]["owner"]["name"]
+        video_uploader_id = config["video"]["owner"]["url"].split('/')[-1]
 
         # Extract video thumbnail
         video_thumbnail = config["video"]["thumbnail"]
@@ -1047,6 +1058,7 @@ class VimeoIE(InfoExtractor):
             'id': video_id,
             'url': video_url,
             'uploader': video_uploader,
+            'uploader_id': video_uploader_id,
             'upload_date': video_upload_date,
             'title': video_title,
             'ext': video_extension,
diff --git a/youtube_dl/__init__.py b/youtube_dl/__init__.py
index 9b25ab3a2..1102b2fce 100644
--- a/youtube_dl/__init__.py
+++ b/youtube_dl/__init__.py
@@ -307,7 +307,7 @@ def parseOpts():
             action='store_true', dest='autonumber',
             help='number downloaded files starting from 00000', default=False)
     filesystem.add_option('-o', '--output',
-            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
+            dest='outtmpl', metavar='TEMPLATE', help='output filename template. Use %(title)s to get the title, %(uploader)s for the uploader name, %(uploader_id)s for the uploader nickname if different, %(autonumber)s to get an automatically incremented number, %(ext)s for the filename extension, %(upload_date)s for the upload date (YYYYMMDD), %(extractor)s for the provider (youtube, metacafe, etc), %(id)s for the video id and %% for a literal percent. Use - to output to stdout. Can also be used to download to a different directory, for example with -o \'/my/downloads/%(uploader)s/%(title)s-%(id)s.%(ext)s\' .')
     filesystem.add_option('--restrict-filenames',
             action='store_true', dest='restrictfilenames',
             help='Restrict filenames to only ASCII characters, and avoid "&" and spaces in filenames', default=False)

From 6b3aef80ceba9b4715065be924dcb1f83ec36655 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 16:30:55 +0100
Subject: [PATCH 5/6] better Vimeo tests; fixed a couple of VimeoIE fields

---
 test/test_download.py          |  9 ++++-----
 test/test_write_info_json.py   |  2 --
 test/test_youtube_lists.py     |  2 --
 test/test_youtube_subtitles.py |  2 --
 test/tests.json                | 13 ++++++++++---
 youtube_dl/InfoExtractors.py   |  4 ++--
 youtube_dl/utils.py            |  3 ++-
 7 files changed, 18 insertions(+), 17 deletions(-)

diff --git a/test/test_download.py b/test/test_download.py
index 9a6d4d604..1ee1b334d 100644
--- a/test/test_download.py
+++ b/test/test_download.py
@@ -6,8 +6,8 @@ import os
 import json
 import unittest
 import sys
-import socket
 import hashlib
+import socket
 
 # Allow direct execution
 sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
@@ -25,7 +25,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
@@ -90,12 +89,12 @@ def generator(test_case):
             md5_for_file = _file_md5(test_case['file'])
             self.assertEqual(md5_for_file, test_case['md5'])
         info_dict = fd.processed_info_dicts[0]
-        for (info_element, value) in test_case.get('info_dict', {}).items():
+        for (info_field, value) in test_case.get('info_dict', {}).items():
             if value.startswith('md5:'):
-                md5_info_value = hashlib.md5(info_dict[info_element]).hexdigest()
+                md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
                 self.assertEqual(value[3:], md5_info_value)
             else:
-                self.assertEqual(value, info_dict[info_element])
+                self.assertEqual(value, info_dict.get(info_field))
 
     return test_template
diff --git a/test/test_write_info_json.py b/test/test_write_info_json.py
index ebf543980..8134dda37 100644
--- a/test/test_write_info_json.py
+++ b/test/test_write_info_json.py
@@ -3,7 +3,6 @@
 
 import json
 import os
-import socket
 import sys
 import unittest
 
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FileDownloader(youtube_dl.FileDownloader):
     def __init__(self, *args, **kwargs):
diff --git a/test/test_youtube_lists.py b/test/test_youtube_lists.py
index e352e5ab9..3044e0852 100644
--- a/test/test_youtube_lists.py
+++ b/test/test_youtube_lists.py
@@ -2,7 +2,6 @@
 
 import sys
 import unittest
-import socket
 import json
 
 # Allow direct execution
@@ -22,7 +21,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FakeDownloader(object):
     def __init__(self):
diff --git a/test/test_youtube_subtitles.py b/test/test_youtube_subtitles.py
index 64a391d14..5d3566a35 100644
--- a/test/test_youtube_subtitles.py
+++ b/test/test_youtube_subtitles.py
@@ -2,7 +2,6 @@
 
 import sys
 import unittest
-import socket
 import json
 import io
 import hashlib
@@ -24,7 +23,6 @@ cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
 proxy_handler = compat_urllib_request.ProxyHandler()
 opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
 compat_urllib_request.install_opener(opener)
-socket.setdefaulttimeout(300) # 5 minutes should be enough (famous last words)
 
 class FakeDownloader(object):
     def __init__(self):
diff --git a/test/tests.json b/test/tests.json
index 83afda985..d24bdf6fc 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -37,9 +37,16 @@
   },
   {
     "name": "Vimeo",
-    "md5": "60540a4ec7cc378ec84b919c0aed5023",
-    "url": "http://vimeo.com/14160053",
-    "file": "14160053.mp4"
+    "md5": "8879b6cc097e987f02484baf890129e5",
+    "url": "http://vimeo.com/56015672",
+    "file": "56015672.mp4",
+    "info_dict": {
+      "title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
+      "uploader": "Filippo Valsorda",
+      "uploader_id": "user7108434",
+      "upload_date": "20121220",
+      "description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
+    }
   },
   {
     "name": "Soundcloud",
diff --git a/youtube_dl/InfoExtractors.py b/youtube_dl/InfoExtractors.py
index 3eb070d4a..5a9032331 100755
--- a/youtube_dl/InfoExtractors.py
+++ b/youtube_dl/InfoExtractors.py
@@ -1018,9 +1018,9 @@ class VimeoIE(InfoExtractor):
         # Extract upload date
         video_upload_date = None
-        mobj = re.search(r'', webpage)
+        mobj = re.search(r'
     html = html.replace('\n', ' ')
-    html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html)
+    html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
+    html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
     # Strip html tags
     html = re.sub('<.*?>', '', html)
     # Replace html entities

From 162e3c52616a72f4ddc11b0e5de0f2425e512896 Mon Sep 17 00:00:00 2001
From: Filippo Valsorda
Date: Thu, 20 Dec 2012 17:21:46 +0100
Subject: [PATCH 6/6] Temporary skip Escapist test as it fails only on Travis; we'll make a more specific workaround later if we can't fix it

---
 test/tests.json | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/test/tests.json b/test/tests.json
index d24bdf6fc..b573affc5 100644
--- a/test/tests.json
+++ b/test/tests.json
@@ -95,7 +95,8 @@
     "name": "Escapist",
     "url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
     "file": "6618-Breaking-Down-Baldurs-Gate.flv",
-    "md5": "c6793dbda81388f4264c1ba18684a74d"
+    "md5": "c6793dbda81388f4264c1ba18684a74d",
+    "skip": "Fails with timeout on Travis"
   },
   {
     "name": "GooglePlus",