@@ -14,10 +14,6 @@ import email.utils
 import xml.etree.ElementTree
 import random
 import math
-import urllib
-import urllib2
-import httplib
-from urlparse import parse_qs, urlparse
 
 from .utils import *
 
@@ -3744,43 +3740,37 @@ class YouPornIE(InfoExtractor):
     """Information extractor for youporn.com."""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youporn\.com/watch/(?P<videoid>[0-9]+)/(?P<title>[^/]+)'
-    IE_NAME = u'youporn'
-    VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
-    VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
-    VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
-    DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
-    LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_id(self, video_id):
-        """Report finding video ID"""
-        self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
+    # def report_id(self, video_id):
+    # """Report finding video ID"""
+    # self._downloader.to_screen(u'[youporn] Video ID: %s' % video_id)
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
+    # def report_webpage(self, url):
+    # """Report downloading page"""
+    # self._downloader.to_screen(u'[youporn] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report dfinding title"""
-        self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
+    # def report_title(self, video_title):
+    # """Report dfinding title"""
+    # self._downloader.to_screen(u'[youporn] Title: %s' % video_title)
 
-    def report_uploader(self, uploader):
-        """Report dfinding title"""
-        self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
+    # def report_uploader(self, uploader):
+    # """Report dfinding title"""
+    # self._downloader.to_screen(u'[youporn] Uploader: %s' % uploader)
 
-    def report_upload_date(self, video_date):
-        """Report finding date"""
-        self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
+    # def report_upload_date(self, video_date):
+    # """Report finding date"""
+    # self._downloader.to_screen(u'[youporn] Date: %s' % video_date)
 
     def _print_formats(self, formats):
         """Print all available formats"""
-        print 'Available formats:'
-        print u'ext\t\tformat'
-        print u'---------------------------------'
+        print('Available formats:')
+        print(u'ext\t\tformat')
+        print(u'---------------------------------')
         for format in formats:
-            print u'%s\t\t%s' % (format['ext'], format['format'])
+            print(u'%s\t\t%s' % (format['ext'], format['format']))
 
     def _specific(self, req_format, formats):
         for x in formats:
@@ -3788,58 +3778,57 @@ class YouPornIE(InfoExtractor):
                 return x
         return None
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
         if mobj is None:
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
 
-        video_id = mobj.group('videoid').decode('utf-8')
-        self.report_id(video_id)
+        video_id = mobj.group('videoid')
+        #self.report_id(video_id)
 
-        # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-            return
-        self.report_webpage(url)
-
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video title
-        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        VIDEO_TITLE_RE = r'videoTitleArea">(?P<title>.*)</h1>'
+        result = re.search(VIDEO_TITLE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        video_title = result.group('title').decode('utf-8').strip()
-        self.report_title(video_title)
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
 
         # Get the video date
-        result = re.search(self.VIDEO_DATE_RE, webpage)
+        VIDEO_DATE_RE = r'Date:</b>(?P<date>.*)</li>'
+        result = re.search(VIDEO_DATE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video date')
             return
-        upload_date = result.group('date').decode('utf-8').strip()
-        self.report_upload_date(upload_date)
+        upload_date = result.group('date').strip()
+        #self.report_upload_date(upload_date)
 
         # Get the video uploader
-        result = re.search(self.VIDEO_UPLOADER_RE, webpage)
+        VIDEO_UPLOADER_RE = r'Submitted:</b>(?P<uploader>.*)</li>'
+        result = re.search(VIDEO_UPLOADER_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract uploader')
             return
-        video_uploader = result.group('uploader').decode('utf-8').strip()
+        video_uploader = result.group('uploader').strip()
         video_uploader = clean_html( video_uploader )
-        self.report_uploader(video_uploader)
+        #self.report_uploader(video_uploader)
 
         # Get all of the formats available
-        result = re.search(self.DOWNLOAD_LIST_RE, webpage)
+        DOWNLOAD_LIST_RE = r'(?s)<ul class="downloadList">(?P<download_list>.*?)</ul>'
+        result = re.search(DOWNLOAD_LIST_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract download list')
             return
-        download_list_html = result.group('download_list').decode('utf-8').strip()
+        download_list_html = result.group('download_list').strip()
 
         # Get all of the links from the page
-        links = re.findall(self.LINK_RE, download_list_html)
+        LINK_RE = r'(?s)<a href="(?P<url>[^"]+)">'
+        links = re.findall(LINK_RE, download_list_html)
         if(len(links) == 0):
             self._downloader.trouble(u'ERROR: no known formats available for video')
             return
@@ -3853,8 +3842,8 @@ class YouPornIE(InfoExtractor):
             # http://cdn1.download.youporn.phncdn.com/201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4?nvb=20121113051249&nva=20121114051249&ir=1200&sr=1200&hash=014b882080310e95fb6a0
             # A path looks like this:
             # /201210/31/8004515/480p_370k_8004515/YouPorn%20-%20Nubile%20Films%20The%20Pillow%20Fight.mp4
-            video_url = unescapeHTML( link.decode('utf-8') )
-            path = urlparse( video_url ).path
+            video_url = unescapeHTML( link )
+            path = compat_urllib_parse_urlparse( video_url ).path
             extension = os.path.splitext( path )[1][1:]
             format = path.split('/')[4].split('_')[:2]
             size = format[0]
@@ -3903,29 +3892,25 @@ class PornotubeIE(InfoExtractor):
     """Information extractor for pornotube.com."""
 
     _VALID_URL = r'^(?:https?://)?(?:\w+\.)?pornotube\.com(/c/(?P<channel>[0-9]+))?(/m/(?P<videoid>[0-9]+))(/(?P<title>.+))$'
-    IE_NAME = u'pornotube'
-    VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
-    VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
-
 
-    def __init__(self, downloader=None):
-        InfoExtractor.__init__(self, downloader)
+    # def __init__(self, downloader=None):
+    # InfoExtractor.__init__(self, downloader)
 
-    def report_extract_entry(self, url):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
+    # def report_extract_entry(self, url):
+    # """Report downloading extry"""
+    # self._downloader.to_screen(u'[pornotube] Downloading entry: %s' % url.decode('utf-8'))
 
-    def report_date(self, upload_date):
-        """Report finding uploaded date"""
-        self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
+    # def report_date(self, upload_date):
+    # """Report finding uploaded date"""
+    # self._downloader.to_screen(u'[pornotube] Entry date: %s' % upload_date)
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
+    # def report_webpage(self, url):
+    # """Report downloading page"""
+    # self._downloader.to_screen(u'[pornotube] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
+    # def report_title(self, video_title):
+    # """Report downloading extry"""
+    # self._downloader.to_screen(u'[pornotube] Title: %s' % video_title.decode('utf-8'))
 
     def _real_extract(self, url):
         mobj = re.match(self._VALID_URL, url)
@@ -3933,34 +3918,31 @@ class PornotubeIE(InfoExtractor):
             self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
 
-        video_id = mobj.group('videoid').decode('utf-8')
-        video_title = mobj.group('title').decode('utf-8')
-        self.report_title(video_title);
+        video_id = mobj.group('videoid')
+        video_title = mobj.group('title')
+        #self.report_title(video_title);
 
         # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
-            return
-        self.report_webpage(url)
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video URL
-        result = re.search(self.VIDEO_URL_RE, webpage)
+        VIDEO_URL_RE = r'url: "(?P<url>http://video[0-9].pornotube.com/.+\.flv)",'
+        result = re.search(VIDEO_URL_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
-        video_url = urllib.unquote(result.group('url').decode('utf-8'))
-        self.report_extract_entry(video_url)
+        video_url = compat_urllib_parse.unquote(result.group('url'))
+        #self.report_extract_entry(video_url)
 
         #Get the uploaded date
-        result = re.search(self.VIDEO_UPLOADED_RE, webpage)
+        VIDEO_UPLOADED_RE = r'<div class="video_added_by">Added (?P<date>[0-9\/]+) by'
+        result = re.search(VIDEO_UPLOADED_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        upload_date = result.group('date').decode('utf-8')
-        self.report_date(upload_date);
-
+        upload_date = result.group('date')
+        #self.report_date(upload_date);
 
         info = {'id': video_id,
                 'url': video_url,
@@ -3980,71 +3962,69 @@ class PornotubeIE(InfoExtractor):
 class YouJizzIE(InfoExtractor):
     """Information extractor for youjizz.com."""
 
-    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/([^.]+).html$'
-    IE_NAME = u'youjizz'
-    VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
-    EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
-    SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
+    _VALID_URL = r'^(?:https?://)?(?:\w+\.)?youjizz\.com/videos/(?P<videoid>[^.]+).html$'
 
     def __init__(self, downloader=None):
         InfoExtractor.__init__(self, downloader)
 
-    def report_extract_entry(self, url):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
+    # def report_extract_entry(self, url):
+    # """Report downloading extry"""
+    # self._downloader.to_screen(u'[youjizz] Downloading entry: %s' % url.decode('utf-8'))
 
-    def report_webpage(self, url):
-        """Report downloading page"""
-        self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
+    # def report_webpage(self, url):
+    # """Report downloading page"""
+    # self._downloader.to_screen(u'[youjizz] Downloaded page: %s' % url)
 
-    def report_title(self, video_title):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
+    # def report_title(self, video_title):
+    # """Report downloading extry"""
+    # self._downloader.to_screen(u'[youjizz] Title: %s' % video_title.decode('utf-8'))
 
-    def report_embed_page(self, embed_page):
-        """Report downloading extry"""
-        self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
+    # def report_embed_page(self, embed_page):
+    # """Report downloading extry"""
+    # self._downloader.to_screen(u'[youjizz] Embed Page: %s' % embed_page.decode('utf-8'))
 
     def _real_extract(self, url):
-        # Get webpage content
-        try:
-            webpage = urllib2.urlopen(url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video webpage: %s' % err)
+        mobj = re.match(self._VALID_URL, url)
+        if mobj is None:
+            self._downloader.trouble(u'ERROR: invalid URL: %s' % url)
             return
-        self.report_webpage(url)
+
+        video_id = mobj.group('videoid')
+
+        # Get webpage content
+        webpage = self._download_webpage(url, video_id)
+        #self.report_webpage(url)
 
         # Get the video title
-        result = re.search(self.VIDEO_TITLE_RE, webpage)
+        VIDEO_TITLE_RE = r'<title>(?P<title>.*)</title>'
+        result = re.search(VIDEO_TITLE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video title')
             return
-        video_title = result.group('title').decode('utf-8').strip()
-        self.report_title(video_title)
+        video_title = result.group('title').strip()
+        #self.report_title(video_title)
 
         # Get the embed page
-        result = re.search(self.EMBED_PAGE_RE, webpage)
+        EMBED_PAGE_RE = r'http://www.youjizz.com/videos/embed/(?P<videoid>[0-9]+)'
+        result = re.search(EMBED_PAGE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract embed page')
             return
 
-        embed_page_url = result.group(0).decode('utf-8').strip()
-        video_id = result.group('videoid').decode('utf-8')
-        self.report_embed_page(embed_page_url)
+        embed_page_url = result.group(0).strip()
+        video_id = result.group('videoid')
+        #self.report_embed_page(embed_page_url)
 
-        try:
-            webpage = urllib2.urlopen(embed_page_url).read()
-        except (urllib2.URLError, httplib.HTTPException, socket.error), err:
-            self._downloader.trouble(u'ERROR: unable to download video embed page: %s' % err)
-            return
+        webpage = self._download_webpage(embed_page_url, video_id)
 
         # Get the video URL
-        result = re.search(self.SOURCE_RE, webpage)
+        SOURCE_RE = r'so.addVariable\("file",encodeURIComponent\("(?P<source>[^"]+)"\)\);'
+        result = re.search(SOURCE_RE, webpage)
         if result is None:
             self._downloader.trouble(u'ERROR: unable to extract video url')
             return
-        video_url = result.group('source').decode('utf-8')
-        self.report_extract_entry(video_url)
+        video_url = result.group('source')
+        #self.report_extract_entry(video_url)
 
         info = {'id': video_id,
                 'url': video_url,
@@ -4093,9 +4073,9 @@ def gen_extractors():
         MTVIE(),
         YoukuIE(),
         XNXXIE(),
-        YouJizzIE(),
-        PornotubeIE(),
-        YouPornIE(),
+        YouJizzIE(), # jefftimesten
+        PornotubeIE(), # jefftimesten
+        YouPornIE(), # jefftimesten
         GooglePlusIE(),
         ArteTvIE(),
         NBAIE(),