Merge branch 'remitamine-srgssr'

Branch: pull/8/head
Author: remitamine
Commit: 574dd17882

youtube_dl/extractor/__init__.py

@@ -643,7 +643,10 @@ from .sportbox import (
     SportBoxEmbedIE,
 )
 from .sportdeutschland import SportDeutschlandIE
-from .srf import SrfIE
+from .srgssr import (
+    SRGSSRIE,
+    SRGSSRPlayIE,
+)
 from .srmediathek import SRMediathekIE
 from .ssa import SSAIE
 from .stanfordoc import StanfordOpenClassroomIE

youtube_dl/extractor/rts.py

@@ -3,7 +3,7 @@ from __future__ import unicode_literals

 import re

-from .common import InfoExtractor
+from .srgssr import SRGSSRIE
 from ..compat import (
     compat_str,
     compat_urllib_parse_urlparse,
@@ -17,23 +17,14 @@ from ..utils import (
 )


-class RTSIE(InfoExtractor):
+class RTSIE(SRGSSRIE):
     IE_DESC = 'RTS.ch'
-    _VALID_URL = r'''(?x)
-        (?:
-            rts:(?P<rts_id>\d+)|
-            https?://
-                (?:www\.)?rts\.ch/
-                (?:
-                    (?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html|
-                    play/tv/[^/]+/video/(?P<display_id_new>.+?)\?id=(?P<id_new>[0-9]+)
-                )
-        )'''
+    _VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'

     _TESTS = [
         {
             'url': 'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
-            'md5': '753b877968ad8afaeddccc374d4256a5',
+            'md5': 'f254c4b26fb1d3c183793d52bc40d3e7',
             'info_dict': {
                 'id': '3449373',
                 'display_id': 'les-enfants-terribles',
@@ -47,13 +38,17 @@ class RTSIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.image',
                 'view_count': int,
             },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            }
         },
         {
             'url': 'http://www.rts.ch/emissions/passe-moi-les-jumelles/5624067-entre-ciel-et-mer.html',
-            'md5': 'c148457a27bdc9e5b1ffe081a7a8337b',
+            'md5': 'f1077ac5af686c76528dc8d7c5df29ba',
             'info_dict': {
-                'id': '5624067',
-                'display_id': 'entre-ciel-et-mer',
+                'id': '5742494',
+                'display_id': '5742494',
                 'ext': 'mp4',
                 'duration': 3720,
                 'title': 'Les yeux dans les cieux - Mon homard au Canada',
@@ -64,6 +59,10 @@ class RTSIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.image',
                 'view_count': int,
             },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            }
         },
         {
             'url': 'http://www.rts.ch/video/sport/hockey/5745975-1-2-kloten-fribourg-5-2-second-but-pour-gotteron-par-kwiatowski.html',
@@ -85,7 +84,7 @@ class RTSIE(InfoExtractor):
         },
         {
             'url': 'http://www.rts.ch/video/info/journal-continu/5745356-londres-cachee-par-un-epais-smog.html',
-            'md5': '9bb06503773c07ce83d3cbd793cebb91',
+            'md5': '9f713382f15322181bb366cc8c3a4ff0',
             'info_dict': {
                 'id': '5745356',
                 'display_id': 'londres-cachee-par-un-epais-smog',
@@ -99,6 +98,10 @@ class RTSIE(InfoExtractor):
                 'thumbnail': 're:^https?://.*\.image',
                 'view_count': int,
             },
+            'params': {
+                # m3u8 download
+                'skip_download': True,
+            }
         },
         {
             'url': 'http://www.rts.ch/audio/couleur3/programmes/la-belle-video-de-stephane-laurenceau/5706148-urban-hippie-de-damien-krisl-03-04-2014.html',
@@ -114,23 +117,6 @@ class RTSIE(InfoExtractor):
                 'timestamp': 1396551600,
             },
         },
-        {
-            'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
-            'md5': '968777c8779e5aa2434be96c54e19743',
-            'info_dict': {
-                'id': '6348260',
-                'display_id': 'le-19h30',
-                'ext': 'mp4',
-                'duration': 1796,
-                'title': 'Le 19h30',
-                'description': '',
-                'uploader': 'Le 19h30',
-                'upload_date': '20141201',
-                'timestamp': 1417458600,
-                'thumbnail': 're:^https?://.*\.image',
-                'view_count': int,
-            },
-        },
         {
             # article with videos on rhs
             'url': 'http://www.rts.ch/sport/hockey/6693917-hockey-davos-decroche-son-31e-titre-de-champion-de-suisse.html',
@@ -139,42 +125,47 @@ class RTSIE(InfoExtractor):
                 'title': 'Hockey: Davos décroche son 31e titre de champion de Suisse',
             },
             'playlist_mincount': 5,
+        },
+        {
+            'url': 'http://www.rts.ch/play/tv/le-19h30/video/le-chantier-du-nouveau-parlement-vaudois-a-permis-une-trouvaille-historique?id=6348280',
+            'only_matching': True,
         }
     ]

     def _real_extract(self, url):
         m = re.match(self._VALID_URL, url)
-        video_id = m.group('rts_id') or m.group('id') or m.group('id_new')
-        display_id = m.group('display_id') or m.group('display_id_new')
+        media_id = m.group('rts_id') or m.group('id')
+        display_id = m.group('display_id') or media_id

         def download_json(internal_id):
             return self._download_json(
                 'http://www.rts.ch/a/%s.html?f=json/article' % internal_id,
                 display_id)

-        all_info = download_json(video_id)
+        all_info = download_json(media_id)

-        # video_id extracted out of URL is not always a real id
+        # media_id extracted out of URL is not always a real id
         if 'video' not in all_info and 'audio' not in all_info:
             page = self._download_webpage(url, display_id)

             # article with videos on rhs
             videos = re.findall(
-                r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:rts:video:(\d+)"',
+                r'<article[^>]+class="content-item"[^>]*>\s*<a[^>]+data-video-urn="urn:([^"]+)"',
                 page)
+            if not videos:
+                videos = re.findall(
+                    r'(?s)<iframe[^>]+class="srg-player"[^>]+src="[^"]+urn:([^"]+)"',
+                    page)
             if videos:
-                entries = [self.url_result('rts:%s' % video_urn, 'RTS') for video_urn in videos]
-                return self.playlist_result(entries, video_id, self._og_search_title(page))
+                entries = [self.url_result('srgssr:%s' % video_urn, 'SRGSSR') for video_urn in videos]
+                return self.playlist_result(entries, media_id, self._og_search_title(page))

             internal_id = self._html_search_regex(
                 r'<(?:video|audio) data-id="([0-9]+)"', page,
                 'internal video id')
             all_info = download_json(internal_id)

+        media_type = 'video' if 'video' in all_info else 'audio'
+
+        # check for errors
+        self.get_media_data('rts', media_type, media_id)
+
         info = all_info['video']['JSONinfo'] if 'video' in all_info else all_info['audio']

         upload_timestamp = parse_iso8601(info.get('broadcast_date'))
@@ -190,19 +181,23 @@ class RTSIE(InfoExtractor):

         formats = []
         for format_id, format_url in info['streams'].items():
+            if format_id == 'hds_sd' and 'hds' in info['streams']:
+                continue
+            if format_id == 'hls_sd' and 'hls' in info['streams']:
+                continue
             if format_url.endswith('.f4m'):
                 token = self._download_xml(
                     'http://tp.srgssr.ch/token/akahd.xml?stream=%s/*' % compat_urllib_parse_urlparse(format_url).path,
-                    video_id, 'Downloading %s token' % format_id)
+                    media_id, 'Downloading %s token' % format_id)
                 auth_params = xpath_text(token, './/authparams', 'auth params')
                 if not auth_params:
                     continue
                 formats.extend(self._extract_f4m_formats(
                     '%s?%s&hdcore=3.4.0&plugin=aasp-3.4.0.132.66' % (format_url, auth_params),
-                    video_id, f4m_id=format_id))
+                    media_id, f4m_id=format_id, fatal=False))
             elif format_url.endswith('.m3u8'):
                 formats.extend(self._extract_m3u8_formats(
-                    format_url, video_id, 'mp4', m3u8_id=format_id))
+                    format_url, media_id, 'mp4', m3u8_id=format_id, fatal=False))
             else:
                 formats.append({
                     'format_id': format_id,
@@ -217,11 +212,11 @@ class RTSIE(InfoExtractor):
                 'tbr': media['rate'] or extract_bitrate(media['url']),
             } for media in info['media'] if media.get('rate')])

-        self._check_formats(formats, video_id)
+        self._check_formats(formats, media_id)
         self._sort_formats(formats)

         return {
-            'id': video_id,
+            'id': media_id,
             'display_id': display_id,
             'formats': formats,
             'title': info['title'],
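Note (not part of the commit): a quick sketch of what the simplified RTSIE pattern now accepts. The regex string is copied verbatim from the hunk above; the variable name and sample URLs are taken from the tests. Play-style URLs that RTSIE used to match are now handled by the new SRGSSRPlayIE further down.

import re

RTS_VALID_URL = r'rts:(?P<rts_id>\d+)|https?://(?:www\.)?rts\.ch/(?:[^/]+/){2,}(?P<id>[0-9]+)-(?P<display_id>.+?)\.html'

for url in (
    'rts:6348260',  # internal scheme used when SRGSSRIE delegates RTS content
    'http://www.rts.ch/archives/tv/divers/3449373-les-enfants-terribles.html',
    'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',  # play URL: no longer matched here
):
    m = re.match(RTS_VALID_URL, url)
    print(url, '->', m.groupdict() if m else 'no match (falls through to SRGSSRPlayIE)')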

youtube_dl/extractor/srf.py (deleted)

@@ -1,120 +0,0 @@
-# coding: utf-8
-from __future__ import unicode_literals
-
-import re
-
-from .common import InfoExtractor
-from ..utils import (
-    determine_ext,
-    parse_iso8601,
-    xpath_text,
-)
-
-
-class SrfIE(InfoExtractor):
-    _VALID_URL = r'https?://(?:www\.srf\.ch/play(?:er)?/(?:tv|radio)/[^/]+/(?P<media_type>video|audio)/(?P<display_id>[^?]+)\?id=|tp\.srgssr\.ch/p/flash\?urn=urn:srf:ais:video:)(?P<id>[0-9a-f\-]{36})'
-    _TESTS = [{
-        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
-        'md5': '4cd93523723beff51bb4bee974ee238d',
-        'info_dict': {
-            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
-            'display_id': 'snowden-beantragt-asyl-in-russland',
-            'ext': 'm4v',
-            'upload_date': '20130701',
-            'title': 'Snowden beantragt Asyl in Russland',
-            'timestamp': 1372713995,
-        }
-    }, {
-        # No Speichern (Save) button
-        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
-        'md5': 'd97e236e80d1d24729e5d0953d276a4f',
-        'info_dict': {
-            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
-            'display_id': 'jaguar-xk120-shadow-und-tornado-dampflokomotive',
-            'ext': 'flv',
-            'upload_date': '20130710',
-            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
-            'timestamp': 1373493600,
-        },
-    }, {
-        'url': 'http://www.srf.ch/play/radio/hoerspielarchiv-srf-musikwelle/audio/saegel-ohni-wind-von-jakob-stebler?id=415bf3d3-6429-4de7-968d-95866e37cfbc',
-        'md5': '',
-        'info_dict': {
-            'id': '415bf3d3-6429-4de7-968d-95866e37cfbc',
-            'display_id': 'saegel-ohni-wind-von-jakob-stebler',
-            'ext': 'mp3',
-            'upload_date': '20080518',
-            'title': '«Sägel ohni Wind» von Jakob Stebler',
-            'timestamp': 1211112000,
-        },
-        'params': {
-            'skip_download': True,  # requires rtmpdump
-        },
-    }, {
-        'url': 'http://www.srf.ch/player/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
-        'only_matching': True,
-    }, {
-        'url': 'https://tp.srgssr.ch/p/flash?urn=urn:srf:ais:video:28e1a57d-5b76-4399-8ab3-9097f071e6c5',
-        'only_matching': True,
-    }]
-
-    def _real_extract(self, url):
-        mobj = re.match(self._VALID_URL, url)
-        video_id = mobj.group('id')
-        media_type = mobj.group('media_type')
-        display_id = mobj.group('display_id') or video_id
-
-        video_data = self._download_xml(
-            'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, video_id),
-            display_id)
-
-        title = xpath_text(
-            video_data, './AssetMetadatas/AssetMetadata/title', fatal=True)
-        thumbnails = [{
-            'url': s.text
-        } for s in video_data.findall('.//ImageRepresentation/url')]
-        timestamp = parse_iso8601(xpath_text(video_data, './createdDate'))
-        # The <duration> field in XML is different from the exact duration, skipping
-
-        formats = []
-        for item in video_data.findall('./Playlists/Playlist') + video_data.findall('./Downloads/Download'):
-            for url_node in item.findall('url'):
-                quality = url_node.attrib['quality']
-                full_url = url_node.text
-                original_ext = determine_ext(full_url).lower()
-                format_id = '%s-%s' % (quality, item.attrib['protocol'])
-                if original_ext == 'f4m':
-                    formats.extend(self._extract_f4m_formats(
-                        full_url + '?hdcore=3.4.0', display_id, f4m_id=format_id))
-                elif original_ext == 'm3u8':
-                    formats.extend(self._extract_m3u8_formats(
-                        full_url, display_id, 'mp4', m3u8_id=format_id))
-                else:
-                    formats.append({
-                        'url': full_url,
-                        'ext': original_ext,
-                        'format_id': format_id,
-                        'quality': 0 if 'HD' in quality else -1,
-                        'preference': 1,
-                    })
-        self._sort_formats(formats)
-
-        subtitles = {}
-        subtitles_data = video_data.find('Subtitles')
-        if subtitles_data is not None:
-            subtitles_list = [{
-                'url': sub.text,
-                'ext': determine_ext(sub.text),
-            } for sub in subtitles_data]
-            if subtitles_list:
-                subtitles['de'] = subtitles_list
-
-        return {
-            'id': video_id,
-            'display_id': display_id,
-            'formats': formats,
-            'title': title,
-            'thumbnails': thumbnails,
-            'timestamp': timestamp,
-            'subtitles': subtitles,
-        }
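Aside (not from the diff): the deleted SrfIE above queried the SRF-only XML integration layer, while the new SRGSSRIE below queries the JSON endpoint parametrised by business unit. A minimal illustration of the two URL templates, using the id from the Snowden test case:

bu, media_type, media_id = 'srf', 'video', '28e1a57d-5b76-4399-8ab3-9097f071e6c5'

# removed SrfIE (XML, hard-wired to srf)
old_url = 'http://il.srgssr.ch/integrationlayer/1.0/ue/srf/%s/play/%s.xml' % (media_type, media_id)

# new SRGSSRIE.get_media_data (JSON, any of srf/rts/rsi/rtr/swi)
new_url = 'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id)

print(old_url)
print(new_url)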

youtube_dl/extractor/srgssr.py (new file)

@@ -0,0 +1,155 @@
+# coding: utf-8
+from __future__ import unicode_literals
+
+import re
+
+from .common import InfoExtractor
+from ..utils import (
+    ExtractorError,
+    parse_iso8601,
+    qualities,
+)
+
+
+class SRGSSRIE(InfoExtractor):
+    _VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
+
+    _ERRORS = {
+        'AGERATING12': 'To protect children under the age of 12, this video is only available between 8 p.m. and 6 a.m.',
+        'AGERATING18': 'To protect children under the age of 18, this video is only available between 11 p.m. and 5 a.m.',
+        # 'ENDDATE': 'For legal reasons, this video was only available for a specified period of time.',
+        'GEOBLOCK': 'For legal reasons, this video is only available in Switzerland.',
+        'LEGAL': 'The video cannot be transmitted for legal reasons.',
+        'STARTDATE': 'This video is not yet available. Please try again later.',
+    }
+
+    def get_media_data(self, bu, media_type, media_id):
+        media_data = self._download_json(
+            'http://il.srgssr.ch/integrationlayer/1.0/ue/%s/%s/play/%s.json' % (bu, media_type, media_id),
+            media_id)[media_type.capitalize()]
+
+        if media_data.get('block') and media_data['block'] in self._ERRORS:
+            raise ExtractorError('%s said: %s' % (self.IE_NAME, self._ERRORS[media_data['block']]), expected=True)
+
+        return media_data
+
+    def _real_extract(self, url):
+        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+
+        if bu == 'rts':
+            return self.url_result('rts:%s' % media_id, 'RTS')
+
+        media_data = self.get_media_data(bu, media_type, media_id)
+
+        metadata = media_data['AssetMetadatas']['AssetMetadata'][0]
+        title = metadata['title']
+        description = metadata.get('description')
+        created_date = media_data.get('createdDate') or metadata.get('createdDate')
+        timestamp = parse_iso8601(created_date)
+
+        thumbnails = [{
+            'id': image.get('id'),
+            'url': image['url'],
+        } for image in media_data.get('Image', {}).get('ImageRepresentations', {}).get('ImageRepresentation', [])]
+
+        preference = qualities(['LQ', 'MQ', 'SD', 'HQ', 'HD'])
+        formats = []
+        for source in media_data.get('Playlists', {}).get('Playlist', []) + media_data.get('Downloads', {}).get('Download', []):
+            protocol = source.get('@protocol')
+            if protocol in ('HTTP-HDS', 'HTTP-HLS'):
+                assets = {}
+                for quality in source['url']:
+                    assets[quality['@quality']] = quality['text']
+                asset_url = assets.get('HD') or assets.get('HQ') or assets.get('SD') or assets.get('MQ') or assets.get('LQ')
+                if '.f4m' in asset_url:
+                    formats.extend(self._extract_f4m_formats(asset_url + '?hdcore=3.4.0', media_id, f4m_id='hds', fatal=False))
+                elif '.m3u8' in asset_url:
+                    formats.extend(self._extract_m3u8_formats(asset_url, media_id, m3u8_id='hls', fatal=False))
+            else:
+                for asset in source['url']:
+                    asset_url = asset['text']
+                    ext = None
+                    if asset_url.startswith('rtmp'):
+                        ext = self._search_regex(r'([a-z0-9]+):[^/]+', asset_url, 'ext')
+                    formats.append({
+                        'format_id': asset['@quality'],
+                        'url': asset_url,
+                        'preference': preference(asset['@quality']),
+                        'ext': ext,
+                    })
+        self._sort_formats(formats)
+
+        return {
+            'id': media_id,
+            'title': title,
+            'description': description,
+            'timestamp': timestamp,
+            'thumbnails': thumbnails,
+            'formats': formats,
+        }
+
+
+class SRGSSRPlayIE(InfoExtractor):
+    _VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'
+
+    _TESTS = [{
+        'url': 'http://www.srf.ch/play/tv/10vor10/video/snowden-beantragt-asyl-in-russland?id=28e1a57d-5b76-4399-8ab3-9097f071e6c5',
+        'md5': '4cd93523723beff51bb4bee974ee238d',
+        'info_dict': {
+            'id': '28e1a57d-5b76-4399-8ab3-9097f071e6c5',
+            'ext': 'm4v',
+            'upload_date': '20130701',
+            'title': 'Snowden beantragt Asyl in Russland',
+            'timestamp': 1372713995,
+        }
+    }, {
+        # No Speichern (Save) button
+        'url': 'http://www.srf.ch/play/tv/top-gear/video/jaguar-xk120-shadow-und-tornado-dampflokomotive?id=677f5829-e473-4823-ac83-a1087fe97faa',
+        'md5': '0a274ce38fda48c53c01890651985bc6',
+        'info_dict': {
+            'id': '677f5829-e473-4823-ac83-a1087fe97faa',
+            'ext': 'flv',
+            'upload_date': '20130710',
+            'title': 'Jaguar XK120, Shadow und Tornado-Dampflokomotive',
+            'description': 'md5:88604432b60d5a38787f152dec89cd56',
+            'timestamp': 1373493600,
+        },
+    }, {
+        'url': 'http://www.rtr.ch/play/radio/actualitad/audio/saira-tujetsch-tuttina-cuntinuar-cun-sedrun-muster-turissem?id=63cb0778-27f8-49af-9284-8c7a8c6d15fc',
+        'info_dict': {
+            'id': '63cb0778-27f8-49af-9284-8c7a8c6d15fc',
+            'ext': 'mp3',
+            'upload_date': '20151013',
+            'title': 'Saira: Tujetsch - tuttina cuntinuar cun Sedrun Mustér Turissem',
+            'timestamp': 1444750398,
+        },
+        'params': {
+            # rtmp download
+            'skip_download': True,
+        },
+    }, {
+        'url': 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260',
+        'md5': '67a2a9ae4e8e62a68d0e9820cc9782df',
+        'info_dict': {
+            'id': '6348260',
+            'display_id': '6348260',
+            'ext': 'mp4',
+            'duration': 1796,
+            'title': 'Le 19h30',
+            'description': '',
+            'uploader': '19h30',
+            'upload_date': '20141201',
+            'timestamp': 1417458600,
+            'thumbnail': 're:^https?://.*\.image',
+            'view_count': int,
+        },
+        'params': {
+            # m3u8 download
+            'skip_download': True,
+        }
+    }]
+
+    def _real_extract(self, url):
+        bu, media_type, media_id = re.match(self._VALID_URL, url).groups()
+        # other info can be extracted from url + '&layout=json'
+        return self.url_result('srgssr:%s:%s:%s' % (bu[:3], media_type, media_id), 'SRGSSR')
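Orientation note (not part of the commit): how the two new extractors hand a URL off to each other. The two pattern strings are copied verbatim from the hunk above; the variable names are local to this sketch.

import re

SRGSSR_VALID_URL = r'(?:https?://tp\.srgssr\.ch/p(?:/[^/]+)+\?urn=urn|srgssr):(?P<bu>srf|rts|rsi|rtr|swi):(?:[^:]+:)?(?P<type>video|audio):(?P<id>[0-9a-f\-]{36}|\d+)'
PLAY_VALID_URL = r'https?://(?:(?:www|play)\.)?(?P<bu>srf|rts|rsi|rtr|swissinfo)\.ch/play/(?:tv|radio)/[^/]+/(?P<type>video|audio)/[^?]+\?id=(?P<id>[0-9a-f\-]{36}|\d+)'

play_url = 'http://www.rts.ch/play/tv/-/video/le-19h30?id=6348260'

# SRGSSRPlayIE._real_extract: rewrite the play URL as an srgssr: URN
bu, media_type, media_id = re.match(PLAY_VALID_URL, play_url).groups()
urn = 'srgssr:%s:%s:%s' % (bu[:3], media_type, media_id)
print(urn)  # srgssr:rts:video:6348260

# SRGSSRIE._real_extract: parse the URN; RTS content is then delegated to RTSIE as 'rts:<id>'
bu, media_type, media_id = re.match(SRGSSR_VALID_URL, urn).groups()
assert (bu, media_type, media_id) == ('rts', 'video', '6348260')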