Conflicts:
	.gitignore
	LATEST_VERSION
	Makefile
	youtube-dl
	youtube-dl.exe
	youtube_dl/InfoExtractors.py
	youtube_dl/__init__.py
commit 258d5850c9 (pull/8/head), Jeff Crouse, 12 years ago

.gitignore

@ -1,17 +1,19 @@
*.pyc
*.pyo
*~
+*.DS_Store
wine-py2exe/
py2exe.log
-youtube-dl
+*.kate-swp
+build/
+dist/
+MANIFEST
+README.txt
youtube-dl.1
-LATEST_VERSION
-#OS X
-.DS_Store
-.AppleDouble
-.LSOverride
-Icon
-._*
-.Spotlight-V100
-.Trashes
+youtube-dl.bash-completion
+youtube-dl
+youtube-dl.exe
+youtube-dl.tar.gz
+.coverage
+cover/
+updates_key.pem

@ -0,0 +1,17 @@
updates_key.pem
*.pyc
*.pyo
youtube-dl.exe
wine-py2exe/
py2exe.log
*.kate-swp
build/
dist/
MANIFEST
*.DS_Store
youtube-dl.tar.gz
.coverage
cover/
__pycache__/
.git/
*~

@ -1,9 +1,14 @@
language: python
-#specify the python version
python:
  - "2.6"
  - "2.7"
-#command to install the setup
-install:
-# command to run tests
-script: nosetests test --nocapture
+  - "3.3"
+script: nosetests test --verbose
+notifications:
+  email:
+    - filippo.valsorda@gmail.com
+    - phihag@phihag.de
+  irc:
+    channels:
+      - "irc.freenode.org#youtube-dl"
+    skip_join: true

@ -0,0 +1,14 @@
2013.01.02 Codename: GIULIA
* Add support for ComedyCentral clips <nto>
* Corrected Vimeo description fetching <Nick Daniels>
* Added the --no-post-overwrites argument <Barbu Paul - Gheorghe>
* --verbose offers more environment info
* New info_dict field: uploader_id
* New updates system, with signature checking
* New IEs: NBA, JustinTV, FunnyOrDie, TweetReel, Steam, Ustream
* Fixed IEs: BlipTv
* Fixed for Python 3 IEs: Xvideo, Youku, XNXX, Dailymotion, Vimeo, InfoQ
* Simplified IEs and test code
* Various (Python 3 and other) fixes
* Revamped and expanded tests

@ -0,0 +1 @@
2012.10.09

@ -0,0 +1,24 @@
This is free and unencumbered software released into the public domain.
Anyone is free to copy, modify, publish, use, compile, sell, or
distribute this software, either in source code form or as a compiled
binary, for any purpose, commercial or non-commercial, and by any
means.
In jurisdictions that recognize copyright laws, the author or authors
of this software dedicate any and all copyright interest in the
software to the public domain. We make this dedication for the benefit
of the public at large and to the detriment of our heirs and
successors. We intend this dedication to be an overt act of
relinquishment in perpetuity of all present and future rights to this
software under copyright law.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS BE LIABLE FOR ANY CLAIM, DAMAGES OR
OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
For more information, please refer to <http://unlicense.org/>

@ -0,0 +1,3 @@
include README.md
include test/*.py
include test/*.json

@ -1,8 +1,7 @@
-all: youtube-dl README.md youtube-dl.1 youtube-dl.bash-completion LATEST_VERSION
-# TODO: re-add youtube-dl.exe, and make sure it's 1. safe and 2. doesn't need sudo
+all: youtube-dl README.md README.txt youtube-dl.1 youtube-dl.bash-completion

clean:
-        rm -f youtube-dl youtube-dl.exe youtube-dl.1 LATEST_VERSION youtube_dl/*.pyc
+        rm -rf youtube-dl youtube-dl.exe youtube-dl.1 youtube-dl.bash-completion README.txt MANIFEST build/ dist/ .coverage cover/

PREFIX=/usr/local
BINDIR=$(PREFIX)/bin
@ -17,43 +16,32 @@ install: youtube-dl youtube-dl.1 youtube-dl.bash-completion
        install -d $(DESTDIR)$(SYSCONFDIR)/bash_completion.d
        install -m 644 youtube-dl.bash-completion $(DESTDIR)$(SYSCONFDIR)/bash_completion.d/youtube-dl

-.PHONY: all clean install youtube-dl.bash-completion
-# TODO un-phony README.md and youtube-dl.bash_completion by reading from .in files and generating from them
+test:
+        #nosetests --with-coverage --cover-package=youtube_dl --cover-html --verbose --processes 4 test
+        nosetests --verbose test
+
+.PHONY: all clean install test

youtube-dl: youtube_dl/*.py
-        zip --quiet --junk-paths youtube-dl youtube_dl/*.py
+        zip --quiet youtube-dl youtube_dl/*.py
+        zip --quiet --junk-paths youtube-dl youtube_dl/__main__.py
        echo '#!/usr/bin/env python' > youtube-dl
        cat youtube-dl.zip >> youtube-dl
        rm youtube-dl.zip
        chmod a+x youtube-dl

-youtube-dl.exe: youtube_dl/*.py
-        bash devscripts/wine-py2exe.sh build_exe.py

README.md: youtube_dl/*.py
-        @options=$$(COLUMNS=80 python -m youtube_dl --help | sed -e '1,/.*General Options.*/ d' -e 's/^\W\{2\}\(\w\)/## \1/') && \
-                header=$$(sed -e '/.*# OPTIONS/,$$ d' README.md) && \
-                footer=$$(sed -e '1,/.*# FAQ/ d' README.md) && \
-                echo "$${header}" > README.md && \
-                echo >> README.md && \
-                echo '# OPTIONS' >> README.md && \
-                echo "$${options}" >> README.md&& \
-                echo >> README.md && \
-                echo '# FAQ' >> README.md && \
-                echo "$${footer}" >> README.md
+        COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py

-youtube-dl.1:
-        pandoc -s -w man README.md -o youtube-dl.1
+README.txt: README.md
+        pandoc -f markdown -t plain README.md -o README.txt

-youtube-dl.bash-completion:
-        @options=`egrep -o '(--[a-z-]+) ' README.md | sort -u | xargs echo` && \
-                content=`sed "s/opts=\"[^\"]*\"/opts=\"$${options}\"/g" youtube-dl.bash-completion` && \
-                echo "$${content}" > youtube-dl.bash-completion
+youtube-dl.1: README.md
+        pandoc -s -f markdown -t man README.md -o youtube-dl.1

-LATEST_VERSION: youtube_dl/__init__.py
-        python -m youtube_dl --version > LATEST_VERSION
+youtube-dl.bash-completion: youtube_dl/*.py devscripts/bash-completion.in
+        python devscripts/bash-completion.py

-test:
-        nosetests2 --nocapture test
-
-.PHONY: default compile update update-latest update-readme test clean
+youtube-dl.tar.gz: all
+        tar -cvzf youtube-dl.tar.gz -s "|^./|./youtube-dl/|" \
+                --exclude-from=".tarignore" -- .

@ -1,4 +1,4 @@
-% youtube-dl(1)
+% YOUTUBE-DL(1)
# NAME
youtube-dl
@ -20,6 +20,11 @@ which means you can modify it, redistribute it or use it however you like.
-i, --ignore-errors continue on download errors
-r, --rate-limit LIMIT download rate limit (e.g. 50k or 44.6m)
-R, --retries RETRIES number of retries (default is 10)
--buffer-size SIZE size of download buffer (e.g. 1024 or 16k) (default
is 1024)
--no-resize-buffer do not automatically adjust the buffer size. By
default, the buffer size is automatically resized
from an initial value of SIZE.
--dump-user-agent display the current browser identification
--user-agent UA specify a custom user agent
--list-extractors List all supported extractors and the URLs they
@ -37,16 +42,22 @@ which means you can modify it, redistribute it or use it however you like.
Filesystem Options:
-t, --title use title in file name
--id use video ID in file name
--l, --literal use literal title in file name
+-l, --literal [deprecated] alias of --title
-A, --auto-number number downloaded files starting from 00000
--o, --output TEMPLATE output filename template. Use %(stitle)s to get the
-title, %(uploader)s for the uploader name,
-%(autonumber)s to get an automatically incremented
-number, %(ext)s for the filename extension,
-%(upload_date)s for the upload date (YYYYMMDD),
-%(extractor)s for the provider (youtube, metacafe,
-etc), %(id)s for the video id and %% for a literal
-percent. Use - to output to stdout.
+-o, --output TEMPLATE output filename template. Use %(title)s to get the
+title, %(uploader)s for the uploader name,
+%(uploader_id)s for the uploader nickname if
+different, %(autonumber)s to get an automatically
+incremented number, %(ext)s for the filename
+extension, %(upload_date)s for the upload date
+(YYYYMMDD), %(extractor)s for the provider
+(youtube, metacafe, etc), %(id)s for the video id
+and %% for a literal percent. Use - to output to
+stdout. Can also be used to download to a different
+directory, for example with -o '/my/downloads/%(upl
+oader)s/%(title)s-%(id)s.%(ext)s' .
+--restrict-filenames Restrict filenames to only ASCII characters, and
+avoid "&" and spaces in filenames
-a, --batch-file FILE file containing URLs to download ('-' for stdin)
-w, --no-overwrites do not overwrite files
-c, --continue resume partially downloaded files
@ -101,6 +112,34 @@ which means you can modify it, redistribute it or use it however you like.
specific bitrate like 128K (default 5)
-k, --keep-video keeps the video file on disk after the post-
processing; the video is erased by default
--no-post-overwrites do not overwrite post-processed files; the post-
processed files are overwritten by default
# CONFIGURATION
You can configure youtube-dl by placing default arguments (such as `--extract-audio --no-mtime` to always extract the audio and not copy the mtime) into `/etc/youtube-dl.conf` and/or `~/.local/config/youtube-dl.conf`.
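For illustration only (this example is not part of the commit), such a configuration file just lists options exactly as they would be given on the command line, separated by whitespace or newlines, e.g. a hypothetical `~/.local/config/youtube-dl.conf`:

    --extract-audio
    --no-mtime
    --restrict-filenames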
# OUTPUT TEMPLATE
The `-o` option allows users to indicate a template for the output file names. The basic usage is not to set any template arguments when downloading a single file, like in `youtube-dl -o funny_video.flv "http://some/video"`. However, it may contain special sequences that will be replaced when downloading each video. The special sequences have the format `%(NAME)s`. To clarify, that is a percent symbol followed by a name in parenthesis, followed by a lowercase S. Allowed names are:
- `id`: The sequence will be replaced by the video identifier.
- `url`: The sequence will be replaced by the video URL.
- `uploader`: The sequence will be replaced by the nickname of the person who uploaded the video.
- `upload_date`: The sequence will be replaced by the upload date in YYYYMMDD format.
- `title`: The sequence will be replaced by the video title.
- `ext`: The sequence will be replaced by the appropriate extension (like flv or mp4).
- `epoch`: The sequence will be replaced by the Unix epoch when creating the file.
- `autonumber`: The sequence will be replaced by a five-digit number that will be increased with each download, starting at zero.
The current default template is `%(id)s.%(ext)s`, but that will be switched to `%(title)s-%(id)s.%(ext)s` (which can be requested with `-t` at the moment).
In some cases, you don't want special characters such as 中, spaces, or &, such as when transferring the downloaded filename to a Windows system or the filename through an 8bit-unsafe channel. In these cases, add the `--restrict-filenames` flag to get a shorter title:
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc
youtube-dl test video ''_ä↭𝕐.mp4 # All kinds of weird characters
$ youtube-dl --get-filename -o "%(title)s.%(ext)s" BaW_jenozKc --restrict-filenames
youtube-dl_test_video_.mp4 # A simple file name
# FAQ

@ -137,17 +176,9 @@ The error
means you're using an outdated version of Python. Please update to Python 2.6 or 2.7.

-To run youtube-dl under Python 2.5, you'll have to manually check it out like this:
-    git clone git://github.com/rg3/youtube-dl.git
-    cd youtube-dl
-    python -m youtube_dl --help
-Please note that Python 2.5 is not supported anymore.

### What is this binary file? Where has the code gone?

-Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repo to see the code. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make compile`.
+Since June 2012 (#342) youtube-dl is packed as an executable zipfile, simply unzip it (might need renaming to `youtube-dl.zip` first on some systems) or clone the git repository, as laid out above. If you modify the code, you can run it by executing the `__main__.py` file. To recompile the executable, run `make youtube-dl`.

### The exe throws a *Runtime error from Visual C++*

@ -166,6 +197,9 @@ Bugs and suggestions should be reported at: <https://github.com/rg3/youtube-dl/i

Please include:
* Your exact command line, like `youtube-dl -t "http://www.youtube.com/watch?v=uHlDtZ6Oc3s&feature=channel_video_title"`. A common mistake is not to escape the `&`. Putting URLs in quotes should solve this problem.
+* If possible re-run the command with `--verbose`, and include the full output, it is really helpful to us.
* The output of `youtube-dl --version`
* The output of `python --version`
* The name and version of your Operating System ("Ubuntu 11.04 x64" or "Windows 7 x64" is usually enough).
For discussions, join us in the irc channel #youtube-dl on freenode.

@ -0,0 +1,6 @@
#!/usr/bin/env python
import youtube_dl
if __name__ == '__main__':
youtube_dl.main()

@ -1,48 +0,0 @@
from distutils.core import setup
import py2exe
import sys, os
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
# If run without args, build executables
if len(sys.argv) == 1:
sys.argv.append("py2exe")
# os.chdir(os.path.dirname(os.path.abspath(sys.argv[0]))) # conflict with wine-py2exe.sh
sys.path.append('./youtube_dl')
options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
console = [{
"script":"./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
init_file = open('./youtube_dl/__init__.py')
for line in init_file.readlines():
if line.startswith('__version__'):
version = line[11:].strip(" ='\n")
break
else:
version = ''
setup(name='youtube-dl',
version=version,
description='Small command-line program to download videos from YouTube.com and other video sites',
url='https://github.com/rg3/youtube-dl',
packages=['youtube_dl'],
console = console,
options = {"py2exe": options},
zipfile = None,
)
import shutil
shutil.rmtree("build")

@ -0,0 +1,14 @@
__youtube-dl()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="{{flags}}"
if [[ ${cur} == * ]] ; then
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
}
complete -F __youtube-dl youtube-dl

@ -0,0 +1,26 @@
#!/usr/bin/env python
import os
from os.path import dirname as dirn
import sys
sys.path.append(dirn(dirn((os.path.abspath(__file__)))))
import youtube_dl
BASH_COMPLETION_FILE = "youtube-dl.bash-completion"
BASH_COMPLETION_TEMPLATE = "devscripts/bash-completion.in"
def build_completion(opt_parser):
opts_flag = []
for group in opt_parser.option_groups:
for option in group.option_list:
#for every long flag
opts_flag.append(option.get_opt_string())
with open(BASH_COMPLETION_TEMPLATE) as f:
template = f.read()
with open(BASH_COMPLETION_FILE, "w") as f:
#just using the special char
filled_template = template.replace("{{flags}}", " ".join(opts_flag))
f.write(filled_template)
parser = youtube_dl.parseOpts()[0]
build_completion(parser)
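As a usage note (not part of the diff itself), the new Makefile above ties this script to the `youtube-dl.bash-completion` target, so the completion file is regenerated and installed with something like:

    make youtube-dl.bash-completion   # runs devscripts/bash-completion.py on bash-completion.in
    make install                      # copies it to $(SYSCONFDIR)/bash_completion.d/youtube-dl (may need root)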

@ -0,0 +1,33 @@
#!/usr/bin/env python3
import json
import sys
import hashlib
import urllib.request
if len(sys.argv) <= 1:
print('Specify the version number as parameter')
sys.exit()
version = sys.argv[1]
with open('update/LATEST_VERSION', 'w') as f:
f.write(version)
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
new_version = {}
filenames = {'bin': 'youtube-dl', 'exe': 'youtube-dl.exe', 'tar': 'youtube-dl-%s.tar.gz' % version}
for key, filename in filenames.items():
print('Downloading and checksumming %s...' %filename)
url = 'http://youtube-dl.org/downloads/%s/%s' % (version, filename)
data = urllib.request.urlopen(url).read()
sha256sum = hashlib.sha256(data).hexdigest()
new_version[key] = (url, sha256sum)
versions_info['versions'][version] = new_version
versions_info['latest'] = version
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
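For orientation (inferred from this script, sign-versions.py and the updater further down; the JSON file itself is not shown in this diff), `update/versions.json` ends up with roughly this shape:

    {
        "latest": "2013.01.02",
        "signature": "<hex RSA signature added by sign-versions.py>",
        "versions": {
            "2013.01.02": {
                "bin": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl", "<sha256>"],
                "exe": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl.exe", "<sha256>"],
                "tar": ["http://youtube-dl.org/downloads/2013.01.02/youtube-dl-2013.01.02.tar.gz", "<sha256>"]
            }
        }
    }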

@ -0,0 +1,32 @@
#!/usr/bin/env python3
import hashlib
import shutil
import subprocess
import tempfile
import urllib.request
import json
versions_info = json.load(open('update/versions.json'))
version = versions_info['latest']
URL = versions_info['versions'][version]['bin'][0]
data = urllib.request.urlopen(URL).read()
# Read template page
with open('download.html.in', 'r', encoding='utf-8') as tmplf:
template = tmplf.read()
md5sum = hashlib.md5(data).hexdigest()
sha1sum = hashlib.sha1(data).hexdigest()
sha256sum = hashlib.sha256(data).hexdigest()
template = template.replace('@PROGRAM_VERSION@', version)
template = template.replace('@PROGRAM_URL@', URL)
template = template.replace('@PROGRAM_MD5SUM@', md5sum)
template = template.replace('@PROGRAM_SHA1SUM@', sha1sum)
template = template.replace('@PROGRAM_SHA256SUM@', sha256sum)
template = template.replace('@EXE_URL@', versions_info['versions'][version]['exe'][0])
template = template.replace('@EXE_SHA256SUM@', versions_info['versions'][version]['exe'][1])
template = template.replace('@TAR_URL@', versions_info['versions'][version]['tar'][0])
template = template.replace('@TAR_SHA256SUM@', versions_info['versions'][version]['tar'][1])
with open('download.html', 'w', encoding='utf-8') as dlf:
dlf.write(template)

@ -0,0 +1,28 @@
#!/usr/bin/env python3
import rsa
import json
from binascii import hexlify
versions_info = json.load(open('update/versions.json'))
if 'signature' in versions_info:
del versions_info['signature']
print('Enter the PKCS1 private key, followed by a blank line:')
privkey = ''
while True:
try:
line = input()
except EOFError:
break
if line == '':
break
privkey += line + '\n'
privkey = bytes(privkey, 'ascii')
privkey = rsa.PrivateKey.load_pkcs1(privkey)
signature = hexlify(rsa.pkcs1.sign(json.dumps(versions_info, sort_keys=True).encode('utf-8'), privkey, 'SHA-256')).decode()
print('signature: ' + signature)
versions_info['signature'] = signature
json.dump(versions_info, open('update/versions.json', 'w'), indent=4, sort_keys=True)
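For reference, a sketch of the matching verification (an assumption on my part, using the third-party `rsa` module that release.sh installs via pip, not code from the repository; the shipped updater further down re-implements this by hand to avoid the dependency):

    import json
    import rsa

    versions_info = json.load(open('update/versions.json'))
    signature = bytes.fromhex(versions_info.pop('signature'))
    # UPDATES_RSA_KEY is the (n, e) pair hard-coded in the updater script further down in this diff
    pubkey = rsa.PublicKey(UPDATES_RSA_KEY[0], UPDATES_RSA_KEY[1])
    # raises rsa.VerificationError if versions.json was tampered with
    rsa.verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, pubkey)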

@ -0,0 +1,21 @@
#!/usr/bin/env python
# coding: utf-8
from __future__ import with_statement
import datetime
import glob
import io # For Python 2 compatibilty
import os
import re
year = str(datetime.datetime.now().year)
for fn in glob.glob('*.html*'):
with io.open(fn, encoding='utf-8') as f:
content = f.read()
newc = re.sub(u'(?P<copyright>Copyright © 2006-)(?P<year>[0-9]{4})', u'Copyright © 2006-' + year, content)
if content != newc:
tmpFn = fn + '.part'
with io.open(tmpFn, 'wt', encoding='utf-8') as outf:
outf.write(newc)
os.rename(tmpFn, fn)

@ -0,0 +1,20 @@
import sys
import re
README_FILE = 'README.md'
helptext = sys.stdin.read()
with open(README_FILE) as f:
oldreadme = f.read()
header = oldreadme[:oldreadme.index('# OPTIONS')]
footer = oldreadme[oldreadme.index('# CONFIGURATION'):]
options = helptext[helptext.index(' General Options:')+19:]
options = re.sub(r'^ (\w.+)$', r'## \1', options, flags=re.M)
options = '# OPTIONS\n' + options + '\n'
with open(README_FILE, 'w') as f:
f.write(header)
f.write(options)
f.write(footer)
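(Usage note: as wired up in the Makefile above, this script reads the `--help` output on stdin, i.e. `COLUMNS=80 python -m youtube_dl --help | python devscripts/make_readme.py`, and rewrites the OPTIONS section of README.md in place.)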

@ -1,11 +1,85 @@
#!/bin/sh
# IMPORTANT: the following assumptions are made
# * the GH repo is on the origin remote
# * the gh-pages branch is named so locally
# * the git config user.signingkey is properly set
# You will need
# pip install coverage nose rsa
# TODO
# release notes
# make hash on local files
set -e
if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi if [ -z "$1" ]; then echo "ERROR: specify version number like this: $0 1994.09.06"; exit 1; fi
version="$1" version="$1"
if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi if [ ! -z "`git tag | grep "$version"`" ]; then echo 'ERROR: version already present'; exit 1; fi
if [ ! -z "`git status --porcelain`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi if [ ! -z "`git status --porcelain | grep -v CHANGELOG`" ]; then echo 'ERROR: the working directory is not clean; commit or stash changes'; exit 1; fi
sed -i "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/__init__.py if [ ! -f "updates_key.pem" ]; then echo 'ERROR: updates_key.pem missing'; exit 1; fi
make all
git add -A echo "\n### First of all, testing..."
make clean
nosetests --with-coverage --cover-package=youtube_dl --cover-html test || exit 1
echo "\n### Changing version in version.py..."
sed -i~ "s/__version__ = '.*'/__version__ = '$version'/" youtube_dl/version.py
echo "\n### Committing CHANGELOG README.md and youtube_dl/version.py..."
make README.md
git add CHANGELOG README.md youtube_dl/version.py
git commit -m "release $version" git commit -m "release $version"
git tag -m "Release $version" "$version"
echo "\n### Now tagging, signing and pushing..."
git tag -s -m "Release $version" "$version"
git show "$version"
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
MASTER=$(git rev-parse --abbrev-ref HEAD)
git push origin $MASTER:master
git push origin "$version"
echo "\n### OK, now it is time to build the binaries..."
REV=$(git rev-parse HEAD)
make youtube-dl youtube-dl.tar.gz
wget "http://jeromelaheurte.net:8142/download/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe || \
wget "http://jeromelaheurte.net:8142/build/rg3/youtube-dl/youtube-dl.exe?rev=$REV" -O youtube-dl.exe
mkdir -p "update_staging/$version"
mv youtube-dl youtube-dl.exe "update_staging/$version"
mv youtube-dl.tar.gz "update_staging/$version/youtube-dl-$version.tar.gz"
RELEASE_FILES=youtube-dl youtube-dl.exe youtube-dl-$version.tar.gz
(cd update_staging/$version/ && md5sum $RELEASE_FILES > MD5SUMS)
(cd update_staging/$version/ && sha1sum $RELEASE_FILES > SHA1SUMS)
(cd update_staging/$version/ && sha256sum $RELEASE_FILES > SHA2-256SUMS)
(cd update_staging/$version/ && sha512sum $RELEASE_FILES > SHA2-512SUMS)
git checkout HEAD -- youtube-dl youtube-dl.exe
echo "\n### Signing and uploading the new binaries to youtube-dl.org..."
for f in $RELEASE_FILES; do gpg --detach-sig "update_staging/$version/$f"; done
scp -r "update_staging/$version" ytdl@youtube-dl.org:html/downloads/
rm -r update_staging
echo "\n### Now switching to gh-pages..."
git checkout gh-pages
git checkout "$MASTER" -- devscripts/gh-pages/
git reset devscripts/gh-pages/
devscripts/gh-pages/add-version.py $version
devscripts/gh-pages/sign-versions.py < updates_key.pem
devscripts/gh-pages/generate-download.py
devscripts/gh-pages/update-copyright.py
rm -r test_coverage
mv cover test_coverage
git add *.html *.html.in update test_coverage
git commit -m "release $version"
git show HEAD
read -p "Is it good, can I push? (y/n) " -n 1
if [[ ! $REPLY =~ ^[Yy]$ ]]; then exit 1; fi
echo
git push origin gh-pages
echo "\n### DONE!"
rm -r devscripts
git checkout $MASTER

@ -0,0 +1,40 @@
#!/usr/bin/env python
import sys, os
try:
import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'The new location of the binaries is https://github.com/rg3/youtube-dl/downloads, not the git repository.\n\n')
try:
raw_input()
except NameError: # Python 3
input()
filename = sys.argv[0]
API_URL = "https://api.github.com/repos/rg3/youtube-dl/downloads"
BIN_URL = "https://github.com/downloads/rg3/youtube-dl/youtube-dl"
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
try:
urlh = compat_urllib_request.urlopen(BIN_URL)
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

@ -0,0 +1,12 @@
from distutils.core import setup
import py2exe
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
setup(console=['youtube-dl.py'], options={ "py2exe": py2exe_options }, zipfile=None)

@ -0,0 +1,102 @@
#!/usr/bin/env python
import sys, os
import urllib2
import json, hashlib
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
sys.stderr.write(u'Hi! We changed distribution method and now youtube-dl needs to update itself one more time.\n')
sys.stderr.write(u'This will only happen once. Simply press enter to go on. Sorry for the trouble!\n')
sys.stderr.write(u'From now on, get the binaries from http://rg3.github.com/youtube-dl/download.html, not from the git repository.\n\n')
raw_input()
filename = sys.argv[0]
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not os.access(filename, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % filename)
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
sys.exit('ERROR: no write permissions on %s' % directory)
try:
versions_info = urllib2.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
sys.exit(u'ERROR: can\'t obtain versions info. Please try again later.')
if not 'signature' in versions_info:
sys.exit(u'ERROR: the versions file is not signed or corrupted. Aborting.')
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True), signature, UPDATES_RSA_KEY):
sys.exit(u'ERROR: the versions file signature is invalid. Aborting.')
version = versions_info['versions'][versions_info['latest']]
try:
urlh = urllib2.urlopen(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
sys.exit('ERROR: unable to download latest version')
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
sys.exit(u'ERROR: the downloaded file hash does not match. Aborting.')
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
sys.exit(u'ERROR: unable to write the new version')
try:
bat = os.path.join(directory, 'youtube-dl-updater.bat')
b = open(bat, 'w')
b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
b.close()
os.startfile(bat)
except (IOError, OSError) as err:
sys.exit('ERROR: unable to overwrite current version')
sys.stderr.write(u'Done! Now you can run youtube-dl.\n')

@ -0,0 +1,74 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import print_function
from distutils.core import setup
import pkg_resources
import sys
try:
import py2exe
"""This will create an exe that needs Microsoft Visual C++ 2008 Redistributable Package"""
except ImportError:
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
print("Cannot import py2exe", file=sys.stderr)
exit(1)
py2exe_options = {
"bundle_files": 1,
"compressed": 1,
"optimize": 2,
"dist_dir": '.',
"dll_excludes": ['w9xpopen.exe']
}
py2exe_console = [{
"script": "./youtube_dl/__main__.py",
"dest_base": "youtube-dl",
}]
py2exe_params = {
'console': py2exe_console,
'options': { "py2exe": py2exe_options },
'zipfile': None
}
if len(sys.argv) >= 2 and sys.argv[1] == 'py2exe':
params = py2exe_params
else:
params = {
'scripts': ['bin/youtube-dl'],
'data_files': [('etc/bash_completion.d', ['youtube-dl.bash-completion']), # Installing system-wide would require sudo...
('share/doc/youtube_dl', ['README.txt']),
('share/man/man1/', ['youtube-dl.1'])]
}
# Get the version from youtube_dl/version.py without importing the package
exec(compile(open('youtube_dl/version.py').read(), 'youtube_dl/version.py', 'exec'))
setup(
name = 'youtube_dl',
version = __version__,
description = 'YouTube video downloader',
long_description = 'Small command-line program to download videos from YouTube.com and other video sites.',
url = 'https://github.com/rg3/youtube-dl',
author = 'Ricardo Garcia',
maintainer = 'Philipp Hagemeister',
maintainer_email = 'phihag@phihag.de',
packages = ['youtube_dl'],
# Provokes warning on most systems (why?!)
#test_suite = 'nose.collector',
#test_requires = ['nosetest'],
classifiers = [
"Topic :: Multimedia :: Video",
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"License :: Public Domain",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3"
],
**params
)
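As a usage sketch (not spelled out in the diff): `python setup.py install` takes the regular `params` branch above, while `python setup.py py2exe` builds youtube-dl.exe in the current directory and needs py2exe plus the Microsoft Visual C++ 2008 runtime, as the docstring warns:

    python setup.py install    # scripts, man page, bash completion, README.txt
    python setup.py py2exe     # Windows build (assumes py2exe is importable)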

@ -1 +1,40 @@
{"username": null, "listformats": null, "skip_download": false, "usenetrc": false, "max_downloads": null, "noprogress": false, "forcethumbnail": false, "forceformat": false, "format_limit": null, "ratelimit": null, "nooverwrites": false, "forceurl": false, "writeinfojson": false, "simulate": false, "playliststart": 1, "continuedl": true, "password": null, "prefer_free_formats": false, "nopart": false, "retries": 10, "updatetime": true, "consoletitle": false, "verbose": true, "forcefilename": false, "ignoreerrors": false, "logtostderr": false, "format": null, "subtitleslang": null, "quiet": false, "outtmpl": "%(id)s.%(ext)s", "rejecttitle": null, "playlistend": -1, "writedescription": false, "forcetitle": false, "forcedescription": false, "writesubtitles": false, "matchtitle": null} {
"consoletitle": false,
"continuedl": true,
"forcedescription": false,
"forcefilename": false,
"forceformat": false,
"forcethumbnail": false,
"forcetitle": false,
"forceurl": false,
"format": null,
"format_limit": null,
"ignoreerrors": false,
"listformats": null,
"logtostderr": false,
"matchtitle": null,
"max_downloads": null,
"nooverwrites": false,
"nopart": false,
"noprogress": false,
"outtmpl": "%(id)s.%(ext)s",
"password": null,
"playlistend": -1,
"playliststart": 1,
"prefer_free_formats": false,
"quiet": false,
"ratelimit": null,
"rejecttitle": null,
"retries": 10,
"simulate": false,
"skip_download": false,
"subtitleslang": null,
"test": true,
"updatetime": true,
"usenetrc": false,
"username": null,
"verbose": true,
"writedescription": false,
"writeinfojson": true,
"writesubtitles": false
}

@ -0,0 +1,27 @@
#!/usr/bin/env python
import sys
import unittest
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeIE, YoutubePlaylistIE
class TestAllURLsMatching(unittest.TestCase):
def test_youtube_playlist_matching(self):
self.assertTrue(YoutubePlaylistIE().suitable(u'ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8'))
self.assertTrue(YoutubePlaylistIE().suitable(u'PL63F0C78739B09958'))
self.assertFalse(YoutubePlaylistIE().suitable(u'PLtS2H6bU1M'))
def test_youtube_matching(self):
self.assertTrue(YoutubeIE().suitable(u'PLtS2H6bU1M'))
def test_youtube_extract(self):
self.assertEqual(YoutubeIE()._extract_id('http://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?&v=BaW_jenozKc'), 'BaW_jenozKc')
self.assertEqual(YoutubeIE()._extract_id('https://www.youtube.com/watch?feature=player_embedded&v=BaW_jenozKc'), 'BaW_jenozKc')
if __name__ == '__main__':
unittest.main()

@ -1,93 +1,125 @@
-#!/usr/bin/env python2
+#!/usr/bin/env python
import unittest
import errno
import hashlib
import io
import os
import json
import unittest
import sys
import hashlib
import socket
# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import youtube_dl.FileDownloader
import youtube_dl.InfoExtractors
from youtube_dl.utils import *
DEF_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'tests.json')
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
def _try_rm(filename):
""" Remove a file if it exists """
try:
os.remove(filename)
except OSError as ose:
if ose.errno != errno.ENOENT:
raise
class FileDownloader(youtube_dl.FileDownloader):
def __init__(self, *args, **kwargs):
self.to_stderr = self.to_screen
self.processed_info_dicts = []
return youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
def process_info(self, info_dict):
self.processed_info_dicts.append(info_dict)
return youtube_dl.FileDownloader.process_info(self, info_dict)
def _file_md5(fn):
with open(fn, 'rb') as f:
return hashlib.md5(f.read()).hexdigest()
with io.open(DEF_FILE, encoding='utf-8') as deff:
defs = json.load(deff)
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
class TestDownload(unittest.TestCase):
def setUp(self):
self.parameters = parameters
self.defs = defs
### Dynamically generate tests
def generator(test_case):
def test_template(self):
ie = getattr(youtube_dl.InfoExtractors, test_case['name'] + 'IE')
if not ie._WORKING:
print('Skipping: IE marked as not _WORKING')
return
if 'playlist' not in test_case and not test_case['file']:
print('Skipping: No output file specified')
return
if 'skip' in test_case:
print('Skipping: {0}'.format(test_case['skip']))
return
params = self.parameters.copy()
params.update(test_case.get('params', {}))
fd = FileDownloader(params)
fd.add_info_extractor(ie())
for ien in test_case.get('add_ie', []):
fd.add_info_extractor(getattr(youtube_dl.InfoExtractors, ien + 'IE')())
test_cases = test_case.get('playlist', [test_case])
for tc in test_cases:
_try_rm(tc['file'])
_try_rm(tc['file'] + '.part')
_try_rm(tc['file'] + '.info.json')
try:
fd.download([test_case['url']])
for tc in test_cases:
if not test_case.get('params', {}).get('skip_download', False):
self.assertTrue(os.path.exists(tc['file']))
self.assertTrue(os.path.exists(tc['file'] + '.info.json'))
if 'md5' in tc:
md5_for_file = _file_md5(tc['file'])
self.assertEqual(md5_for_file, tc['md5'])
with io.open(tc['file'] + '.info.json', encoding='utf-8') as infof:
info_dict = json.load(infof)
for (info_field, value) in tc.get('info_dict', {}).items():
if value.startswith('md5:'):
md5_info_value = hashlib.md5(info_dict.get(info_field, '')).hexdigest()
self.assertEqual(value[3:], md5_info_value)
else:
self.assertEqual(value, info_dict.get(info_field))
finally:
for tc in test_cases:
_try_rm(tc['file'])
_try_rm(tc['file'] + '.part')
_try_rm(tc['file'] + '.info.json')
return test_template
### And add them to TestDownload
for test_case in defs:
test_method = generator(test_case)
test_method.__name__ = "test_{0}".format(test_case["name"])
setattr(TestDownload, test_method.__name__, test_method)
del test_method
-from youtube_dl.FileDownloader import FileDownloader
-from youtube_dl.InfoExtractors import YoutubeIE, DailymotionIE
+if __name__ == '__main__':
+    unittest.main()
from youtube_dl.InfoExtractors import MetacafeIE, BlipTVIE
class DownloadTest(unittest.TestCase):
PARAMETERS_FILE = "test/parameters.json"
#calculated with md5sum:
#md5sum (GNU coreutils) 8.19
YOUTUBE_SIZE = 1993883
YOUTUBE_URL = "http://www.youtube.com/watch?v=BaW_jenozKc"
YOUTUBE_FILE = "BaW_jenozKc.mp4"
DAILYMOTION_MD5 = "d363a50e9eb4f22ce90d08d15695bb47"
DAILYMOTION_URL = "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech"
DAILYMOTION_FILE = "x33vw9.mp4"
METACAFE_SIZE = 5754305
METACAFE_URL = "http://www.metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/"
METACAFE_FILE = "_aUehQsCQtM.flv"
BLIP_MD5 = "93c24d2f4e0782af13b8a7606ea97ba7"
BLIP_URL = "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352"
BLIP_FILE = "5779306.m4v"
XVIDEO_MD5 = ""
XVIDEO_URL = ""
XVIDEO_FILE = ""
def test_youtube(self):
#let's download a file from youtube
with open(DownloadTest.PARAMETERS_FILE) as f:
fd = FileDownloader(json.load(f))
fd.add_info_extractor(YoutubeIE())
fd.download([DownloadTest.YOUTUBE_URL])
self.assertTrue(os.path.exists(DownloadTest.YOUTUBE_FILE))
self.assertEqual(os.path.getsize(DownloadTest.YOUTUBE_FILE), DownloadTest.YOUTUBE_SIZE)
def test_dailymotion(self):
with open(DownloadTest.PARAMETERS_FILE) as f:
fd = FileDownloader(json.load(f))
fd.add_info_extractor(DailymotionIE())
fd.download([DownloadTest.DAILYMOTION_URL])
self.assertTrue(os.path.exists(DownloadTest.DAILYMOTION_FILE))
md5_down_file = md5_for_file(DownloadTest.DAILYMOTION_FILE)
self.assertEqual(md5_down_file, DownloadTest.DAILYMOTION_MD5)
def test_metacafe(self):
#this emulate a skip,to be 2.6 compatible
with open(DownloadTest.PARAMETERS_FILE) as f:
fd = FileDownloader(json.load(f))
fd.add_info_extractor(MetacafeIE())
fd.add_info_extractor(YoutubeIE())
fd.download([DownloadTest.METACAFE_URL])
self.assertTrue(os.path.exists(DownloadTest.METACAFE_FILE))
self.assertEqual(os.path.getsize(DownloadTest.METACAFE_FILE), DownloadTest.METACAFE_SIZE)
def test_blip(self):
with open(DownloadTest.PARAMETERS_FILE) as f:
fd = FileDownloader(json.load(f))
fd.add_info_extractor(BlipTVIE())
fd.download([DownloadTest.BLIP_URL])
self.assertTrue(os.path.exists(DownloadTest.BLIP_FILE))
md5_down_file = md5_for_file(DownloadTest.BLIP_FILE)
self.assertEqual(md5_down_file, DownloadTest.BLIP_MD5)
def tearDown(self):
if os.path.exists(DownloadTest.YOUTUBE_FILE):
os.remove(DownloadTest.YOUTUBE_FILE)
if os.path.exists(DownloadTest.DAILYMOTION_FILE):
os.remove(DownloadTest.DAILYMOTION_FILE)
if os.path.exists(DownloadTest.METACAFE_FILE):
os.remove(DownloadTest.METACAFE_FILE)
if os.path.exists(DownloadTest.BLIP_FILE):
os.remove(DownloadTest.BLIP_FILE)
def md5_for_file(filename, block_size=2**20):
with open(filename) as f:
md5 = hashlib.md5()
while True:
data = f.read(block_size)
if not data:
break
md5.update(data)
return md5.hexdigest()

@ -0,0 +1,26 @@
import unittest
import sys
import os
import subprocess
rootDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
try:
_DEV_NULL = subprocess.DEVNULL
except AttributeError:
_DEV_NULL = open(os.devnull, 'wb')
class TestExecution(unittest.TestCase):
def test_import(self):
subprocess.check_call([sys.executable, '-c', 'import youtube_dl'], cwd=rootDir)
def test_module_exec(self):
if sys.version_info >= (2,7): # Python 2.6 doesn't support package execution
subprocess.check_call([sys.executable, '-m', 'youtube_dl', '--version'], cwd=rootDir, stdout=_DEV_NULL)
def test_main_exec(self):
subprocess.check_call([sys.executable, 'youtube_dl/__main__.py', '--version'], cwd=rootDir, stdout=_DEV_NULL)
if __name__ == '__main__':
unittest.main()

@ -1,47 +1,100 @@
-# -*- coding: utf-8 -*-
#!/usr/bin/env python
# Various small unit tests
import sys
import unittest

# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

#from youtube_dl.utils import htmlentity_transform
from youtube_dl.utils import timeconvert
from youtube_dl.utils import sanitize_filename
from youtube_dl.utils import unescapeHTML
from youtube_dl.utils import orderedSet

if sys.version_info < (3, 0):
    _compat_str = lambda b: b.decode('unicode-escape')
else:
    _compat_str = lambda s: s

class TestUtil(unittest.TestCase):
    def test_timeconvert(self):
        self.assertTrue(timeconvert('') is None)
        self.assertTrue(timeconvert('bougrg') is None)
def test_sanitize_filename(self):
self.assertEqual(sanitize_filename('abc'), 'abc')
self.assertEqual(sanitize_filename('abc_d-e'), 'abc_d-e')
self.assertEqual(sanitize_filename('123'), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de'))
self.assertFalse('/' in sanitize_filename('abc/de///'))
self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de'))
self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|'))
self.assertEqual('yes no', sanitize_filename('yes? no'))
self.assertEqual('this - that', sanitize_filename('this: that'))
self.assertEqual(sanitize_filename('AT&T'), 'AT&T')
aumlaut = _compat_str('\xe4')
self.assertEqual(sanitize_filename(aumlaut), aumlaut)
tests = _compat_str('\u043a\u0438\u0440\u0438\u043b\u043b\u0438\u0446\u0430')
self.assertEqual(sanitize_filename(tests), tests)
forbidden = '"\0\\/'
for fc in forbidden:
for fbc in forbidden:
self.assertTrue(fbc not in sanitize_filename(fc))
def test_sanitize_filename_restricted(self):
self.assertEqual(sanitize_filename('abc', restricted=True), 'abc')
self.assertEqual(sanitize_filename('abc_d-e', restricted=True), 'abc_d-e')
self.assertEqual(sanitize_filename('123', restricted=True), '123')
self.assertEqual('abc_de', sanitize_filename('abc/de', restricted=True))
self.assertFalse('/' in sanitize_filename('abc/de///', restricted=True))
        self.assertEqual('abc_de', sanitize_filename('abc/<>\\*|de', restricted=True))
        self.assertEqual('xxx', sanitize_filename('xxx/<>\\*|', restricted=True))
        self.assertEqual('yes_no', sanitize_filename('yes? no', restricted=True))
        self.assertEqual('this_-_that', sanitize_filename('this: that', restricted=True))

        tests = _compat_str('a\xe4b\u4e2d\u56fd\u7684c')
        self.assertEqual(sanitize_filename(tests, restricted=True), 'a_b_c')
        self.assertTrue(sanitize_filename(_compat_str('\xf6'), restricted=True) != '') # No empty filename

        forbidden = '"\0\\/&!: \'\t\n()[]{}$;`^,#'
        for fc in forbidden:
            for fbc in forbidden:
                self.assertTrue(fbc not in sanitize_filename(fc, restricted=True))

        # Handle a common case more neatly
        self.assertEqual(sanitize_filename(_compat_str('\u5927\u58f0\u5e26 - Song'), restricted=True), 'Song')
        self.assertEqual(sanitize_filename(_compat_str('\u603b\u7edf: Speech'), restricted=True), 'Speech')
        # .. but make sure the file name is never empty
        self.assertTrue(sanitize_filename('-', restricted=True) != '')
        self.assertTrue(sanitize_filename(':', restricted=True) != '')

    def test_sanitize_ids(self):
        self.assertEqual(sanitize_filename('_n_cd26wFpw', is_id=True), '_n_cd26wFpw')
        self.assertEqual(sanitize_filename('_BD_eEpuzXw', is_id=True), '_BD_eEpuzXw')
        self.assertEqual(sanitize_filename('N0Y__7-UOdI', is_id=True), 'N0Y__7-UOdI')

-    def test_sanitize_filename(self):
-        self.assertEqual(sanitize_filename(u'abc'), u'abc')
-        self.assertEqual(sanitize_filename(u'abc_d-e'), u'abc_d-e')
-        self.assertEqual(sanitize_filename(u'123'), u'123')
-        self.assertEqual(u'abc-de', sanitize_filename(u'abc/de'))
-        self.assertFalse(u'/' in sanitize_filename(u'abc/de///'))
-        self.assertEqual(u'abc-de', sanitize_filename(u'abc/<>\\*|de'))
-        self.assertEqual(u'xxx', sanitize_filename(u'xxx/<>\\*|'))
-        self.assertEqual(u'yes no', sanitize_filename(u'yes? no'))
-        self.assertEqual(u'this - that', sanitize_filename(u'this: that'))
-        self.assertEqual(sanitize_filename(u'ä'), u'ä')
-        self.assertEqual(sanitize_filename(u'кириллица'), u'кириллица')
-        for forbidden in u'"\0\\/':
-            self.assertTrue(forbidden not in sanitize_filename(forbidden))

    def test_ordered_set(self):
-        self.assertEqual(orderedSet([1,1,2,3,4,4,5,6,7,3,5]), [1,2,3,4,5,6,7])
        self.assertEqual(orderedSet([1, 1, 2, 3, 4, 4, 5, 6, 7, 3, 5]), [1, 2, 3, 4, 5, 6, 7])
        self.assertEqual(orderedSet([]), [])
        self.assertEqual(orderedSet([1]), [1])
        #keep the list ordered
-        self.assertEqual(orderedSet([135,1,1,1]), [135,1])
        self.assertEqual(orderedSet([135, 1, 1, 1]), [135, 1])

    def test_unescape_html(self):
-        self.assertEqual(unescapeHTML(u"%20;"), u"%20;")
        self.assertEqual(unescapeHTML(_compat_str('%20;')), _compat_str('%20;'))

if __name__ == '__main__':
    unittest.main()

@ -0,0 +1,77 @@
#!/usr/bin/env python
# coding: utf-8
import json
import os
import sys
import unittest
# Allow direct execution
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
import youtube_dl.FileDownloader
import youtube_dl.InfoExtractors
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FileDownloader(youtube_dl.FileDownloader):
def __init__(self, *args, **kwargs):
youtube_dl.FileDownloader.__init__(self, *args, **kwargs)
self.to_stderr = self.to_screen
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
params = json.load(pf)
params['writeinfojson'] = True
params['skip_download'] = True
params['writedescription'] = True
TEST_ID = 'BaW_jenozKc'
INFO_JSON_FILE = TEST_ID + '.mp4.info.json'
DESCRIPTION_FILE = TEST_ID + '.mp4.description'
EXPECTED_DESCRIPTION = u'''test chars: "'/\ä↭𝕐
This is a test video for youtube-dl.
For more information, contact phihag@phihag.de .'''
class TestInfoJSON(unittest.TestCase):
def setUp(self):
# Clear old files
self.tearDown()
def test_info_json(self):
ie = youtube_dl.InfoExtractors.YoutubeIE()
fd = FileDownloader(params)
fd.add_info_extractor(ie)
fd.download([TEST_ID])
self.assertTrue(os.path.exists(INFO_JSON_FILE))
with io.open(INFO_JSON_FILE, 'r', encoding='utf-8') as jsonf:
jd = json.load(jsonf)
self.assertEqual(jd['upload_date'], u'20121002')
self.assertEqual(jd['description'], EXPECTED_DESCRIPTION)
self.assertEqual(jd['id'], TEST_ID)
self.assertEqual(jd['extractor'], 'youtube')
self.assertEqual(jd['title'], u'''youtube-dl test video "'/\ä↭𝕐''')
self.assertEqual(jd['uploader'], 'Philipp Hagemeister')
self.assertTrue(os.path.exists(DESCRIPTION_FILE))
with io.open(DESCRIPTION_FILE, 'r', encoding='utf-8') as descf:
descr = descf.read()
self.assertEqual(descr, EXPECTED_DESCRIPTION)
def tearDown(self):
if os.path.exists(INFO_JSON_FILE):
os.remove(INFO_JSON_FILE)
if os.path.exists(DESCRIPTION_FILE):
os.remove(DESCRIPTION_FILE)
if __name__ == '__main__':
unittest.main()

@ -0,0 +1,73 @@
#!/usr/bin/env python
import sys
import unittest
import json
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeUserIE,YoutubePlaylistIE
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FakeDownloader(object):
def __init__(self):
self.result = []
self.params = parameters
def to_screen(self, s):
print(s)
def trouble(self, s):
raise Exception(s)
def download(self, x):
self.result.append(x)
class TestYoutubeLists(unittest.TestCase):
def test_youtube_playlist(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=PLwiyx1dc3P2JR9N8gQaQN_BCvlSlap7re')
self.assertEqual(DL.result, [
['http://www.youtube.com/watch?v=bV9L5Ht9LgY'],
['http://www.youtube.com/watch?v=FXxLjLQi3Fg'],
['http://www.youtube.com/watch?v=tU3Bgo5qJZE']
])
def test_youtube_playlist_long(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
IE.extract('https://www.youtube.com/playlist?list=UUBABnxM4Ar9ten8Mdjj1j0Q')
self.assertTrue(len(DL.result) >= 799)
def test_youtube_course(self):
DL = FakeDownloader()
IE = YoutubePlaylistIE(DL)
# TODO find a > 100 (paginating?) videos course
IE.extract('https://www.youtube.com/course?list=ECUl4u3cNGP61MdtwGTqZA0MreSaDybji8')
self.assertEqual(DL.result[0], ['http://www.youtube.com/watch?v=j9WZyLZCBzs'])
self.assertEqual(len(DL.result), 25)
self.assertEqual(DL.result[-1], ['http://www.youtube.com/watch?v=rYefUsYuEp0'])
def test_youtube_channel(self):
# I give up, please find a channel that does paginate and test this like test_youtube_playlist_long
pass # TODO
def test_youtube_user(self):
DL = FakeDownloader()
IE = YoutubeUserIE(DL)
IE.extract('https://www.youtube.com/user/TheLinuxFoundation')
self.assertTrue(len(DL.result) >= 320)
if __name__ == '__main__':
unittest.main()

@ -0,0 +1,57 @@
#!/usr/bin/env python
import sys
import unittest
import json
import io
import hashlib
# Allow direct execution
import os
sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
from youtube_dl.InfoExtractors import YoutubeIE
from youtube_dl.utils import *
PARAMETERS_FILE = os.path.join(os.path.dirname(os.path.abspath(__file__)), "parameters.json")
with io.open(PARAMETERS_FILE, encoding='utf-8') as pf:
parameters = json.load(pf)
# General configuration (from __init__, not very elegant...)
jar = compat_cookiejar.CookieJar()
cookie_processor = compat_urllib_request.HTTPCookieProcessor(jar)
proxy_handler = compat_urllib_request.ProxyHandler()
opener = compat_urllib_request.build_opener(proxy_handler, cookie_processor, YoutubeDLHandler())
compat_urllib_request.install_opener(opener)
class FakeDownloader(object):
def __init__(self):
self.result = []
self.params = parameters
def to_screen(self, s):
print(s)
def trouble(self, s):
raise Exception(s)
def download(self, x):
self.result.append(x)
md5 = lambda s: hashlib.md5(s.encode('utf-8')).hexdigest()
class TestYoutubeSubtitles(unittest.TestCase):
def test_youtube_subtitles(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), 'c3228550d59116f3c29fba370b55d033')
def test_youtube_subtitles_it(self):
DL = FakeDownloader()
DL.params['writesubtitles'] = True
DL.params['subtitleslang'] = 'it'
IE = YoutubeIE(DL)
info_dict = IE.extract('QRS8MkLhQmM')
self.assertEqual(md5(info_dict[0]['subtitles']), '132a88a0daf8e1520f393eb58f1f646a')
if __name__ == '__main__':
unittest.main()

@ -0,0 +1,164 @@
[
{
"name": "Youtube",
"url": "http://www.youtube.com/watch?v=BaW_jenozKc",
"file": "BaW_jenozKc.mp4",
"info_dict": {
"title": "youtube-dl test video \"'/\\ä↭𝕐",
"uploader": "Philipp Hagemeister",
"uploader_id": "phihag",
"upload_date": "20121002",
"description": "test chars: \"'/\\ä↭𝕐\n\nThis is a test video for youtube-dl.\n\nFor more information, contact phihag@phihag.de ."
}
},
{
"name": "Dailymotion",
"md5": "392c4b85a60a90dc4792da41ce3144eb",
"url": "http://www.dailymotion.com/video/x33vw9_tutoriel-de-youtubeur-dl-des-video_tech",
"file": "x33vw9.mp4"
},
{
"name": "Metacafe",
"add_ie": ["Youtube"],
"url": "http://metacafe.com/watch/yt-_aUehQsCQtM/the_electric_company_short_i_pbs_kids_go/",
"file": "_aUehQsCQtM.flv"
},
{
"name": "BlipTV",
"md5": "b2d849efcf7ee18917e4b4d9ff37cafe",
"url": "http://blip.tv/cbr/cbr-exclusive-gotham-city-imposters-bats-vs-jokerz-short-3-5796352",
"file": "5779306.m4v"
},
{
"name": "XVideos",
"md5": "1d0c835822f0a71a7bf011855db929d0",
"url": "http://www.xvideos.com/video939581/funny_porns_by_s_-1",
"file": "939581.flv"
},
{
"name": "Vimeo",
"md5": "8879b6cc097e987f02484baf890129e5",
"url": "http://vimeo.com/56015672",
"file": "56015672.mp4",
"info_dict": {
"title": "youtube-dl test video - ★ \" ' 幸 / \\ ä ↭ 𝕐",
"uploader": "Filippo Valsorda",
"uploader_id": "user7108434",
"upload_date": "20121220",
"description": "This is a test case for youtube-dl.\nFor more information, see github.com/rg3/youtube-dl\nTest chars: ★ \" ' 幸 / \\ ä ↭ 𝕐"
}
},
{
"name": "Soundcloud",
"md5": "ebef0a451b909710ed1d7787dddbf0d7",
"url": "http://soundcloud.com/ethmusic/lostin-powers-she-so-heavy",
"file": "62986583.mp3"
},
{
"name": "StanfordOpenClassroom",
"md5": "544a9468546059d4e80d76265b0443b8",
"url": "http://openclassroom.stanford.edu/MainFolder/VideoPage.php?course=PracticalUnix&video=intro-environment&speed=100",
"file": "PracticalUnix_intro-environment.mp4"
},
{
"name": "XNXX",
"md5": "0831677e2b4761795f68d417e0b7b445",
"url": "http://video.xnxx.com/video1135332/lida_naked_funny_actress_5_",
"file": "1135332.flv"
},
{
"name": "Youku",
"url": "http://v.youku.com/v_show/id_XNDgyMDQ2NTQw.html",
"file": "XNDgyMDQ2NTQw_part00.flv",
"md5": "ffe3f2e435663dc2d1eea34faeff5b5b",
"params": { "test": false }
},
{
"name": "NBA",
"url": "http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html",
"file": "0021200253-okc-bkn-recap.nba.mp4",
"md5": "c0edcfc37607344e2ff8f13c378c88a4"
},
{
"name": "JustinTV",
"url": "http://www.twitch.tv/thegamedevhub/b/296128360",
"file": "296128360.flv",
"md5": "ecaa8a790c22a40770901460af191c9a"
},
{
"name": "MyVideo",
"url": "http://www.myvideo.de/watch/8229274/bowling_fail_or_win",
"file": "8229274.flv",
"md5": "2d2753e8130479ba2cb7e0a37002053e"
},
{
"name": "Escapist",
"url": "http://www.escapistmagazine.com/videos/view/the-escapist-presents/6618-Breaking-Down-Baldurs-Gate",
"file": "6618-Breaking-Down-Baldurs-Gate.flv",
"md5": "c6793dbda81388f4264c1ba18684a74d",
"skip": "Fails with timeout on Travis"
},
{
"name": "GooglePlus",
"url": "https://plus.google.com/u/0/108897254135232129896/posts/ZButuJc6CtH",
"file": "ZButuJc6CtH.flv"
},
{
"name": "FunnyOrDie",
"url": "http://www.funnyordie.com/videos/0732f586d7/heart-shaped-box-literal-video-version",
"file": "0732f586d7.mp4",
"md5": "f647e9e90064b53b6e046e75d0241fbd"
},
{
"name": "TweetReel",
"url": "http://tweetreel.com/?77smq",
"file": "77smq.mov",
"md5": "56b4d9ca9de467920f3f99a6d91255d6",
"info_dict": {
"uploader": "itszero",
"uploader_id": "itszero",
"upload_date": "20091225",
"description": "Installing Gentoo Linux on Powerbook G4, it turns out the sleep indicator becomes HDD activity indicator :D"
}
},
{
"name": "Steam",
"url": "http://store.steampowered.com/video/105600/",
"playlist": [
{
"file": "81300.flv",
"md5": "f870007cee7065d7c76b88f0a45ecc07",
"info_dict": {
"title": "Terraria 1.1 Trailer"
}
},
{
"file": "80859.flv",
"md5": "61aaf31a5c5c3041afb58fb83cbb5751",
"info_dict": {
"title": "Terraria Trailer"
}
}
]
},
{
"name": "Ustream",
"url": "http://www.ustream.tv/recorded/20274954",
"file": "20274954.flv",
"md5": "088f151799e8f572f84eb62f17d73e5c",
"info_dict": {
"title": "Young Americans for Liberty February 7, 2012 2:28 AM"
}
},
{
"name": "InfoQ",
"url": "http://www.infoq.com/presentations/A-Few-of-My-Favorite-Python-Things",
"file": "12-jan-pythonthings.mp4",
"info_dict": {
"title": "A Few of My Favorite [Python] Things"
},
"params": {
"skip_download": true
}
}
]
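
Each entry above describes one download test: "name" selects the info extractor, "url" is the page to fetch and "file" the expected output filename; the optional "md5", "info_dict", "params", "playlist", "add_ie" and "skip" keys carry the expected checksum, expected metadata fields, per-test downloader options, multi-video expectations, extra extractors to load and a reason for skipping. A minimal sketch of how such an entry could be consumed (an illustration only, not the project's actual test harness; the tests.json path and the md5 helper are assumptions):

import hashlib
import json

def matches_md5(path, expected):
    # hash a downloaded file and compare it with the "md5" field of a test entry
    with open(path, 'rb') as f:
        return hashlib.md5(f.read()).hexdigest() == expected

with open('tests.json') as f:   # assumed location of the list above
    tests = json.load(f)

for test in tests:
    if test.get('skip'):
        continue   # e.g. "Fails with timeout on Travis"
    # a real harness would run the matching InfoExtractor on test['url'],
    # download into test['file'] and then check 'md5' and each 'info_dict' field
    print(test['name'], test['url'])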


@@ -1,14 +0,0 @@
__youtube-dl()
{
local cur prev opts
COMPREPLY=()
cur="${COMP_WORDS[COMP_CWORD]}"
opts="--all-formats --audio-format --audio-quality --auto-number --batch-file --console-title --continue --cookies --dump-user-agent --extract-audio --format --get-description --get-filename --get-format --get-thumbnail --get-title --get-url --help --id --ignore-errors --keep-video --list-extractors --list-formats --literal --match-title --max-downloads --max-quality --netrc --no-continue --no-mtime --no-overwrites --no-part --no-progress --output --password --playlist-end --playlist-start --prefer-free-formats --quiet --rate-limit --reject-title --retries --simulate --skip-download --srt-lang --title --update --user-agent --username --verbose --version --write-description --write-info-json --write-srt"
if [[ ${cur} == * ]] ; then
COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
return 0
fi
}
complete -F __youtube-dl youtube-dl
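
The deleted completion script above is static: it simply hands every long option to compgen. An equivalent file can be regenerated from the option parser at build time; a rough sketch under that assumption (the template mirrors the function above, and `parser` is assumed to be the optparse.OptionParser that youtube-dl builds its --help output from):

BASH_COMPLETION_TEMPLATE = """__youtube-dl()
{
    local cur prev opts
    COMPREPLY=()
    cur="${COMP_WORDS[COMP_CWORD]}"
    opts="%s"
    if [[ ${cur} == * ]] ; then
        COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) )
        return 0
    fi
}
complete -F __youtube-dl youtube-dl
"""

def build_completion(parser, out_path='youtube-dl.bash-completion'):
    # collect every long option string ("--help", "--version", ...) from optparse
    opts = [opt.get_opt_string() for opt in parser.option_list]
    for group in parser.option_groups:
        opts += [opt.get_opt_string() for opt in group.option_list]
    with open(out_path, 'w') as f:
        f.write(BASH_COMPLETION_TEMPLATE % ' '.join(sorted(opts)))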




@@ -1,198 +1,204 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from __future__ import absolute_import

import os
import subprocess
import sys
import time

from .utils import *


class PostProcessor(object):
    """Post Processor class.

    PostProcessor objects can be added to downloaders with their
    add_post_processor() method. When the downloader has finished a
    successful download, it will take its internal chain of PostProcessors
    and start calling the run() method on each one of them, first with
    an initial argument and then with the returned value of the previous
    PostProcessor.

    The chain will be stopped if one of them ever returns None or the end
    of the chain is reached.

    PostProcessor objects follow a "mutual registration" process similar
    to InfoExtractor objects.
    """

    _downloader = None

    def __init__(self, downloader=None):
        self._downloader = downloader

    def set_downloader(self, downloader):
        """Sets the downloader for this PP."""
        self._downloader = downloader

    def run(self, information):
        """Run the PostProcessor.

        The "information" argument is a dictionary like the ones
        composed by InfoExtractors. The only difference is that this
        one has an extra field called "filepath" that points to the
        downloaded file.

        When this method returns None, the postprocessing chain is
        stopped. However, this method may return an information
        dictionary that will be passed to the next postprocessing
        object in the chain. It can be the one it received after
        changing some fields.

        In addition, this method may raise a PostProcessingError
        exception that will be taken into account by the downloader
        it was called from.
        """
        return information # by default, do nothing

class AudioConversionError(BaseException):
    def __init__(self, message):
        self.message = message

class FFmpegExtractAudioPP(PostProcessor):
    def __init__(self, downloader=None, preferredcodec=None, preferredquality=None, keepvideo=False, nopostoverwrites=False):
        PostProcessor.__init__(self, downloader)
        if preferredcodec is None:
            preferredcodec = 'best'
        self._preferredcodec = preferredcodec
        self._preferredquality = preferredquality
        self._keepvideo = keepvideo
        self._nopostoverwrites = nopostoverwrites
        self._exes = self.detect_executables()

    @staticmethod
    def detect_executables():
        def executable(exe):
            try:
                subprocess.Popen([exe, '-version'], stdout=subprocess.PIPE, stderr=subprocess.PIPE).communicate()
            except OSError:
                return False
            return exe
        programs = ['avprobe', 'avconv', 'ffmpeg', 'ffprobe']
        return dict((program, executable(program)) for program in programs)

    def get_audio_codec(self, path):
        if not self._exes['ffprobe'] and not self._exes['avprobe']: return None
        try:
            cmd = [self._exes['avprobe'] or self._exes['ffprobe'], '-show_streams', '--', encodeFilename(path)]
            handle = subprocess.Popen(cmd, stderr=compat_subprocess_get_DEVNULL(), stdout=subprocess.PIPE)
            output = handle.communicate()[0]
            if handle.wait() != 0:
                return None
        except (IOError, OSError):
            return None
        audio_codec = None
        for line in output.decode('ascii', 'ignore').split('\n'):
            if line.startswith('codec_name='):
                audio_codec = line.split('=')[1].strip()
            elif line.strip() == 'codec_type=audio' and audio_codec is not None:
                return audio_codec
        return None

    def run_ffmpeg(self, path, out_path, codec, more_opts):
        if not self._exes['ffmpeg'] and not self._exes['avconv']:
            raise AudioConversionError('ffmpeg or avconv not found. Please install one.')
        if codec is None:
            acodec_opts = []
        else:
            acodec_opts = ['-acodec', codec]
        cmd = ([self._exes['avconv'] or self._exes['ffmpeg'], '-y', '-i', encodeFilename(path), '-vn']
               + acodec_opts + more_opts +
               ['--', encodeFilename(out_path)])
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        stdout,stderr = p.communicate()
        if p.returncode != 0:
            msg = stderr.strip().split('\n')[-1]
            raise AudioConversionError(msg)

    def run(self, information):
        path = information['filepath']

        filecodec = self.get_audio_codec(path)
        if filecodec is None:
            self._downloader.to_stderr(u'WARNING: unable to obtain file audio codec with ffprobe')
            return None

        more_opts = []
        if self._preferredcodec == 'best' or self._preferredcodec == filecodec or (self._preferredcodec == 'm4a' and filecodec == 'aac'):
            if self._preferredcodec == 'm4a' and filecodec == 'aac':
                # Lossless, but in another container
                acodec = 'copy'
                extension = self._preferredcodec
                more_opts = [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            elif filecodec in ['aac', 'mp3', 'vorbis']:
                # Lossless if possible
                acodec = 'copy'
                extension = filecodec
                if filecodec == 'aac':
                    more_opts = ['-f', 'adts']
                if filecodec == 'vorbis':
                    extension = 'ogg'
            else:
                # MP3 otherwise.
                acodec = 'libmp3lame'
                extension = 'mp3'
                more_opts = []
                if self._preferredquality is not None:
                    if int(self._preferredquality) < 10:
                        more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                    else:
                        more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
        else:
            # We convert the audio (lossy)
            acodec = {'mp3': 'libmp3lame', 'aac': 'aac', 'm4a': 'aac', 'vorbis': 'libvorbis', 'wav': None}[self._preferredcodec]
            extension = self._preferredcodec
            more_opts = []
            if self._preferredquality is not None:
                if int(self._preferredquality) < 10:
                    more_opts += [self._exes['avconv'] and '-q:a' or '-aq', self._preferredquality]
                else:
                    more_opts += [self._exes['avconv'] and '-b:a' or '-ab', self._preferredquality + 'k']
            if self._preferredcodec == 'aac':
                more_opts += ['-f', 'adts']
            if self._preferredcodec == 'm4a':
                more_opts += [self._exes['avconv'] and '-bsf:a' or '-absf', 'aac_adtstoasc']
            if self._preferredcodec == 'vorbis':
                extension = 'ogg'
            if self._preferredcodec == 'wav':
                extension = 'wav'
                more_opts += ['-f', 'wav']

        prefix, sep, ext = path.rpartition(u'.') # not os.path.splitext, since the latter does not work on unicode in all setups
        new_path = prefix + sep + extension
        try:
            if self._nopostoverwrites and os.path.exists(encodeFilename(new_path)):
                self._downloader.to_screen(u'[youtube] Post-process file %s exists, skipping' % new_path)
            else:
                self._downloader.to_screen(u'[' + (self._exes['avconv'] and 'avconv' or 'ffmpeg') + '] Destination: ' + new_path)
                self.run_ffmpeg(path, new_path, acodec, more_opts)
        except:
            etype,e,tb = sys.exc_info()
            if isinstance(e, AudioConversionError):
                self._downloader.to_stderr(u'ERROR: audio conversion failed: ' + e.message)
            else:
                self._downloader.to_stderr(u'ERROR: error running ' + (self._exes['avconv'] and 'avconv' or 'ffmpeg'))
            return None

        # Try to update the date time for extracted audio file.
        if information.get('filetime') is not None:
            try:
                os.utime(encodeFilename(new_path), (time.time(), information['filetime']))
            except:
                self._downloader.to_stderr(u'WARNING: Cannot update utime of audio file')

        if not self._keepvideo:
            try:
                os.remove(encodeFilename(path))
            except (IOError, OSError):
                self._downloader.to_stderr(u'WARNING: Unable to remove downloaded video file')
                return None

        information['filepath'] = new_path
        return information
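
To make the chain described in the PostProcessor docstring concrete, a custom processor only has to subclass PostProcessor and return the (possibly modified) info dict from run(). A minimal sketch (fd is assumed to be an already configured FileDownloader, whose add_post_processor() method is the one mentioned in the docstring above):

class PrintFilePP(PostProcessor):
    # A do-almost-nothing processor: report the final path and pass the info dict on.
    def run(self, information):
        self._downloader.to_screen(u'[printfile] finished: ' + information['filepath'])
        return information   # returning None here would stop the chain

fd.add_post_processor(PrintFilePP())
fd.add_post_processor(FFmpegExtractAudioPP(preferredcodec='mp3', nopostoverwrites=True))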


@@ -1,7 +1,17 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

# Execute with
# $ python youtube_dl/__main__.py (2.6+)
# $ python -m youtube_dl (2.7+)

import sys

if __package__ is None and not hasattr(sys, "frozen"):
    # direct call of __main__.py
    import os.path
    sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))

import youtube_dl

if __name__ == '__main__':
    youtube_dl.main()

@@ -0,0 +1,160 @@
import json
import traceback
import hashlib
from zipimport import zipimporter
from .utils import *
from .version import __version__
def rsa_verify(message, signature, key):
from struct import pack
from hashlib import sha256
from sys import version_info
def b(x):
if version_info[0] == 2: return x
else: return x.encode('latin1')
assert(type(message) == type(b('')))
block_size = 0
n = key[0]
while n:
block_size += 1
n >>= 8
signature = pow(int(signature, 16), key[1], key[0])
raw_bytes = []
while signature:
raw_bytes.insert(0, pack("B", signature & 0xFF))
signature >>= 8
signature = (block_size - len(raw_bytes)) * b('\x00') + b('').join(raw_bytes)
if signature[0:2] != b('\x00\x01'): return False
signature = signature[2:]
if not b('\x00') in signature: return False
signature = signature[signature.index(b('\x00'))+1:]
if not signature.startswith(b('\x30\x31\x30\x0D\x06\x09\x60\x86\x48\x01\x65\x03\x04\x02\x01\x05\x00\x04\x20')): return False
signature = signature[19:]
if signature != sha256(message).digest(): return False
return True
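
rsa_verify() implements just enough of RSA PKCS#1 v1.5 signature checking for the updater: it exponentiates the hex-encoded signature with the public key, checks the leading 0x00 0x01 padding marker, skips past the 0x00 separator, verifies the fixed ASN.1 DigestInfo prefix for SHA-256 and finally compares the embedded digest with sha256(message). A sketch of the intended call shape (the payload, signature and key below are placeholders; the real values come from versions.json and UPDATES_RSA_KEY):

import json

versions_info = {'latest': '2013.01.02', 'versions': {}}   # placeholder payload
signature = 'ab12cd34'                                      # placeholder hex signature string
public_key = (0x10001123, 65537)                            # placeholder (n, e) pair

payload = json.dumps(versions_info, sort_keys=True).encode('utf-8')
if not rsa_verify(payload, signature, public_key):
    raise Exception(u'versions file signature is invalid')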
def update_self(to_screen, verbose, filename):
"""Update the program file with the latest version from the repository"""
UPDATE_URL = "http://rg3.github.com/youtube-dl/update/"
VERSION_URL = UPDATE_URL + 'LATEST_VERSION'
JSON_URL = UPDATE_URL + 'versions.json'
UPDATES_RSA_KEY = (0x9d60ee4d8f805312fdb15a62f87b95bd66177b91df176765d13514a0f1754bcd2057295c5b6f1d35daa6742c3ffc9a82d3e118861c207995a8031e151d863c9927e304576bc80692bc8e094896fcf11b66f3e29e04e3a71e9a11558558acea1840aec37fc396fb6b65dc81a1c4144e03bd1c011de62e3f1357b327d08426fe93, 65537)
if not isinstance(globals().get('__loader__'), zipimporter) and not hasattr(sys, "frozen"):
to_screen(u'It looks like you installed youtube-dl with pip, setup.py or a tarball. Please use that to update.')
return
# Check if there is a new version
try:
newversion = compat_urllib_request.urlopen(VERSION_URL).read().decode('utf-8').strip()
except:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t find the current version. Please try again later.')
return
if newversion == __version__:
to_screen(u'youtube-dl is up-to-date (' + __version__ + ')')
return
# Download and check versions info
try:
versions_info = compat_urllib_request.urlopen(JSON_URL).read().decode('utf-8')
versions_info = json.loads(versions_info)
except:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: can\'t obtain versions info. Please try again later.')
return
if not 'signature' in versions_info:
to_screen(u'ERROR: the versions file is not signed or corrupted. Aborting.')
return
signature = versions_info['signature']
del versions_info['signature']
if not rsa_verify(json.dumps(versions_info, sort_keys=True).encode('utf-8'), signature, UPDATES_RSA_KEY):
to_screen(u'ERROR: the versions file signature is invalid. Aborting.')
return
to_screen(u'Updating to version ' + versions_info['latest'] + '...')
version = versions_info['versions'][versions_info['latest']]
if version.get('notes'):
to_screen(u'PLEASE NOTE:')
for note in version['notes']:
to_screen(note)
if not os.access(filename, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % filename)
return
# Py2EXE
if hasattr(sys, "frozen"):
exe = os.path.abspath(filename)
directory = os.path.dirname(exe)
if not os.access(directory, os.W_OK):
to_screen(u'ERROR: no write permissions on %s' % directory)
return
try:
urlh = compat_urllib_request.urlopen(version['exe'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['exe'][1]:
to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(exe + '.new', 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to write the new version')
return
try:
bat = os.path.join(directory, 'youtube-dl-updater.bat')
b = open(bat, 'w')
b.write("""
echo Updating youtube-dl...
ping 127.0.0.1 -n 5 -w 1000 > NUL
move /Y "%s.new" "%s"
del "%s"
\n""" %(exe, exe, bat))
b.close()
os.startfile(bat)
except (IOError, OSError) as err:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
# Zip unix package
elif isinstance(globals().get('__loader__'), zipimporter):
try:
urlh = compat_urllib_request.urlopen(version['bin'][0])
newcontent = urlh.read()
urlh.close()
except (IOError, OSError) as err:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to download latest version')
return
newcontent_hash = hashlib.sha256(newcontent).hexdigest()
if newcontent_hash != version['bin'][1]:
to_screen(u'ERROR: the downloaded file hash does not match. Aborting.')
return
try:
with open(filename, 'wb') as outf:
outf.write(newcontent)
except (IOError, OSError) as err:
if verbose: to_screen(compat_str(traceback.format_exc()))
to_screen(u'ERROR: unable to overwrite current version')
return
to_screen(u'Updated youtube-dl. Restart youtube-dl to use the new version.')
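
Reading update_self() back, the versions.json document it downloads is expected to have roughly the shape sketched below (the URLs and hashes are invented placeholders; only the keys actually read by the code above are shown):

versions_info = {
    'latest': '2013.01.02',
    'versions': {
        '2013.01.02': {
            'bin': ['http://example.com/youtube-dl', 'sha256-of-the-unix-zip-binary'],
            'exe': ['http://example.com/youtube-dl.exe', 'sha256-of-the-windows-exe'],
            'notes': ['Optional release notes printed before updating'],
        },
    },
    'signature': 'hex-encoded RSA signature of the JSON dump without this field',
}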

@@ -2,363 +2,542 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import gzip import gzip
import htmlentitydefs import io
import HTMLParser import json
import locale import locale
import os import os
import re import re
import sys import sys
import traceback
import zlib import zlib
import urllib2
import email.utils import email.utils
import json import json
try: try:
import cStringIO as StringIO import urllib.request as compat_urllib_request
except ImportError: # Python 2
import urllib2 as compat_urllib_request
try:
import urllib.error as compat_urllib_error
except ImportError: # Python 2
import urllib2 as compat_urllib_error
try:
import urllib.parse as compat_urllib_parse
except ImportError: # Python 2
import urllib as compat_urllib_parse
try:
from urllib.parse import urlparse as compat_urllib_parse_urlparse
except ImportError: # Python 2
from urlparse import urlparse as compat_urllib_parse_urlparse
try:
import http.cookiejar as compat_cookiejar
except ImportError: # Python 2
import cookielib as compat_cookiejar
try:
import html.entities as compat_html_entities
except ImportError: # Python 2
import htmlentitydefs as compat_html_entities
try:
import html.parser as compat_html_parser
except ImportError: # Python 2
import HTMLParser as compat_html_parser
try:
import http.client as compat_http_client
except ImportError: # Python 2
import httplib as compat_http_client
try:
from subprocess import DEVNULL
compat_subprocess_get_DEVNULL = lambda: DEVNULL
except ImportError: except ImportError:
import StringIO compat_subprocess_get_DEVNULL = lambda: open(os.path.devnull, 'w')
try:
from urllib.parse import parse_qs as compat_parse_qs
except ImportError: # Python 2
# HACK: The following is the correct parse_qs implementation from cpython 3's stdlib.
# Python 2's version is apparently totally broken
def _unquote(string, encoding='utf-8', errors='replace'):
if string == '':
return string
res = string.split('%')
if len(res) == 1:
return string
if encoding is None:
encoding = 'utf-8'
if errors is None:
errors = 'replace'
# pct_sequence: contiguous sequence of percent-encoded bytes, decoded
pct_sequence = b''
string = res[0]
for item in res[1:]:
try:
if not item:
raise ValueError
pct_sequence += item[:2].decode('hex')
rest = item[2:]
if not rest:
# This segment was just a single percent-encoded character.
# May be part of a sequence of code units, so delay decoding.
# (Stored in pct_sequence).
continue
except ValueError:
rest = '%' + item
# Encountered non-percent-encoded characters. Flush the current
# pct_sequence.
string += pct_sequence.decode(encoding, errors) + rest
pct_sequence = b''
if pct_sequence:
# Flush the final pct_sequence
string += pct_sequence.decode(encoding, errors)
return string
def _parse_qsl(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
qs, _coerce_result = qs, unicode
pairs = [s2 for s1 in qs.split('&') for s2 in s1.split(';')]
r = []
for name_value in pairs:
if not name_value and not strict_parsing:
continue
nv = name_value.split('=', 1)
if len(nv) != 2:
if strict_parsing:
raise ValueError("bad query field: %r" % (name_value,))
# Handle case of a control-name with no equal sign
if keep_blank_values:
nv.append('')
else:
continue
if len(nv[1]) or keep_blank_values:
name = nv[0].replace('+', ' ')
name = _unquote(name, encoding=encoding, errors=errors)
name = _coerce_result(name)
value = nv[1].replace('+', ' ')
value = _unquote(value, encoding=encoding, errors=errors)
value = _coerce_result(value)
r.append((name, value))
return r
def compat_parse_qs(qs, keep_blank_values=False, strict_parsing=False,
encoding='utf-8', errors='replace'):
parsed_result = {}
pairs = _parse_qsl(qs, keep_blank_values, strict_parsing,
encoding=encoding, errors=errors)
for name, value in pairs:
if name in parsed_result:
parsed_result[name].append(value)
else:
parsed_result[name] = [value]
return parsed_result
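
The backport mirrors the Python 3 semantics of parse_qs: repeated keys accumulate into lists and percent-escapes are decoded as UTF-8. A quick illustration of the expected behaviour (not an excerpt from the test suite):

query = 'itag=22&url=http%3A%2F%2Fexample.com%2Fv.mp4&itag=18'
parsed = compat_parse_qs(query)
# parsed == {'itag': ['22', '18'], 'url': ['http://example.com/v.mp4']}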
try:
compat_str = unicode # Python 2
except NameError:
compat_str = str
try:
compat_chr = unichr # Python 2
except NameError:
compat_chr = chr
std_headers = { std_headers = {
'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0', 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7', 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.7',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8', 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Encoding': 'gzip, deflate', 'Accept-Encoding': 'gzip, deflate',
'Accept-Language': 'en-us,en;q=0.5', 'Accept-Language': 'en-us,en;q=0.5',
} }
def preferredencoding(): def preferredencoding():
"""Get preferred encoding. """Get preferred encoding.
Returns the best encoding scheme for the system, based on Returns the best encoding scheme for the system, based on
locale.getpreferredencoding() and some further tweaks. locale.getpreferredencoding() and some further tweaks.
""" """
def yield_preferredencoding(): try:
try: pref = locale.getpreferredencoding()
pref = locale.getpreferredencoding() u'TEST'.encode(pref)
u'TEST'.encode(pref) except:
except: pref = 'UTF-8'
pref = 'UTF-8'
while True: return pref
yield pref
return yield_preferredencoding().next() if sys.version_info < (3,0):
def compat_print(s):
print(s.encode(preferredencoding(), 'xmlcharrefreplace'))
else:
def compat_print(s):
assert type(s) == type(u'')
print(s)
# In Python 2.x, json.dump expects a bytestream.
# In Python 3.x, it writes to a character stream
if sys.version_info < (3,0):
def write_json_file(obj, fn):
with open(fn, 'wb') as f:
json.dump(obj, f)
else:
def write_json_file(obj, fn):
with open(fn, 'w', encoding='utf-8') as f:
json.dump(obj, f)
def htmlentity_transform(matchobj): def htmlentity_transform(matchobj):
"""Transforms an HTML entity to a Unicode character. """Transforms an HTML entity to a character.
This function receives a match object and is intended to be used with This function receives a match object and is intended to be used with
the re.sub() function. the re.sub() function.
""" """
entity = matchobj.group(1) entity = matchobj.group(1)
# Known non-numeric HTML entity # Known non-numeric HTML entity
if entity in htmlentitydefs.name2codepoint: if entity in compat_html_entities.name2codepoint:
return unichr(htmlentitydefs.name2codepoint[entity]) return compat_chr(compat_html_entities.name2codepoint[entity])
# Unicode character mobj = re.match(u'(?u)#(x?\\d+)', entity)
mobj = re.match(ur'(?u)#(x?\d+)', entity) if mobj is not None:
if mobj is not None: numstr = mobj.group(1)
numstr = mobj.group(1) if numstr.startswith(u'x'):
if numstr.startswith(u'x'): base = 16
base = 16 numstr = u'0%s' % numstr
numstr = u'0%s' % numstr else:
else: base = 10
base = 10 return compat_chr(int(numstr, base))
return unichr(long(numstr, base))
# Unknown entity in name, return its literal representation
# Unknown entity in name, return its literal representation return (u'&%s;' % entity)
return (u'&%s;' % entity)
compat_html_parser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix
HTMLParser.locatestarttagend = re.compile(r"""<[a-zA-Z][-.a-zA-Z0-9:_]*(?:\s+(?:(?<=['"\s])[^\s/>][^\s/=>]*(?:\s*=+\s*(?:'[^']*'|"[^"]*"|(?!['"])[^>\s]*))?\s*)*)?\s*""", re.VERBOSE) # backport bugfix class AttrParser(compat_html_parser.HTMLParser):
class IDParser(HTMLParser.HTMLParser): """Modified HTMLParser that isolates a tag with the specified attribute"""
"""Modified HTMLParser that isolates a tag with the specified id""" def __init__(self, attribute, value):
def __init__(self, id): self.attribute = attribute
self.id = id self.value = value
self.result = None self.result = None
self.started = False self.started = False
self.depth = {} self.depth = {}
self.html = None self.html = None
self.watch_startpos = False self.watch_startpos = False
self.error_count = 0 self.error_count = 0
HTMLParser.HTMLParser.__init__(self) compat_html_parser.HTMLParser.__init__(self)
def error(self, message): def error(self, message):
#print >> sys.stderr, self.getpos() if self.error_count > 10 or self.started:
if self.error_count > 10 or self.started: raise compat_html_parser.HTMLParseError(message, self.getpos())
raise HTMLParser.HTMLParseError(message, self.getpos()) self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line
self.rawdata = '\n'.join(self.html.split('\n')[self.getpos()[0]:]) # skip one line self.error_count += 1
self.error_count += 1 self.goahead(1)
self.goahead(1)
def loads(self, html):
def loads(self, html): self.html = html
self.html = html self.feed(html)
self.feed(html) self.close()
self.close()
def handle_starttag(self, tag, attrs):
def handle_starttag(self, tag, attrs): attrs = dict(attrs)
attrs = dict(attrs) if self.started:
if self.started: self.find_startpos(None)
self.find_startpos(None) if self.attribute in attrs and attrs[self.attribute] == self.value:
if 'id' in attrs and attrs['id'] == self.id: self.result = [tag]
self.result = [tag] self.started = True
self.started = True self.watch_startpos = True
self.watch_startpos = True if self.started:
if self.started: if not tag in self.depth: self.depth[tag] = 0
if not tag in self.depth: self.depth[tag] = 0 self.depth[tag] += 1
self.depth[tag] += 1
def handle_endtag(self, tag):
def handle_endtag(self, tag): if self.started:
if self.started: if tag in self.depth: self.depth[tag] -= 1
if tag in self.depth: self.depth[tag] -= 1 if self.depth[self.result[0]] == 0:
if self.depth[self.result[0]] == 0: self.started = False
self.started = False self.result.append(self.getpos())
self.result.append(self.getpos())
def find_startpos(self, x):
def find_startpos(self, x): """Needed to put the start position of the result (self.result[1])
"""Needed to put the start position of the result (self.result[1]) after the opening tag with the requested id"""
after the opening tag with the requested id""" if self.watch_startpos:
if self.watch_startpos: self.watch_startpos = False
self.watch_startpos = False self.result.append(self.getpos())
self.result.append(self.getpos()) handle_entityref = handle_charref = handle_data = handle_comment = \
handle_entityref = handle_charref = handle_data = handle_comment = \ handle_decl = handle_pi = unknown_decl = find_startpos
handle_decl = handle_pi = unknown_decl = find_startpos
def get_result(self):
def get_result(self): if self.result is None:
if self.result == None: return None return None
if len(self.result) != 3: return None if len(self.result) != 3:
lines = self.html.split('\n') return None
lines = lines[self.result[1][0]-1:self.result[2][0]] lines = self.html.split('\n')
lines[0] = lines[0][self.result[1][1]:] lines = lines[self.result[1][0]-1:self.result[2][0]]
if len(lines) == 1: lines[0] = lines[0][self.result[1][1]:]
lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]] if len(lines) == 1:
lines[-1] = lines[-1][:self.result[2][1]] lines[-1] = lines[-1][:self.result[2][1]-self.result[1][1]]
return '\n'.join(lines).strip() lines[-1] = lines[-1][:self.result[2][1]]
return '\n'.join(lines).strip()
def get_element_by_id(id, html): def get_element_by_id(id, html):
"""Return the content of the tag with the specified id in the passed HTML document""" """Return the content of the tag with the specified ID in the passed HTML document"""
parser = IDParser(id) return get_element_by_attribute("id", id, html)
try:
parser.loads(html) def get_element_by_attribute(attribute, value, html):
except HTMLParser.HTMLParseError: """Return the content of the tag with the specified attribute in the passed HTML document"""
pass parser = AttrParser(attribute, value)
return parser.get_result() try:
parser.loads(html)
except compat_html_parser.HTMLParseError:
pass
return parser.get_result()
def clean_html(html): def clean_html(html):
"""Clean an HTML snippet into a readable string""" """Clean an HTML snippet into a readable string"""
# Newline vs <br /> # Newline vs <br />
html = html.replace('\n', ' ') html = html.replace('\n', ' ')
html = re.sub('\s*<\s*br\s*/?\s*>\s*', '\n', html) html = re.sub(r'\s*<\s*br\s*/?\s*>\s*', '\n', html)
# Strip html tags html = re.sub(r'<\s*/\s*p\s*>\s*<\s*p[^>]*>', '\n', html)
html = re.sub('<.*?>', '', html) # Strip html tags
# Replace html entities html = re.sub('<.*?>', '', html)
html = unescapeHTML(html) # Replace html entities
return html html = unescapeHTML(html)
return html
def sanitize_open(filename, open_mode): def sanitize_open(filename, open_mode):
"""Try to open the given filename, and slightly tweak it if this fails. """Try to open the given filename, and slightly tweak it if this fails.
Attempts to open the given filename. If this fails, it tries to change Attempts to open the given filename. If this fails, it tries to change
the filename slightly, step by step, until it's either able to open it the filename slightly, step by step, until it's either able to open it
or it fails and raises a final exception, like the standard open() or it fails and raises a final exception, like the standard open()
function. function.
It returns the tuple (stream, definitive_file_name). It returns the tuple (stream, definitive_file_name).
""" """
try: try:
if filename == u'-': if filename == u'-':
if sys.platform == 'win32': if sys.platform == 'win32':
import msvcrt import msvcrt
msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY) msvcrt.setmode(sys.stdout.fileno(), os.O_BINARY)
return (sys.stdout, filename) return (sys.stdout, filename)
stream = open(encodeFilename(filename), open_mode) stream = open(encodeFilename(filename), open_mode)
return (stream, filename) return (stream, filename)
except (IOError, OSError), err: except (IOError, OSError) as err:
# In case of error, try to remove win32 forbidden chars # In case of error, try to remove win32 forbidden chars
filename = re.sub(ur'[/<>:"\|\?\*]', u'#', filename) filename = re.sub(u'[/<>:"\\|\\\\?\\*]', u'#', filename)
# An exception here should be caught in the caller # An exception here should be caught in the caller
stream = open(encodeFilename(filename), open_mode) stream = open(encodeFilename(filename), open_mode)
return (stream, filename) return (stream, filename)
def timeconvert(timestr): def timeconvert(timestr):
"""Convert RFC 2822 defined time string into system timestamp""" """Convert RFC 2822 defined time string into system timestamp"""
timestamp = None timestamp = None
timetuple = email.utils.parsedate_tz(timestr) timetuple = email.utils.parsedate_tz(timestr)
if timetuple is not None: if timetuple is not None:
timestamp = email.utils.mktime_tz(timetuple) timestamp = email.utils.mktime_tz(timetuple)
return timestamp return timestamp
def sanitize_filename(s): def sanitize_filename(s, restricted=False, is_id=False):
"""Sanitizes a string so it could be used as part of a filename.""" """Sanitizes a string so it could be used as part of a filename.
def replace_insane(char): If restricted is set, use a stricter subset of allowed characters.
if char == '?' or ord(char) < 32 or ord(char) == 127: Set is_id if this is not an arbitrary string, but an ID that should be kept if possible
return '' """
elif char == '"': def replace_insane(char):
return '\'' if char == '?' or ord(char) < 32 or ord(char) == 127:
elif char == ':': return ''
return ' -' elif char == '"':
elif char in '\\/|*<>': return '' if restricted else '\''
return '-' elif char == ':':
return char return '_-' if restricted else ' -'
elif char in '\\/|*<>':
result = u''.join(map(replace_insane, s)) return '_'
while '--' in result: if restricted and (char in '!&\'()[]{}$;`^,#' or char.isspace()):
result = result.replace('--', '-') return '_'
return result.strip('-') if restricted and ord(char) > 127:
return '_'
return char
result = u''.join(map(replace_insane, s))
if not is_id:
while '__' in result:
result = result.replace('__', '_')
result = result.strip('_')
# Common case of "Foreign band name - English song title"
if restricted and result.startswith('-_'):
result = result[2:]
if not result:
result = '_'
return result
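
In short, the default mode only replaces characters that are outright forbidden or awkward in filenames, while restricted=True additionally flattens whitespace, shell metacharacters and non-ASCII into underscores. A small illustration, worked out from the rules above rather than copied from the test suite:

title = u'Foo: bar/baz "qux"'
sanitize_filename(title)                    # -> u"Foo - bar_baz 'qux'"
sanitize_filename(title, restricted=True)   # -> u'Foo_-_bar_baz_qux'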
def orderedSet(iterable): def orderedSet(iterable):
""" Remove all duplicates from the input iterable """ """ Remove all duplicates from the input iterable """
res = [] res = []
for el in iterable: for el in iterable:
if el not in res: if el not in res:
res.append(el) res.append(el)
return res return res
def unescapeHTML(s): def unescapeHTML(s):
""" """
@param s a string (of type unicode) @param s a string
""" """
assert type(s) == type(u'') assert type(s) == type(u'')
result = re.sub(ur'(?u)&(.+?);', htmlentity_transform, s) result = re.sub(u'(?u)&(.+?);', htmlentity_transform, s)
return result return result
def encodeFilename(s): def encodeFilename(s):
""" """
@param s The name of the file (of type unicode) @param s The name of the file
""" """
assert type(s) == type(u'')
# Python 3 has a Unicode API
if sys.version_info >= (3, 0):
return s
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# Pass u'' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
return s
else:
return s.encode(sys.getfilesystemencoding(), 'ignore')
class ExtractorError(Exception):
"""Error during info extraction."""
def __init__(self, msg, tb=None):
""" tb, if given, is the original traceback (so that it can be printed out). """
super(ExtractorError, self).__init__(msg)
self.traceback = tb
assert type(s) == type(u'') def format_traceback(self):
if self.traceback is None:
return None
return u''.join(traceback.format_tb(self.traceback))
if sys.platform == 'win32' and sys.getwindowsversion()[0] >= 5:
# Pass u'' directly to use Unicode APIs on Windows 2000 and up
# (Detecting Windows NT 4 is tricky because 'major >= 4' would
# match Windows 9x series as well. Besides, NT 4 is obsolete.)
return s
else:
return s.encode(sys.getfilesystemencoding(), 'ignore')
class DownloadError(Exception): class DownloadError(Exception):
"""Download Error exception. """Download Error exception.
This exception may be thrown by FileDownloader objects if they are not This exception may be thrown by FileDownloader objects if they are not
configured to continue on errors. They will contain the appropriate configured to continue on errors. They will contain the appropriate
error message. error message.
""" """
pass pass
class SameFileError(Exception): class SameFileError(Exception):
"""Same File exception. """Same File exception.
This exception will be thrown by FileDownloader objects if they detect This exception will be thrown by FileDownloader objects if they detect
multiple files would have to be downloaded to the same file on disk. multiple files would have to be downloaded to the same file on disk.
""" """
pass pass
class PostProcessingError(Exception): class PostProcessingError(Exception):
"""Post Processing exception. """Post Processing exception.
This exception may be raised by PostProcessor's .run() method to This exception may be raised by PostProcessor's .run() method to
indicate an error in the postprocessing task. indicate an error in the postprocessing task.
""" """
pass pass
class MaxDownloadsReached(Exception): class MaxDownloadsReached(Exception):
""" --max-downloads limit has been reached. """ """ --max-downloads limit has been reached. """
pass pass
class UnavailableVideoError(Exception): class UnavailableVideoError(Exception):
"""Unavailable Format exception. """Unavailable Format exception.
This exception will be thrown when a video is requested This exception will be thrown when a video is requested
in a format that is not available for that video. in a format that is not available for that video.
""" """
pass pass
class ContentTooShortError(Exception): class ContentTooShortError(Exception):
"""Content Too Short exception. """Content Too Short exception.
This exception may be raised by FileDownloader objects when a file they This exception may be raised by FileDownloader objects when a file they
download is too small for what the server announced first, indicating download is too small for what the server announced first, indicating
the connection was probably interrupted. the connection was probably interrupted.
""" """
# Both in bytes # Both in bytes
downloaded = None downloaded = None
expected = None expected = None
def __init__(self, downloaded, expected): def __init__(self, downloaded, expected):
self.downloaded = downloaded self.downloaded = downloaded
self.expected = expected self.expected = expected
class YoutubeDLHandler(compat_urllib_request.HTTPHandler):
class Trouble(Exception): """Handler for HTTP requests and responses.
"""Trouble helper exception
This class, when installed with an OpenerDirector, automatically adds
This is an exception to be handled with the standard headers to every HTTP request and handles gzipped and
FileDownloader.trouble deflated responses from web servers. If compression is to be avoided in
""" a particular request, the original request in the program code only has
to include the HTTP header "Youtubedl-No-Compression", which will be
class YoutubeDLHandler(urllib2.HTTPHandler): removed before making the real request.
"""Handler for HTTP requests and responses.
Part of this code was copied from:
This class, when installed with an OpenerDirector, automatically adds
the standard headers to every HTTP request and handles gzipped and http://techknack.net/python-urllib2-handlers/
deflated responses from web servers. If compression is to be avoided in
a particular request, the original request in the program code only has Andrew Rowls, the author of that code, agreed to release it to the
to include the HTTP header "Youtubedl-No-Compression", which will be public domain.
removed before making the real request. """
Part of this code was copied from: @staticmethod
def deflate(data):
http://techknack.net/python-urllib2-handlers/ try:
return zlib.decompress(data, -zlib.MAX_WBITS)
Andrew Rowls, the author of that code, agreed to release it to the except zlib.error:
public domain. return zlib.decompress(data)
"""
@staticmethod
@staticmethod def addinfourl_wrapper(stream, headers, url, code):
def deflate(data): if hasattr(compat_urllib_request.addinfourl, 'getcode'):
try: return compat_urllib_request.addinfourl(stream, headers, url, code)
return zlib.decompress(data, -zlib.MAX_WBITS) ret = compat_urllib_request.addinfourl(stream, headers, url)
except zlib.error: ret.code = code
return zlib.decompress(data) return ret
@staticmethod def http_request(self, req):
def addinfourl_wrapper(stream, headers, url, code): for h in std_headers:
if hasattr(urllib2.addinfourl, 'getcode'): if h in req.headers:
return urllib2.addinfourl(stream, headers, url, code) del req.headers[h]
ret = urllib2.addinfourl(stream, headers, url) req.add_header(h, std_headers[h])
ret.code = code if 'Youtubedl-no-compression' in req.headers:
return ret if 'Accept-encoding' in req.headers:
del req.headers['Accept-encoding']
def http_request(self, req): del req.headers['Youtubedl-no-compression']
for h in std_headers: return req
if h in req.headers:
del req.headers[h] def http_response(self, req, resp):
req.add_header(h, std_headers[h]) old_resp = resp
if 'Youtubedl-no-compression' in req.headers: # gzip
if 'Accept-encoding' in req.headers: if resp.headers.get('Content-encoding', '') == 'gzip':
del req.headers['Accept-encoding'] gz = gzip.GzipFile(fileobj=io.BytesIO(resp.read()), mode='r')
del req.headers['Youtubedl-no-compression'] resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
return req resp.msg = old_resp.msg
# deflate
def http_response(self, req, resp): if resp.headers.get('Content-encoding', '') == 'deflate':
old_resp = resp gz = io.BytesIO(self.deflate(resp.read()))
# gzip resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
if resp.headers.get('Content-encoding', '') == 'gzip': resp.msg = old_resp.msg
gz = gzip.GzipFile(fileobj=StringIO.StringIO(resp.read()), mode='r') return resp
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg https_request = http_request
# deflate https_response = http_response
if resp.headers.get('Content-encoding', '') == 'deflate':
gz = StringIO.StringIO(self.deflate(resp.read()))
resp = self.addinfourl_wrapper(gz, old_resp.headers, old_resp.url, old_resp.code)
resp.msg = old_resp.msg
return resp
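
In practice the handler is installed into an opener once and reused for every request; compression can then be disabled per request with the marker header described in the docstring. A sketch under those assumptions (the URLs are placeholders and the opener construction is not copied from FileDownloader):

opener = compat_urllib_request.build_opener(YoutubeDLHandler())

# normal request: std_headers are injected, gzip/deflate bodies are decoded transparently
page = opener.open('http://example.com/').read()

# per-request opt-out: the marker header is removed again before the request is sent
req = compat_urllib_request.Request('http://example.com/big.flv',
                                    headers={'Youtubedl-No-Compression': 'True'})
stream = opener.open(req)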

@@ -0,0 +1,2 @@
__version__ = '2013.01.02'