[cleanup] Misc cleanup

pull/2222/head
pukkandan 3 years ago
parent 21df2117e4
commit 9e907ebddf

@@ -165,7 +165,7 @@ jobs:
       - name: Install Requirements
         run: |
           brew install coreutils
-          /usr/bin/python3 -m pip install -U --user pip Pyinstaller==4.5.1 mutagen pycryptodomex websockets
+          /usr/bin/python3 -m pip install -U --user pip Pyinstaller==4.5.1 -r requirements.txt
       - name: Bump version
         id: bump_version
         run: /usr/bin/python3 devscripts/update-version.py
@@ -208,7 +208,7 @@ jobs:
       - name: Get SHA2-256SUMS for yt-dlp_macos.zip
         id: sha256_macos_zip
         run: echo "::set-output name=sha256_macos_zip::$(sha256sum dist/yt-dlp_macos.zip | awk '{print $1}')"
-      - name: Get SHA2-512SUMS for yt-dlp_macos
+      - name: Get SHA2-512SUMS for yt-dlp_macos.zip
         id: sha512_macos_zip
         run: echo "::set-output name=sha512_macos_zip::$(sha512sum dist/yt-dlp_macos.zip | awk '{print $1}')"
@@ -234,7 +234,7 @@ jobs:
         # Custom pyinstaller built with https://github.com/yt-dlp/pyinstaller-builds
         run: |
           python -m pip install --upgrade pip setuptools wheel py2exe
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/x86_64/pyinstaller-4.5.1-py3-none-any.whl" -r requirements.txt
       - name: Bump version
         id: bump_version
         env:
@@ -321,7 +321,7 @@ jobs:
       - name: Install Requirements
         run: |
           python -m pip install --upgrade pip setuptools wheel
-          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" mutagen pycryptodomex websockets
+          pip install "https://yt-dlp.github.io/Pyinstaller-Builds/i686/pyinstaller-4.5.1-py3-none-any.whl" -r requirements.txt
       - name: Bump version
         id: bump_version
         env:
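
The dependency-install steps above stop listing the runtime packages inline and pull them from -r requirements.txt instead. Judging only from the packages previously pinned in these commands, that file presumably contains at least:

    mutagen
    pycryptodomex
    websockets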

@@ -1,5 +1,6 @@
 all: lazy-extractors yt-dlp doc pypi-files
-clean: clean-test clean-dist clean-cache
+clean: clean-test clean-dist
+clean-all: clean clean-cache
 completions: completion-bash completion-fish completion-zsh
 doc: README.md CONTRIBUTING.md issuetemplates supportedsites
 ot: offlinetest
@@ -21,7 +22,7 @@ clean-dist:
 	rm -rf yt-dlp.1.temp.md yt-dlp.1 README.txt MANIFEST build/ dist/ .coverage cover/ yt-dlp.tar.gz completions/ \
 		yt_dlp/extractor/lazy_extractors.py *.spec CONTRIBUTING.md.tmp yt-dlp yt-dlp.exe yt_dlp.egg-info/ AUTHORS .mailmap
 clean-cache:
-	find . -name "*.pyc" -o -name "*.class" -delete
+	find . \( -name "*.pyc" -o -name "*.class" \) -delete
 completion-bash: completions/bash/yt-dlp
 completion-fish: completions/fish/yt-dlp.fish

@@ -112,7 +112,7 @@ yt-dlp is a [youtube-dl](https://github.com/ytdl-org/youtube-dl) fork based on t
 * **Other new options**: Many new options have been added such as `--print`, `--wait-for-video`, `--sleep-requests`, `--convert-thumbnails`, `--write-link`, `--force-download-archive`, `--force-overwrites`, `--break-on-reject` etc
-* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio etc
+* **Improvements**: Regex and other operators in `--match-filter`, multiple `--postprocessor-args` and `--downloader-args`, faster archive checking, more [format selection options](#format-selection), merge multi-video/audio, multiple `--config-locations`, etc
 * **Plugins**: Extractors and PostProcessors can be loaded from an external file. See [plugins](#plugins) for details
@@ -133,7 +133,7 @@ Some of yt-dlp's default options are different from that of youtube-dl and youtu
 * `--ignore-errors` is enabled by default. Use `--abort-on-error` or `--compat-options abort-on-error` to abort on errors instead
 * When writing metadata files such as thumbnails, description or infojson, the same information (if available) is also written for playlists. Use `--no-write-playlist-metafiles` or `--compat-options no-playlist-metafiles` to not write these files
 * `--add-metadata` attaches the `infojson` to `mkv` files in addition to writing the metadata when used with `--write-info-json`. Use `--no-embed-info-json` or `--compat-options no-attach-info-json` to revert this
-* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](https://github.com/yt-dlp/yt-dlp#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
+* Some metadata are embedded into different fields when using `--add-metadata` as compared to youtube-dl. Most notably, `comment` field contains the `webpage_url` and `synopsis` contains the `description`. You can [use `--parse-metadata`](#modifying-metadata) to modify this to your liking or use `--compat-options embed-metadata` to revert this
 * `playlist_index` behaves differently when used with options like `--playlist-reverse` and `--playlist-items`. See [#302](https://github.com/yt-dlp/yt-dlp/issues/302) for details. You can use `--compat-options playlist-index` if you want to keep the earlier behavior
 * The output of `-F` is listed in a new format. Use `--compat-options list-formats` to revert this
 * All *experiences* of a funimation episode are considered as a single video. This behavior breaks existing archives. Use `--compat-options seperate-video-versions` to extract information from only the default player
@@ -327,22 +327,26 @@ You can also fork the project on github and run your fork's [build workflow](.gi
                                      an error. The default value "fixup_error"
                                      repairs broken URLs, but emits an error if
                                      this is not possible instead of searching
-    --ignore-config, --no-config     Disable loading any configuration files
-                                     except the one provided by --config-location.
-                                     When given inside a configuration
-                                     file, no further configuration files are
-                                     loaded. Additionally, (for backward
-                                     compatibility) if this option is found
-                                     inside the system configuration file, the
-                                     user configuration is not loaded
-    --config-location PATH           Location of the main configuration file;
+    --ignore-config                  Don't load any more configuration files
+                                     except those given by --config-locations.
+                                     For backward compatibility, if this option
+                                     is found inside the system configuration
+                                     file, the user configuration is not loaded
+    --no-config-locations            Do not load any custom configuration files
+                                     (default). When given inside a
+                                     configuration file, ignore all previous
+                                     --config-locations defined in the current
+                                     file
+    --config-locations PATH          Location of the main configuration file;
                                      either the path to the config or its
-                                     containing directory
+                                     containing directory. Can be used multiple
+                                     times and inside other configuration files
     --flat-playlist                  Do not extract the videos of a playlist,
                                      only list them
     --no-flat-playlist               Extract the videos of a playlist
    --live-from-start                Download livestreams from the start.
                                      Currently only supported for YouTube
+                                     (Experimental)
     --no-live-from-start             Download livestreams from the current time
                                      (default)
     --wait-for-video MIN[-MAX]       Wait for scheduled streams to become

@@ -1154,6 +1154,7 @@ class TestYoutubeDL(unittest.TestCase):
         self.assertTrue(entries[1] is None)
         self.assertEqual(len(ydl.downloaded_info_dicts), 1)
         downloaded = ydl.downloaded_info_dicts[0]
+        entries[2].pop('requested_downloads', None)
         self.assertEqual(entries[2], downloaded)
         self.assertEqual(downloaded['url'], TEST_URL)
         self.assertEqual(downloaded['title'], 'Video Transparent 2')

@@ -1161,7 +1161,7 @@ class YoutubeDL(object):
             str_fmt = f'{fmt[:-1]}s'
             if fmt[-1] == 'l':  # list
                 delim = '\n' if '#' in flags else ', '
-                value, fmt = delim.join(variadic(value, allowed_types=(str, bytes))), str_fmt
+                value, fmt = delim.join(map(str, variadic(value, allowed_types=(str, bytes)))), str_fmt
             elif fmt[-1] == 'j':  # json
                 value, fmt = json.dumps(value, default=_dumpjson_default, indent=4 if '#' in flags else None), str_fmt
             elif fmt[-1] == 'q':  # quoted
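
A minimal standalone sketch (hypothetical values, not yt-dlp code) of why the join above is now wrapped in map(str, ...): list-valued fields can contain non-string items, and str.join() raises TypeError on them unless every item is stringified first.

    tags = ['music', 2022, None]   # hypothetical field value with non-string items
    delim = ', '
    try:
        delim.join(tags)                            # old behaviour: TypeError on the int
    except TypeError as err:
        print('without map(str, ...):', err)
    print('with map(str, ...):', delim.join(map(str, tags)))   # music, 2022, None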
@@ -2396,6 +2396,9 @@ class YoutubeDL(object):
             if not get_from_start:
                 info_dict['title'] += ' ' + datetime.datetime.now().strftime('%Y-%m-%d %H:%M')
 
+        # backward compatibility
+        info_dict['fulltitle'] = info_dict['title']
+
         if not formats:
             self.raise_no_formats(info_dict)
@@ -2584,7 +2587,7 @@ class YoutubeDL(object):
             if max_downloads_reached:
                 break
 
-        write_archive = set(f.get('_write_download_archive', False) for f in formats_to_download)
+        write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download)
         assert write_archive.issubset({True, False, 'ignore'})
         if True in write_archive and False not in write_archive:
             self.record_download_archive(info_dict)
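
A minimal sketch of the decision encoded above, using hypothetical format dicts: the per-format flags are collapsed into a set, and the archive entry is recorded only when at least one format reports success (True) and none reports failure (False); 'ignore' neither triggers nor blocks the write.

    def should_record_archive(formats_to_download):
        # mirrors the set/assert/membership test in the hunk above
        write_archive = set(f.get('__write_download_archive', False) for f in formats_to_download)
        assert write_archive.issubset({True, False, 'ignore'})
        return True in write_archive and False not in write_archive

    print(should_record_archive([{'__write_download_archive': True}]))           # True
    print(should_record_archive([{'__write_download_archive': True}, {}]))       # False
    print(should_record_archive([{'__write_download_archive': True},
                                  {'__write_download_archive': 'ignore'}]))      # True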
@@ -2754,14 +2757,11 @@ class YoutubeDL(object):
         assert info_dict.get('_type', 'video') == 'video'
         original_infodict = info_dict
 
-        # TODO: backward compatibility, to be removed
-        info_dict['fulltitle'] = info_dict['title']
-
         if 'format' not in info_dict and 'ext' in info_dict:
             info_dict['format'] = info_dict['ext']
 
         if self._match_entry(info_dict) is not None:
-            info_dict['_write_download_archive'] = 'ignore'
+            info_dict['__write_download_archive'] = 'ignore'
             return
 
         self.post_extract(info_dict)
@@ -2776,7 +2776,7 @@ class YoutubeDL(object):
         self.__forced_printings(info_dict, full_filename, incomplete=('format' not in info_dict))
 
         if self.params.get('simulate'):
-            info_dict['_write_download_archive'] = self.params.get('force_write_download_archive')
+            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
             return
 
         if full_filename is None:
@@ -2890,7 +2890,7 @@ class YoutubeDL(object):
             info_dict['__finaldir'] = os.path.dirname(os.path.abspath(encodeFilename(full_filename)))
             info_dict['__files_to_move'] = files_to_move
             replace_info_dict(self.run_pp(MoveFilesAfterDownloadPP(self, False), info_dict))
-            info_dict['_write_download_archive'] = self.params.get('force_write_download_archive')
+            info_dict['__write_download_archive'] = self.params.get('force_write_download_archive')
         else:
             # Download
             info_dict.setdefault('__postprocessors', [])
@@ -3119,10 +3119,10 @@ class YoutubeDL(object):
             except Exception as err:
                 self.report_error('post hooks: %s' % str(err))
                 return
-            info_dict['_write_download_archive'] = True
+            info_dict['__write_download_archive'] = True
 
         if self.params.get('force_write_download_archive'):
-            info_dict['_write_download_archive'] = True
+            info_dict['__write_download_archive'] = True
 
         # Make sure the info_dict was modified in-place
         assert info_dict is original_infodict

@@ -110,6 +110,8 @@ class Zee5IE(InfoExtractor):
             raise ExtractorError(otp_request_json['message'], expected=True)
         elif username.lower() == 'token' and len(password) > 1198:
             self._USER_TOKEN = password
+        else:
+            raise ExtractorError(self._LOGIN_HINT, expected=True)
 
     def _real_initialize(self):
         self._login()

@@ -137,7 +137,7 @@ def create_parser():
     def _list_from_options_callback(option, opt_str, value, parser, append=True, delim=',', process=str.strip):
         # append can be True, False or -1 (prepend)
-        current = getattr(parser.values, option.dest) if append else []
+        current = list(getattr(parser.values, option.dest)) if append else []
         value = list(filter(None, [process(value)] if delim is None else map(process, value.split(delim))))
         setattr(
             parser.values, option.dest,
@@ -146,7 +146,7 @@ def create_parser():
     def _set_from_options_callback(
             option, opt_str, value, parser, delim=',', allowed_values=None, aliases={},
             process=lambda x: x.lower().strip()):
-        current = getattr(parser.values, option.dest)
+        current = set(getattr(parser.values, option.dest))
         values = [process(value)] if delim is None else list(map(process, value.split(delim)[::-1]))
         while values:
             actual_val = val = values.pop()
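
Both callback changes above copy the current value with list()/set() before extending it. A minimal sketch (plain optparse, not yt-dlp code) of the aliasing hazard this guards against: the value a callback retrieves can be the very object stored as the option default, so mutating it in place leaks state into later parses of the same parser.

    import optparse

    parser = optparse.OptionParser()
    parser.add_option('--tag', dest='tags', default=[], action='append')  # built-in 'append' mutates in place

    parser.parse_args(['--tag', 'a'])
    print(parser.defaults['tags'])    # ['a'] -- the shared default list was modified

    opts, _ = parser.parse_args([])   # a later parse with no --tag given at all
    print(opts.tags)                  # ['a'] -- the leaked value is still there

Building a fresh container with list(getattr(...)) / set(getattr(...)) before appending avoids touching that shared default.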
@@ -261,7 +261,7 @@ def create_parser():
         '--ignore-config', '--no-config',
         action='store_true', dest='ignoreconfig',
         help=(
-            'Disable loading any further configuration files except the one provided by --config-locations. '
+            'Don\'t load any more configuration files except those given by --config-locations. '
             'For backward compatibility, if this option is found inside the system configuration file, the user configuration is not loaded'))
     general.add_option(
         '--no-config-locations',
@@ -286,7 +286,7 @@ def create_parser():
     general.add_option(
         '--live-from-start',
         action='store_true', dest='live_from_start',
-        help='Download livestreams from the start. Currently only supported for YouTube')
+        help='Download livestreams from the start. Currently only supported for YouTube (Experimental)')
     general.add_option(
         '--no-live-from-start',
         action='store_false', dest='live_from_start',
@@ -811,7 +811,7 @@ def create_parser():
         metavar='NAME:ARGS', dest='external_downloader_args', default={}, type='str',
         action='callback', callback=_dict_from_options_callback,
         callback_kwargs={
-            'allowed_keys': r'ffmpeg_[io]\d*|%s' % '|'.join(list_external_downloaders()),
+            'allowed_keys': r'ffmpeg_[io]\d*|%s' % '|'.join(map(re.escape, list_external_downloaders())),
             'default_key': 'default',
             'process': compat_shlex_split
         }, help=(
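
The allowed_keys value is interpolated straight into a regular expression (note the r'ffmpeg_[io]\d*|...' alternation), so the key names must be escaped or any regex metacharacter in them would silently widen the pattern. A small sketch with hypothetical key names; the same change is applied to the OUTTMPL_TYPES keys in the two hunks below.

    import re

    keys = ['aria2c', 'my.dl']        # hypothetical downloader names
    unsafe = r'ffmpeg_[io]\d*|%s' % '|'.join(keys)
    safe = r'ffmpeg_[io]\d*|%s' % '|'.join(map(re.escape, keys))

    print(bool(re.fullmatch(unsafe, 'myadl')))   # True -- '.' acts as a wildcard
    print(bool(re.fullmatch(safe, 'myadl')))     # False -- only the literal name matches
    print(bool(re.fullmatch(safe, 'my.dl')))     # True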
@@ -1050,7 +1050,7 @@ def create_parser():
         metavar='[TYPES:]PATH', dest='paths', default={}, type='str',
         action='callback', callback=_dict_from_options_callback,
         callback_kwargs={
-            'allowed_keys': 'home|temp|%s' % '|'.join(OUTTMPL_TYPES.keys()),
+            'allowed_keys': 'home|temp|%s' % '|'.join(map(re.escape, OUTTMPL_TYPES.keys())),
             'default_key': 'home'
         }, help=(
             'The paths where the files should be downloaded. '
@@ -1065,7 +1065,7 @@ def create_parser():
         metavar='[TYPES:]TEMPLATE', dest='outtmpl', default={}, type='str',
         action='callback', callback=_dict_from_options_callback,
         callback_kwargs={
-            'allowed_keys': '|'.join(OUTTMPL_TYPES.keys()),
+            'allowed_keys': '|'.join(map(re.escape, OUTTMPL_TYPES.keys())),
             'default_key': 'default'
         }, help='Output filename template; see "OUTPUT TEMPLATE" for details')
     filesystem.add_option(
@@ -1302,7 +1302,8 @@ def create_parser():
         metavar='NAME:ARGS', dest='postprocessor_args', default={}, type='str',
         action='callback', callback=_dict_from_options_callback,
         callback_kwargs={
-            'allowed_keys': r'\w+(?:\+\w+)?', 'default_key': 'default-compat',
+            'allowed_keys': r'\w+(?:\+\w+)?',
+            'default_key': 'default-compat',
             'process': compat_shlex_split,
             'multiple_keys': False
         }, help=(

@@ -2381,13 +2381,8 @@ class PUTRequest(compat_urllib_request.Request):
 def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
-    if get_attr:
-        if v is not None:
-            v = getattr(v, get_attr, None)
-    if v == '':
-        v = None
-    if v is None:
-        return default
+    if get_attr and v is not None:
+        v = getattr(v, get_attr, None)
     try:
         return int(v) * invscale // scale
     except (ValueError, TypeError, OverflowError):
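
A standalone restatement of the simplified helper (the trailing return default is assumed from the surrounding code): the explicit checks for '' and None are redundant because int('') raises ValueError and int(None) raises TypeError, both of which the existing except clause already turns into the default.

    def int_or_none(v, scale=1, default=None, get_attr=None, invscale=1):
        if get_attr and v is not None:
            v = getattr(v, get_attr, None)
        try:
            return int(v) * invscale // scale
        except (ValueError, TypeError, OverflowError):
            return default

    print(int_or_none('42'))   # 42
    print(int_or_none(''))     # None -- ValueError falls through to the default
    print(int_or_none(None))   # None -- TypeError falls through to the default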
@@ -5036,7 +5031,6 @@ def traverse_obj(
         return default
 
 
-# Deprecated
 def traverse_dict(dictn, keys, casesense=True):
     write_string('DeprecationWarning: yt_dlp.utils.traverse_dict is deprecated '
                  'and may be removed in a future version. Use yt_dlp.utils.traverse_obj instead')
