Format code

- server/entertainment_decider/common.py
- server/entertainment_decider/extractors/all/tt_rss.py
- server/entertainment_decider/extractors/all/ytdl.py
- server/entertainment_decider/extractors/collection/__init__.py
- server/entertainment_decider/extractors/collection/tt_rss.py
- server/entertainment_decider/extractors/collection/youtube.py
- server/entertainment_decider/extractors/generic.py
- server/entertainment_decider/extractors/helpers.py
- server/entertainment_decider/extractors/media/__init__.py
- server/entertainment_decider/extractors/media/base.py
- server/entertainment_decider/extractors/media/youtube.py
- server/entertainment_decider/extractors/media/ytdl.py

Applied automatically using black
master
Felix Stupp 2 years ago
parent b7cbcbd540
commit f236db4084
Signed by: zocker
GPG Key ID: 93E1BD26F6B02FB7

@ -2,18 +2,32 @@ import itertools
import subprocess import subprocess
from typing import Iterable, List, Literal, TypeVar, Union from typing import Iterable, List, Literal, TypeVar, Union
def call(args, check=True, stdin=None) -> subprocess.CompletedProcess:
    """Run *args* as a subprocess and return the completed process.

    Output (stdout/stderr) is captured as text. When ``check`` is true a
    non-zero exit status raises ``subprocess.CalledProcessError``.
    """
    return subprocess.run(
        args,
        capture_output=True,
        check=check,
        text=True,
        stdin=stdin,
    )
def update_bool_value(
    old_value: bool, new_value: Union[bool, Literal["toggle"]]
) -> bool:
    """Compute the updated state of a boolean setting.

    Args:
        old_value: The current boolean value.
        new_value: Either an explicit replacement value, or the literal
            string ``"toggle"`` to invert ``old_value``.

    Returns:
        The resulting boolean value.

    Raises:
        Exception: If ``new_value`` is neither a bool nor ``"toggle"``.
    """
    if new_value == "toggle":
        return not old_value
    # isinstance() instead of type(...) != bool: idiomatic and equivalent
    # here, since bool cannot be subclassed.
    if not isinstance(new_value, bool):
        raise Exception(
            f'Invalid type of new_value: Expected bool or literal "toggle", got type={type(new_value)!r}, value={new_value!r}'
        )
    return new_value
T = TypeVar("T")


def limit_iter(iter: Iterable[T], limit: int) -> List[T]:
    """Return a list holding at most the first *limit* items of *iter*."""
    head = itertools.islice(iter, limit)
    return list(head)

@ -28,14 +28,18 @@ TT_RSS_CONNECTION: Connection = None
HeadlineList = List[Headline] HeadlineList = List[Headline]
def _build_connection(params: TtRssConnectionParameter) -> Connection:
    """Return the shared TT-RSS connection, creating and logging in on demand."""
    global TT_RSS_CONNECTION
    conn = TT_RSS_CONNECTION
    if conn is None:
        conn = Connection(
            proto=params.proto,
            host=params.host,
            endpoint=params.endpoint,
        )
        TT_RSS_CONNECTION = conn
    if not conn.isLoggedIn():
        conn.login(username=params.username, password=params.password)
    return conn
def get_headlines(params: TtRssConnectionParameter, **kwargs) -> HeadlineList: def get_headlines(params: TtRssConnectionParameter, **kwargs) -> HeadlineList:
conn = _build_connection(params) conn = _build_connection(params)
if "limit" in kwargs: if "limit" in kwargs:
@ -71,9 +75,11 @@ class TtRssUriKind(Enum):
@dataclass @dataclass
class TtRssUri: class TtRssUri:
supported_kinds = '|'.join(re.escape(n.path_name.lower()) for n in TtRssUriKind) supported_kinds = "|".join(re.escape(n.path_name.lower()) for n in TtRssUriKind)
scheme = "tt-rss" scheme = "tt-rss"
path_re = re.compile(fr"^/((?P<all>all)|(?P<kind>{supported_kinds})/(?P<id>-?\d+))/?$") path_re = re.compile(
rf"^/((?P<all>all)|(?P<kind>{supported_kinds})/(?P<id>-?\d+))/?$"
)
kind: TtRssUriKind kind: TtRssUriKind
id: Optional[str] id: Optional[str]
@ -95,9 +101,16 @@ class TtRssUri:
if m is None: if m is None:
raise Exception(f"Could not parse path of tt-rss uri: {parts.path!r}") raise Exception(f"Could not parse path of tt-rss uri: {parts.path!r}")
return TtRssUri( return TtRssUri(
kind = TtRssUriKind.ALL if m.group("all") else TtRssUriKind.from_path_name(m.group("kind")), kind=TtRssUriKind.ALL
if m.group("all")
else TtRssUriKind.from_path_name(m.group("kind")),
id=m.group("id"), id=m.group("id"),
options = {single[0]: single[1] for single in (single.split("=") for single in parts.query.split("&"))} if parts.query else {}, options={
single[0]: single[1]
for single in (single.split("=") for single in parts.query.split("&"))
}
if parts.query
else {},
) )
def request(self, params: TtRssConnectionParameter, **kwargs) -> HeadlineList: def request(self, params: TtRssConnectionParameter, **kwargs) -> HeadlineList:

@ -9,7 +9,11 @@ from jsoncache import ApplicationCache
from ...common import call from ...common import call
cache = ApplicationCache(app_name="entertainment-decider-ytdl", create_cache_dir=True, default_max_age=7*86400) cache = ApplicationCache(
app_name="entertainment-decider-ytdl",
create_cache_dir=True,
default_max_age=7 * 86400,
)
cache.clean_cache() cache.clean_cache()
YTDL_CALL = [ YTDL_CALL = [
@ -20,6 +24,7 @@ YTDL_CALL = [
class YtdlErrorException(subprocess.CalledProcessError): class YtdlErrorException(subprocess.CalledProcessError):
pass pass
def ytdl_call(args: List[str]) -> dict: def ytdl_call(args: List[str]) -> dict:
proc = call(YTDL_CALL + args, check=False) proc = call(YTDL_CALL + args, check=False)
if proc.returncode != 0: if proc.returncode != 0:
@ -31,18 +36,24 @@ def ytdl_call(args: List[str]) -> dict:
) )
return json.loads(proc.stdout.strip()) return json.loads(proc.stdout.strip())
@cache.cache_json()
def get_video_info(uri: str) -> dict:
    """Fetch the metadata of a single video via youtube-dl (JSON-cached)."""
    args = [
        "--no-playlist",
        "--dump-json",
        uri,
    ]
    return ytdl_call(args)
@cache.cache_json()
def get_playlist_info(uri: str) -> dict:
    """Fetch the metadata of a whole playlist via youtube-dl (JSON-cached)."""
    args = [
        "--yes-playlist",
        "--dump-single-json",
        uri,
    ]
    return ytdl_call(args)

@ -12,16 +12,21 @@ from .youtube import YouTubeCollectionExtractor
tt_rss_params = TtRssConnectionParameter(**app_config["extractors"]["tt_rss"]) tt_rss_params = TtRssConnectionParameter(**app_config["extractors"]["tt_rss"])
COLLECTION_EXTRACTORS: Dict[str, CollectionExtractor] = { COLLECTION_EXTRACTORS: Dict[str, CollectionExtractor] = {
"tt-rss": TtRssCollectionExtractor(params=tt_rss_params, label_filter=-1033), "tt-rss": TtRssCollectionExtractor(
params=tt_rss_params,
label_filter=-1033,
),
"youtube": YouTubeCollectionExtractor(), "youtube": YouTubeCollectionExtractor(),
} }
def collection_expect_extractor(uri: str) -> CollectionExtractor:
    """Return the collection extractor suitable for *uri*, raising if none fits."""
    return expect_suitable_extractor(
        uri=uri,
        extractor_list=COLLECTION_EXTRACTORS.values(),
    )
def collection_update(collection: MediaCollection, check_cache_expired: bool = True): def collection_update(collection: MediaCollection, check_cache_expired: bool = True):
ex = collection_expect_extractor(collection.uri) ex = collection_expect_extractor(collection.uri)
ex.update_object( ex.update_object(
@ -29,6 +34,7 @@ def collection_update(collection: MediaCollection, check_cache_expired: bool = T
check_cache_expired=check_cache_expired, check_cache_expired=check_cache_expired,
) )
def collection_extract_uri(uri: str) -> MediaCollection: def collection_extract_uri(uri: str) -> MediaCollection:
elem: MediaCollection = CollectionExtractor.check_uri(uri) elem: MediaCollection = CollectionExtractor.check_uri(uri)
ex = collection_expect_extractor(uri) ex = collection_expect_extractor(uri)

@ -18,7 +18,8 @@ class TtRssCollectionExtractor(CollectionExtractor[HeadlineList]):
__label_filter: Optional[int] __label_filter: Optional[int]
__mark_as_read: bool __mark_as_read: bool
def __init__(self, def __init__(
self,
params: TtRssConnectionParameter, params: TtRssConnectionParameter,
mark_as_read: bool = False, mark_as_read: bool = False,
label_filter: Optional[int] = None, label_filter: Optional[int] = None,
@ -53,8 +54,10 @@ class TtRssCollectionExtractor(CollectionExtractor[HeadlineList]):
data = rss_uri.request(self.__params, order_by="feed_dates", view_mode="unread") data = rss_uri.request(self.__params, order_by="feed_dates", view_mode="unread")
if self.__label_filter is not None: if self.__label_filter is not None:
data = [ data = [
headline for headline in data headline
if self.__label_filter in (label_marker[0] for label_marker in headline.labels) for headline in data
if self.__label_filter
in (label_marker[0] for label_marker in headline.labels)
] ]
if self.__mark_as_read: if self.__mark_as_read:
parameters = { parameters = {
@ -62,7 +65,9 @@ class TtRssCollectionExtractor(CollectionExtractor[HeadlineList]):
"field": "2", # unread "field": "2", # unread
"mode": "0", # false "mode": "0", # false
} }
raise NotImplementedError("Cannot set articles as read with tinytinypy for now") # TODO raise NotImplementedError(
"Cannot set articles as read with tinytinypy for now"
) # TODO
return ExtractedData( return ExtractedData(
extractor_name=self.name, extractor_name=self.name,
object_key=uri, object_key=uri,
@ -75,10 +80,7 @@ class TtRssCollectionExtractor(CollectionExtractor[HeadlineList]):
object.title = object.uri object.title = object.uri
logging.debug(f"Got {len(data)} headlines") logging.debug(f"Got {len(data)} headlines")
for headline in data: for headline in data:
self._add_episode( self._add_episode(collection=object, uri=headline.url)
collection = object,
uri = headline.url
)
orm.commit() orm.commit()
if object.watch_in_order_auto: if object.watch_in_order_auto:
object.watch_in_order = False # no order available object.watch_in_order = False # no order available

@ -15,7 +15,9 @@ from .base import CollectionExtractor
class YouTubeCollectionExtractor(CollectionExtractor[Dict]): class YouTubeCollectionExtractor(CollectionExtractor[Dict]):
__uri_regex = re.compile(r"^https?://(www\.)?youtube\.com/(channel/|playlist\?list=)(?P<id>[^/&?]+)") __uri_regex = re.compile(
r"^https?://(www\.)?youtube\.com/(channel/|playlist\?list=)(?P<id>[^/&?]+)"
)
@classmethod @classmethod
def __get_id(cls, uri: str) -> str: def __get_id(cls, uri: str) -> str:
@ -69,7 +71,9 @@ class YouTubeCollectionExtractor(CollectionExtractor[Dict]):
playlist = youtubesearchpython.Playlist(playlist_link) playlist = youtubesearchpython.Playlist(playlist_link)
while playlist.hasMoreVideos: while playlist.hasMoreVideos:
playlist.getNextVideos() playlist.getNextVideos()
logging.debug(f"Retrieved {len(playlist.videos)} videos from playlist {playlist_link!r}") logging.debug(
f"Retrieved {len(playlist.videos)} videos from playlist {playlist_link!r}"
)
return ExtractedData( return ExtractedData(
extractor_name=self.name, extractor_name=self.name,
object_key=playlist_id, object_key=playlist_id,
@ -83,7 +87,11 @@ class YouTubeCollectionExtractor(CollectionExtractor[Dict]):
def _update_object_raw(self, object: MediaCollection, data: Dict): def _update_object_raw(self, object: MediaCollection, data: Dict):
info = data["info"] info = data["info"]
is_channel = self.__is_channel_id(info["id"]) is_channel = self.__is_channel_id(info["id"])
object.title = f"[channel] [{self.name}] {info['channel']['name']}" if is_channel else f"[playlist] {info['channel']['name']}: {info['title']}" object.title = (
f"[channel] [{self.name}] {info['channel']['name']}"
if is_channel
else f"[playlist] {info['channel']['name']}: {info['title']}"
)
object.add_single_uri(info["link"]) object.add_single_uri(info["link"])
video_list = data["videos"] video_list = data["videos"]
if object.watch_in_order_auto: if object.watch_in_order_auto:

@ -61,7 +61,7 @@ class ExtractedDataLight:
return MediaCollection( return MediaCollection(
uri=self.object_uri, uri=self.object_uri,
extractor_name=self.extractor_name, extractor_name=self.extractor_name,
extractor_key = self.object_key extractor_key=self.object_key,
) )
@ -74,10 +74,14 @@ class ExtractedData(ExtractedDataLight, Generic[T]):
return self.data is not None return self.data is not None
def load_media(self) -> Optional[MediaElement]:
    """Look up the already-stored MediaElement matching this extracted data."""
    return MediaElement.get(
        extractor_name=self.extractor_name,
        extractor_key=self.object_key,
    )
def load_collection(self) -> Optional[MediaCollection]:
    """Look up the already-stored MediaCollection matching this extracted data."""
    return MediaCollection.get(
        extractor_name=self.extractor_name,
        extractor_key=self.object_key,
    )
@dataclass @dataclass
@ -91,6 +95,7 @@ class AuthorExtractedData(ExtractedDataLight):
E = TypeVar("E", MediaElement, MediaCollection) E = TypeVar("E", MediaElement, MediaCollection)
class GeneralExtractor(Generic[E, T]): class GeneralExtractor(Generic[E, T]):
name: str name: str
@ -136,7 +141,11 @@ class GeneralExtractor(Generic[E, T]):
# defined # defined
def _extract_offline(self, uri: str) -> ExtractedData[T]:
    """Extract offline when the extractor supports it, otherwise go online."""
    if self.can_extract_offline(uri):
        return self._extract_offline_only(uri)
    return self._extract_online(uri)
def _extract_required(self, data: ExtractedData[T]) -> ExtractedData[T]: def _extract_required(self, data: ExtractedData[T]) -> ExtractedData[T]:
if data.has_data: if data.has_data:
@ -151,8 +160,14 @@ class GeneralExtractor(Generic[E, T]):
return object return object
def update_object(self, object: E, check_cache_expired: bool = True) -> E: def update_object(self, object: E, check_cache_expired: bool = True) -> E:
if object.was_extracted and check_cache_expired and not self._cache_expired(object.last_updated): if (
logging.debug(f"Skip info for element as already extracted and cache valid: {object.title!r}") object.was_extracted
and check_cache_expired
and not self._cache_expired(object.last_updated)
):
logging.debug(
f"Skip info for element as already extracted and cache valid: {object.title!r}"
)
return object return object
data = self._extract_online(object.uri) data = self._extract_online(object.uri)
logging.debug(f"Updating info for media: {data!r}") logging.debug(f"Updating info for media: {data!r}")

@ -18,6 +18,7 @@ def search_suitable_extractor(extractor_list: Iterable[T], uri: str) -> Optional
best_bet = extractor best_bet = extractor
return best_bet return best_bet
def expect_suitable_extractor(extractor_list: Iterable[T], uri: str) -> T: def expect_suitable_extractor(extractor_list: Iterable[T], uri: str) -> T:
extractor = search_suitable_extractor(extractor_list, uri) extractor = search_suitable_extractor(extractor_list, uri)
if extractor is None: if extractor is None:

@ -15,12 +15,14 @@ MEDIA_EXTRACTORS: Dict[str, MediaExtractor] = {
"ytdl": YtdlMediaExtractor(), "ytdl": YtdlMediaExtractor(),
} }
def media_expect_extractor(uri: str) -> MediaExtractor:
    """Return the media extractor suitable for *uri*, raising if none fits."""
    return expect_suitable_extractor(
        uri=uri,
        extractor_list=MEDIA_EXTRACTORS.values(),
    )
def media_update(element: MediaElement, check_cache_expired: bool = True): def media_update(element: MediaElement, check_cache_expired: bool = True):
ex = media_expect_extractor(element.uri) ex = media_expect_extractor(element.uri)
ex.update_object( ex.update_object(
@ -28,6 +30,7 @@ def media_update(element: MediaElement, check_cache_expired: bool = True):
check_cache_expired=check_cache_expired, check_cache_expired=check_cache_expired,
) )
def media_extract_uri(uri: str) -> MediaElement: def media_extract_uri(uri: str) -> MediaElement:
elem: MediaElement = MediaExtractor.check_uri(uri) elem: MediaElement = MediaExtractor.check_uri(uri)
if not elem: if not elem:

@ -28,8 +28,8 @@ class MediaExtractor(GeneralExtractor[MediaElement, T]):
elem: MediaElement = MediaElement.get(uri=uri) elem: MediaElement = MediaElement.get(uri=uri)
if elem: if elem:
logging.warning( logging.warning(
f"Add missing URI mapping entry for uri {uri!r}, " + f"Add missing URI mapping entry for uri {uri!r}, "
"this should not happen at this point and is considered a bug" + "this should not happen at this point and is considered a bug"
) )
elem.add_single_uri(uri) elem.add_single_uri(uri)
return elem return elem
@ -41,13 +41,17 @@ class MediaExtractor(GeneralExtractor[MediaElement, T]):
def _load_object(self, data: ExtractedData[T]) -> MediaElement: def _load_object(self, data: ExtractedData[T]) -> MediaElement:
return data.load_media() return data.load_media()
def __create_author_collection(
    self, author_data: AuthorExtractedData
) -> MediaCollection:
    """Create a fresh collection for an author.

    Author collections are neither kept updated automatically nor watched
    in order.
    """
    collection = author_data.create_collection()
    collection.watch_in_order = False
    collection.keep_updated = False
    return collection
def __lookup_author_collection(self, author_data: AuthorExtractedData) -> Optional[MediaCollection]: def __lookup_author_collection(
self, author_data: AuthorExtractedData
) -> Optional[MediaCollection]:
return CollectionExtractor.check_uri( return CollectionExtractor.check_uri(
uri=author_data.object_uri, uri=author_data.object_uri,
) or MediaCollection.get( ) or MediaCollection.get(
@ -55,12 +59,16 @@ class MediaExtractor(GeneralExtractor[MediaElement, T]):
extractor_key=author_data.object_key, extractor_key=author_data.object_key,
) )
def __get_author_collection(
    self, author_data: AuthorExtractedData
) -> MediaCollection:
    """Fetch (or create) the author's collection and refresh its title."""
    collection = self.__lookup_author_collection(author_data)
    if collection is None:
        collection = self.__create_author_collection(author_data)
    # Only (re)title collections that are untitled or were titled by us.
    needs_title = not collection.title or collection.title.startswith("[author] ")
    if needs_title:
        collection.title = (
            f"[author] [{author_data.extractor_name}] {author_data.author_name}"
        )
    return collection
def __add_to_author_collection(self, element: MediaElement, data: Dict): def __add_to_author_collection(self, element: MediaElement, data: Dict):

@ -7,27 +7,37 @@ from typing import List, Optional, TypedDict
from youtubesearchpython import ResultMode, Video from youtubesearchpython import ResultMode, Video
from ...models import MediaElement from ...models import (
from ..generic import AuthorExtractedData, ExtractedData, SuitableLevel MediaElement,
)
from ..generic import (
AuthorExtractedData,
ExtractedData,
SuitableLevel,
)
from .base import MediaExtractor from .base import MediaExtractor
class YoutubeDuration(TypedDict):
    """Duration payload of a youtubesearchpython video result."""

    secondsText: str
class YoutubeViewCount(TypedDict):
    """View-count payload of a youtubesearchpython video result."""

    text: str
class YoutubeThumbnailData(TypedDict):
    """Single thumbnail entry of a youtubesearchpython video result."""

    url: str
    width: int
    height: int
class YoutubeChannelData(TypedDict):
    """Channel payload of a youtubesearchpython video result."""

    name: str
    id: str
    link: str
class YoutubeVideoData(TypedDict): class YoutubeVideoData(TypedDict):
id: str id: str
title: str title: str
@ -48,14 +58,17 @@ class YoutubeVideoData(TypedDict):
class YoutubeMediaExtractor(MediaExtractor[YoutubeVideoData]): class YoutubeMediaExtractor(MediaExtractor[YoutubeVideoData]):
__uri_regex = re.compile(r"""^ __uri_regex = re.compile(
r"""^
https?://( https?://(
(www\.)?youtube\.com/( (www\.)?youtube\.com/(
watch\?v= watch\?v=
)| )|
youtu\.be/ youtu\.be/
)(?P<id>[^/&?]+) )(?P<id>[^/&?]+)
$""", re.VERBOSE) $""",
re.VERBOSE,
)
def __init__(self): def __init__(self):
super().__init__("ytdl") super().__init__("ytdl")
@ -68,7 +81,7 @@ class YoutubeMediaExtractor(MediaExtractor[YoutubeVideoData]):
object_uri=data["channel"]["link"], object_uri=data["channel"]["link"],
extractor_name=self.name, extractor_name=self.name,
object_key=f"author:{data['channel']['id']}", object_key=f"author:{data['channel']['id']}",
author_name = data["channel"]["name"] author_name=data["channel"]["name"],
) )
def _extract_online(self, uri: str) -> ExtractedData[YoutubeVideoData]: def _extract_online(self, uri: str) -> ExtractedData[YoutubeVideoData]:
@ -86,5 +99,7 @@ class YoutubeMediaExtractor(MediaExtractor[YoutubeVideoData]):
def _update_object_raw(self, object: MediaElement, data: YoutubeVideoData): def _update_object_raw(self, object: MediaElement, data: YoutubeVideoData):
object.title = f"{data['title']} - {data['channel']['name']}" object.title = f"{data['title']} - {data['channel']['name']}"
object.release_date = datetime.strptime(data.get("uploadDate") or data["publishDate"], "%Y-%m-%d") object.release_date = datetime.strptime(
data.get("uploadDate") or data["publishDate"], "%Y-%m-%d"
)
object.length = int(data["duration"]["secondsText"]) object.length = int(data["duration"]["secondsText"])

@ -5,14 +5,15 @@ import logging
from typing import Dict, Optional from typing import Dict, Optional
from ...models import MediaElement from ...models import (
MediaElement,
)
from ..all.ytdl import get_video_info, YtdlErrorException from ..all.ytdl import get_video_info, YtdlErrorException
from ..generic import AuthorExtractedData, ExtractedData, ExtractionError, SuitableLevel from ..generic import AuthorExtractedData, ExtractedData, ExtractionError, SuitableLevel
from .base import MediaExtractor from .base import MediaExtractor
class YtdlMediaExtractor(MediaExtractor[Dict]): class YtdlMediaExtractor(MediaExtractor[Dict]):
def __init__(self): def __init__(self):
super().__init__("ytdl") super().__init__("ytdl")
@ -22,12 +23,18 @@ class YtdlMediaExtractor(MediaExtractor[Dict]):
def _get_author_data(self, data: Dict) -> Optional[AuthorExtractedData]:
    """Derive author (channel/uploader) collection data from a ytdl info dict."""
    extractor_key = data.get("extractor_key") or data["ie_key"]
    channel_key = data.get("channel_id") or data.get("uploader_id")
    channel_name = (
        data.get("channel") or data.get("uploader") or data.get("uploader_id")
    )
    # Both key and display name degrade to None when the info dict lacks them.
    object_key = f"author:{extractor_key}:{channel_key}" if channel_key else None
    display_name = f"[{extractor_key.lower()}] {channel_name}" if channel_name else None
    return AuthorExtractedData(
        object_uri=data.get("channel_url") or data.get("uploader_url"),
        extractor_name=self.name,
        object_key=object_key,
        author_name=display_name,
    )
def _extract_online(self, uri: str) -> ExtractedData[Dict]: def _extract_online(self, uri: str) -> ExtractedData[Dict]:
@ -48,6 +55,10 @@ class YtdlMediaExtractor(MediaExtractor[Dict]):
) )
def _update_object_raw(self, object: MediaElement, data: Dict) -> str: def _update_object_raw(self, object: MediaElement, data: Dict) -> str:
object.title = f"{data['title']} - {data['uploader']}" if "uploader" in data else data["title"] object.title = (
f"{data['title']} - {data['uploader']}"
if "uploader" in data
else data["title"]
)
object.release_date = datetime.strptime(data["upload_date"], "%Y%m%d") object.release_date = datetime.strptime(data["upload_date"], "%Y%m%d")
object.length = int(data["duration"]) object.length = int(data["duration"])

Loading…
Cancel
Save