Remove extractor_cache and all remaining usages of it

master
Felix Stupp 3 years ago
parent fe183d0411
commit beaf99e539
Signed by: zocker
GPG Key ID: 93E1BD26F6B02FB7

@ -124,21 +124,20 @@ class TtRssCollectionExtractor(CollectionExtractor[HeadlineList]):
def __decode_uri(self, uri: str) -> TtRssUri:
return TtRssUri.from_str_uri(uri)
def can_extract_offline(self, uri: str, cache: Dict = None) -> bool:
def can_extract_offline(self, uri: str) -> bool:
return True
def _cache_expired(self, date: datetime) -> bool:
return (datetime.now() - date) < timedelta(hours=4)
def _extract_offline(self, uri: str, cache: Dict = None) -> ExtractedData[HeadlineList]:
def _extract_offline(self, uri: str) -> ExtractedData[HeadlineList]:
return ExtractedData(
extractor_name=self.name,
object_key=uri,
object_uri=uri,
cache=cache,
)
def _extract_online(self, uri: str, cache: Dict = None) -> ExtractedData[HeadlineList]:
def _extract_online(self, uri: str) -> ExtractedData[HeadlineList]:
rss_uri = self.__decode_uri(uri)
logging.info(f"Extract collection from tt-rss: {uri!r}")
data = rss_uri.request(self.__params, order_by="feed_dates", view_mode="unread")

@ -45,22 +45,21 @@ class YouTubeCollectionExtractor(CollectionExtractor[Dict]):
def __init__(self):
super().__init__("youtube")
def can_extract_offline(self, uri: str, cache: Dict = None) -> bool:
def can_extract_offline(self, uri: str) -> bool:
return True
def _cache_expired(self, date: datetime) -> bool:
return (datetime.now() - date) < timedelta(hours=4)
def _extract_offline(self, uri: str, cache: Dict = None) -> ExtractedData[Dict]:
def _extract_offline(self, uri: str) -> ExtractedData[Dict]:
playlist_id = self.__convert_if_required(self.__get_id(uri))
return ExtractedData(
extractor_name=self.name,
object_key=playlist_id,
object_uri=uri,
cache=cache,
)
def _extract_online(self, uri: str, cache: Dict = None) -> ExtractedData[Dict]:
def _extract_online(self, uri: str) -> ExtractedData[Dict]:
playlist_id = self.__convert_if_required(self.__get_id(uri))
playlist_link = f"https://www.youtube.com/playlist?list={playlist_id}"
logging.info(f"Request Youtube playlist {playlist_link!r}")

@ -40,7 +40,6 @@ class ExtractedDataLight:
@dataclass
class ExtractedData(ExtractedDataLight, Generic[T]):
data: T = dataclasses.field(default=None, repr=False, compare=False)
cache: Dict = dataclasses.field(default=None, repr=False, compare=False)
@property
def has_data(self) -> bool:
@ -88,16 +87,16 @@ class GeneralExtractor(Generic[E, T]):
#def uri_suitable(self, uri: str) -> bool:
# raise NotImplementedError()
def can_extract_offline(self, uri: str, cache: Dict = None) -> bool:
def can_extract_offline(self, uri: str) -> bool:
return False
def _cache_expired(self, date: datetime) -> bool:
return False
def _extract_offline_only(self, uri: str, cache: Dict = None) -> ExtractedData[T]:
def _extract_offline_only(self, uri: str) -> ExtractedData[T]:
raise NotImplementedError()
def _extract_online(self, uri: str, cache: Dict = None) -> ExtractedData[T]:
def _extract_online(self, uri: str) -> ExtractedData[T]:
raise NotImplementedError()
def _update_object_raw(self, object: E, data: T):
@ -108,16 +107,15 @@ class GeneralExtractor(Generic[E, T]):
# defined
def _extract_offline(self, uri: str, cache: Dict = None) -> ExtractedData[T]:
return self._extract_offline_only(uri, cache) if self.can_extract_offline(uri, cache) else self._extract_online(uri, cache)
def _extract_offline(self, uri: str) -> ExtractedData[T]:
return self._extract_offline_only(uri) if self.can_extract_offline(uri) else self._extract_online(uri)
def _extract_required(self, data: ExtractedData[T]) -> ExtractedData[T]:
if data.has_data:
return data
return self._extract_online(data.object_uri, data.cache)
return self._extract_online(data.object_uri)
def _update_object(self, object: E, data: ExtractedData[T]) -> E:
object.extractor_cache = data.cache
object.uri = data.object_uri
object.add_uris((data.object_uri,))
self._update_object_raw(object, data.data)
@ -127,7 +125,7 @@ class GeneralExtractor(Generic[E, T]):
def update_object(self, object: E, check_cache_expired: bool = True) -> E:
if object.last_updated and check_cache_expired and not self._cache_expired(object.last_updated):
return object
data = self._extract_online(object.uri, object.extractor_cache)
data = self._extract_online(object.uri)
logging.debug(f"Updating info for media: {data!r}")
return self._update_object(object, data)

@ -65,16 +65,12 @@ class YtdlMediaExtractor(MediaExtractor[Dict]):
author_name = f"{video_extractor_key}: {author_name}" if author_name else None,
)
def _extract_online(self, uri: str, cache: Dict) -> ExtractedData[Dict]:
if cache:
logging.debug(f"Use preloaded cache to get infos of video {uri!r}")
vid_data = cache
else:
logging.info(f"Request info using youtube-dl for {uri!r}")
try:
vid_data = get_video_info(uri)
except YtdlErrorException as e:
raise ExtractionError from e
def _extract_online(self, uri: str) -> ExtractedData[Dict]:
logging.info(f"Request info using youtube-dl for {uri!r}")
try:
vid_data = get_video_info(uri)
except YtdlErrorException as e:
raise ExtractionError from e
if vid_data.get("is_live", False):
raise ExtractionError("Video is live, so pass extraction")
ytdl_extractor_key = vid_data.get("extractor_key") or vid_data["ie_key"]
@ -84,7 +80,6 @@ class YtdlMediaExtractor(MediaExtractor[Dict]):
extractor_name=self.name,
object_key=f"{ytdl_extractor_key}:{ytdl_video_id}",
data=vid_data,
cache=None,
)
def _update_object_raw(self, object: MediaElement, data: Dict) -> str:

@ -118,7 +118,6 @@ class MediaElement(db.Entity):
extractor_name: str = orm.Required(str)
extractor_key: str = orm.Required(str)
orm.composite_key(extractor_name, extractor_key)
_extractor_cache: Dict = orm.Optional(orm.Json, nullable=True)
last_updated: datetime = orm.Optional(datetime)
watched: bool = orm.Required(bool, default=False)
@ -130,13 +129,6 @@ class MediaElement(db.Entity):
_uris: Iterable[MediaUriMapping] = orm.Set(lambda: MediaUriMapping)
collection_links: Iterable[MediaCollectionLink] = orm.Set(lambda: MediaCollectionLink)
def __get_cache(self):
return self._extractor_cache
def __set_cache(self, cache: Dict):
self._extractor_cache = cache
self.last_updated = datetime.now()
extractor_cache = property(__get_cache, __set_cache)
@property
def left_length(self) -> int:
return self.length - self.progress
@ -227,7 +219,6 @@ class MediaCollection(db.Entity):
extractor_name: str = orm.Required(str)
extractor_key: str = orm.Required(str)
orm.composite_key(extractor_name, extractor_key)
_extractor_cache: Dict = orm.Optional(orm.Json, nullable=True)
last_updated: datetime = orm.Optional(datetime)
keep_updated: bool = orm.Required(bool, default=False)
@ -240,13 +231,6 @@ class MediaCollection(db.Entity):
_uris: Iterable[CollectionUriMapping] = orm.Set(lambda: CollectionUriMapping)
media_links: Iterable[MediaCollectionLink] = orm.Set(MediaCollectionLink)
def __get_cache(self):
return self._extractor_cache
def __set_cache(self, cache: Dict):
self._extractor_cache = cache
self.last_updated = datetime.now()
extractor_cache = property(__get_cache, __set_cache)
@property
def next_episode(self) -> Optional[MediaCollectionLink]:
#return orm \

Loading…
Cancel
Save