diff --git a/ScraperEngine/main.py b/ScraperEngine/main.py index 70e64db..43ce228 100644 --- a/ScraperEngine/main.py +++ b/ScraperEngine/main.py @@ -1,5 +1,6 @@ import falcon import falcon.asgi +from resources.animegg import AnimeggResource from resources.animeworld import AnimeworldResource from resources.dreamsub import DreamsubResource @@ -15,5 +16,6 @@ AnimeworldResource(app) GogoanimeResource(app) DesuonlineResource(app) +AnimeggResource(app) -AniplaylistResource(app) \ No newline at end of file +AniplaylistResource(app) diff --git a/ScraperEngine/resources/animegg.py b/ScraperEngine/resources/animegg.py new file mode 100644 index 0000000..d8197e3 --- /dev/null +++ b/ScraperEngine/resources/animegg.py @@ -0,0 +1,63 @@ +from operator import is_ +import falcon +import aiohttp +from utils.session import execute_proxied_request + +from falcon import uri +from typing import List +from bs4 import BeautifulSoup +from interfaces.resource import ScraperResource +from models.episode import Episode +from models.matching import Matching + + +class AnimeggResource(ScraperResource): + + def __init__(self, app: falcon.App) -> None: + # On this line, use the name you used inside MongoDB's websites collection + super().__init__(app, "animegg") + + async def get_possible_matchings(self, res: falcon.Response, title: str) -> List[Matching]: + matchings = [] + url = f"{self.base_url}/search/?q={uri.encode(title)}" + try: + page = await execute_proxied_request(self, url) + # Search results class is "mse" + results = page.find_all(class_="mse") + for result in results: + url = result.get("href") + title = result.select_one(".searchre > .media-body > .first > h2").text + matchings.append(Matching(title, url)) + except Exception as e: + print(str(e)) + raise + return matchings + + async def get_episode(self, res: falcon.Response, path: str, number: int) -> List[Episode]: + episodes = [] + series_name = uri.encode(path.split("/")[-1].replace(" ", "-").replace(",", "")) + video_url = 
f"{self.base_url}/{series_name}-episode-{number}" + try: + page = await execute_proxied_request(self, video_url) + links = page.find("ul", id="videos").find_next("li").find_all("a") + for video in links: + embed_id = video["data-id"] + + is_dub = video["data-version"] == "dubbed" + vid_type = 'subbed' + if is_dub: + vid_type = "dubbed" + + quality_text = video.select_one("span.btn-hd").text + quality = 1080 if quality_text == "HD" else 480 + embed_url = f"{self.base_url}/embed/{embed_id}" + page = await execute_proxied_request(self, embed_url) + video_path = page.select_one("meta[property='og:video']")["content"] + video_url = f"{self.base_url}{video_path}" + episodes.append(Episode(f"Episode {number}", f"{embed_url}#{vid_type}", video_url, quality, "mp4")) + + except Exception as e: + print(str(e)) + raise + + return episodes \ No newline at end of file diff --git a/ScraperEngine/resources/desuonline.py b/ScraperEngine/resources/desuonline.py index ff935b7..d9ad354 100644 --- a/ScraperEngine/resources/desuonline.py +++ b/ScraperEngine/resources/desuonline.py @@ -94,8 +94,7 @@ async def get_episode(self, res: falcon.Response, path: str, number: int) -> Lis episodes = [] url = f"{self.base_url}{path}-odcinek-{number}" - print(url) - + try: # This here works, but theres a faster method # But in case there are any bugs with current approach u you can use this diff --git a/SyncService/Models/Websites/AnimeggWebsite.cs b/SyncService/Models/Websites/AnimeggWebsite.cs new file mode 100644 index 0000000..0521a0f --- /dev/null +++ b/SyncService/Models/Websites/AnimeggWebsite.cs @@ -0,0 +1,32 @@ +using Commons; +using System; +using System.Collections.Generic; +using System.Linq; +using System.Text; +using System.Threading.Tasks; + +namespace SyncService.Models.Websites +{ + public class AnimeggWebsite : IWebsite + { + public AnimeggWebsite(Website website) : base(website) + { + } + + public override bool AnalyzeMatching(Anime anime, AnimeMatching matching, string 
sourceTitle) + { + if(matching.EpisodePath.Contains("#dubbed")) { + matching.isDub = true; + } + return base.AnalyzeMatching(anime, matching, sourceTitle); + } + + public override Dictionary<string, string> GetVideoProxyHeaders(AnimeMatching matching, Dictionary<string, string> values = null) + { + return new Dictionary<string, string> + { + { "referer", matching.EpisodePath.Replace("#dubbed", "").Replace("#subbed", "") } + }; + } + } +} diff --git a/SyncService/Services/WebsiteScraperService.cs b/SyncService/Services/WebsiteScraperService.cs index b455b1e..2e4f129 100644 --- a/SyncService/Services/WebsiteScraperService.cs +++ b/SyncService/Services/WebsiteScraperService.cs @@ -84,6 +84,9 @@ public override async Task Start(CancellationToken cancellationToken) case "gogoanime": iWeb = new GogoanimeWebsite(website); break; + case "animegg": + iWeb = new AnimeggWebsite(website); + break; case "desuonline": iWeb = new DesuonlineWebsite(website); break; diff --git a/WebAPI/Resources/localizations.1_1_5.json b/WebAPI/Resources/localizations.1_1_5.json index c2717bd..7bbfd71 100644 --- a/WebAPI/Resources/localizations.1_1_5.json +++ b/WebAPI/Resources/localizations.1_1_5.json @@ -1,16 +1,16 @@ { - "localizations": [ - { - "i18n": "en", - "label": "English" - }, - { - "i18n": "it", - "label": "Italian" - }, - { - "i18n": "pl", - "label": "Polish" - } - ] -} \ No newline at end of file + "localizations": [ + { + "i18n": "en", + "label": "English" + }, + { + "i18n": "it", + "label": "Italian" + }, + { + "i18n": "pl", + "label": "Polish" + } + ] +} \ No newline at end of file diff --git a/WebAPI/WebAPI.csproj b/WebAPI/WebAPI.csproj index bdba398..7d4dd05 100644 --- a/WebAPI/WebAPI.csproj +++ b/WebAPI/WebAPI.csproj @@ -25,8 +25,8 @@ - +