Skip to content

Commit 05d1d07

Browse files
committed
de-aniworld provider
1 parent 82f8387 commit 05d1d07

File tree

10 files changed

+228
-347
lines changed

10 files changed

+228
-347
lines changed

weeb_cli/commands/search/watch_flow.py

Lines changed: 2 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -98,7 +98,7 @@ def _handle_single_season_watch(slug, details, episodes, season=1):
9898
if selected_ep is None:
9999
return
100100

101-
success = _play_episode(slug, selected_ep, details, season, episodes)
101+
success = _play_episode(slug, selected_ep, details, season, episodes, completed_ids)
102102

103103
if success:
104104
ep_num = selected_ep.get("number") or selected_ep.get("ep_num")
@@ -148,7 +148,7 @@ def _build_episode_choices(episodes, season, completed_ids, next_ep_num):
148148

149149
return ep_choices
150150

151-
def _play_episode(slug, selected_ep, details, season, episodes):
151+
def _play_episode(slug, selected_ep, details, season, episodes, completed_ids):
152152
ep_id = selected_ep.get("id")
153153
ep_num = selected_ep.get("number") or selected_ep.get("ep_num")
154154

@@ -184,8 +184,6 @@ def _play_episode(slug, selected_ep, details, season, episodes):
184184
time.sleep(1.5)
185185
return False
186186

187-
if len(valid_streams) < len(streams_list):
188-
console.print(f"[dim]{len(valid_streams)}/{len(streams_list)} {i18n.t('details.streams_valid')}[/dim]")
189187
streams_list = sort_streams(valid_streams)
190188

191189
selected_stream = _select_stream(streams_list)

weeb_cli/locales/de.json

Lines changed: 9 additions & 341 deletions
Large diffs are not rendered by default.

weeb_cli/locales/en.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@
226226
"results": "Search Results",
227227
"cancel": "Cancel",
228228
"error": "Search failed.",
229-
"recent": "Recent searches"
229+
"recent": "Recent Searches"
230230
},
231231
"details": {
232232
"error_slug": "Error: Invalid anime ID.",

weeb_cli/locales/tr.json

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -226,7 +226,7 @@
226226
"results": "Arama Sonuçları",
227227
"cancel": "İptal",
228228
"error": "Arama başarısız.",
229-
"recent": "Son aramalar"
229+
"recent": "Son Aramalar"
230230
},
231231
"details": {
232232
"error_slug": "Hata: Geçersiz anime ID.",

weeb_cli/providers/de/aniworld.py

Lines changed: 119 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,119 @@
1+
import json, re, requests
2+
from typing import List, Optional
3+
from bs4 import BeautifulSoup
4+
from urllib.parse import urljoin
5+
from weeb_cli.providers.base import BaseProvider, AnimeResult, AnimeDetails, Episode, StreamLink
6+
from weeb_cli.providers.registry import register_provider
7+
from weeb_cli.services.logger import debug
8+
9+
BASE_URL = "https://aniworld.to"
10+
AJAX_URL = "https://aniworld.to/ajax/search"
11+
STREAM_BASE = "https://aniworld.to/anime/stream/"
12+
13+
@register_provider(name="aniworld", lang="de", region="DE")
class AniWorldProvider(BaseProvider):
    """Provider for aniworld.to (German anime streaming site).

    Uses the site's AJAX search endpoint for queries, scrapes the
    stream pages for season/episode discovery, and resolves per-episode
    hoster redirect links to direct video URLs via the extractor helpers.
    """

    def __init__(self):
        super().__init__()
        # One session so cookies persist across search -> details -> streams.
        self.session = requests.Session()
        self.headers = {
            "User-Agent": "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/120.0.0.0 Safari/537.36",
            "Referer": "https://aniworld.to/"
        }

    def _get(self, url):
        """GET *url*; return the response body, or "" on any network/HTTP error."""
        try:
            resp = self.session.get(url, headers=self.headers, timeout=10)
            resp.raise_for_status()
            return resp.text
        except Exception as e:
            debug(f"[AniWorld] GET Error: {e}")
            return ""

    def _post(self, url, data):
        """POST *data* as an XHR request; return the body, or "" on error."""
        try:
            h = self.headers.copy()
            h["X-Requested-With"] = "XMLHttpRequest"
            resp = self.session.post(url, data=data, headers=h, timeout=10)
            return resp.text
        except Exception as e:
            debug(f"[AniWorld] POST Error: {e}")
            return ""

    def search(self, query: str) -> List[AnimeResult]:
        """Search the site for *query*; return series results ([] on failure)."""
        res = self._post(AJAX_URL, {"keyword": query})
        if not res:
            return []
        try:
            data = json.loads(res)
            results = []
            for item in data:
                link = item.get("link", "")
                # Only anime stream links; other result types are skipped.
                if link.startswith("/anime/stream/"):
                    slug = link.replace("/anime/stream/", "").split("/")[0]
                    # Titles come back with <em> highlight markup around the match.
                    title = item.get("title", "").replace("<em>", "").replace("</em>", "")
                    results.append(AnimeResult(id=slug, title=title, type="series"))
            return results
        except Exception as e:
            # Was a silent bare except; log so a broken endpoint is diagnosable.
            debug(f"[AniWorld] search parse error: {e}")
            return []

    def get_details(self, anime_id: str) -> Optional[AnimeDetails]:
        """Fetch title and the full episode list across all seasons.

        Returns None when the series page cannot be fetched.
        """
        slug = anime_id.split("/")[0]
        html = self._get(urljoin(STREAM_BASE, slug))
        if not html:
            return None
        soup = BeautifulSoup(html, "html.parser")
        h1 = soup.find("h1", itemprop="name")
        title = h1.text.strip() if h1 else slug
        # Season numbers appear as "staffel-N" links on the series page.
        seasons = sorted({int(s) for s in re.findall(r"staffel-(\d+)", html)}) or [1]
        all_episodes = []
        for s_num in seasons:
            # Reuse get_episodes rather than duplicating the scrape logic.
            all_episodes.extend(self.get_episodes(slug, season=s_num))
        return AnimeDetails(id=slug, title=title, description="", cover=None,
                            total_episodes=len(all_episodes), episodes=all_episodes)

    def get_episodes(self, anime_id: str, season: int = 1) -> List[Episode]:
        """List the episodes of one season ([] when the page can't be fetched).

        Season 0 is the site's "Filme" (movies) pseudo-season.
        """
        slug = anime_id.split("/")[0]
        html = self._get(f"{STREAM_BASE}{slug}/staffel-{season}")
        if not html:
            return []
        ep_matches = re.findall(rf"staffel-{season}/episode-(\d+)", html)
        label = "Folge" if season > 0 else "Film"
        return [
            Episode(
                id=f"{slug}/staffel-{season}/episode-{num}",
                number=num,
                title=f"{label} {num}",
                season=season,
            )
            for num in sorted({int(m) for m in ep_matches})
        ]

    def get_streams(self, anime_id: str, episode_id: str) -> List[StreamLink]:
        """Resolve every hoster listed on an episode page to stream links.

        Follows each /redirect/ link to the hoster and, when a dedicated
        extractor exists, resolves it further to a direct video URL;
        otherwise the hoster embed URL itself is returned.
        """
        # Local imports keep the heavy extractor modules off the import path
        # until streams are actually requested.
        from weeb_cli.providers.extractors.voe import extract_voe
        from weeb_cli.providers.extractors.filemoon import extract_filemoon
        from weeb_cli.providers.extractors.streamtape import extract_streamtape
        from weeb_cli.providers.extractors.vidoza import extract_vidoza
        from weeb_cli.providers.extractors.doodstream import extract_doodstream

        html = self._get(urljoin(STREAM_BASE, episode_id))
        if not html:
            return []
        soup = BeautifulSoup(html, "html.parser")
        # data-lang-key values observed on the site; anything else -> "N/A".
        lang_map = {"1": "GerDub", "2": "GerSub", "3": "EngSub"}
        streams = []
        for item in soup.find_all("li", attrs={"data-link-target": True}):
            target = item["data-link-target"]
            if not target.startswith("/redirect/"):
                continue
            lang_name = lang_map.get(item.get("data-lang-key", "0"), "N/A")
            # The hoster name is encoded as the non-"icon" CSS class of the
            # <i class="icon ..."> element, with the <h4> text as fallback.
            h_name = "Unknown"
            icon = item.find("i", class_="icon")
            if icon:
                for cls in icon.get("class", []):
                    if cls != "icon":
                        h_name = cls
                        break
            if h_name == "Unknown" and item.find("h4"):
                h_name = item.find("h4").text.strip()
            try:
                resp = self.session.get(urljoin(BASE_URL, target), headers=self.headers,
                                        timeout=10, allow_redirects=True)
                e_url = resp.url
                v_url = None
                h_low = h_name.lower()
                if "voe" in h_low:
                    v_url = extract_voe(e_url)
                elif "filemoon" in h_low:
                    v_url = extract_filemoon(e_url)
                elif "streamtape" in h_low:
                    v_url = extract_streamtape(e_url)
                elif "vidoza" in h_low:
                    v_url = extract_vidoza(e_url)
                elif "dood" in h_low:
                    v_url = extract_doodstream(e_url)
                f_url = v_url or e_url
                if f_url:
                    streams.append(StreamLink(url=f_url, quality=f"[{lang_name}]", server=h_name))
            except Exception as e:
                # Was a silent bare except; one broken hoster must not abort the rest.
                debug(f"[AniWorld] stream resolve error ({h_name}): {e}")
                continue
        return streams
Lines changed: 12 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,12 @@
1+
import re, requests, time, random, string
2+
def extract_doodstream(url):
    """Resolve a DoodStream embed *url* to a direct video URL.

    Fetches the embed page, follows the /pass_md5/ token endpoint and
    appends the random token + millisecond expiry the player normally
    generates client-side.  Returns None on any error or when the page
    layout is unrecognised.
    """
    try:
        h = {"User-Agent": "Mozilla/5.0", "Referer": "https://aniworld.to/"}
        html = requests.get(url, headers=h, timeout=10).text
        # The player fetches the stream base via $.get('/pass_md5/...').
        # The dot after $ must be escaped, otherwise it matches any char.
        m = re.search(r"\$\.get\('(/pass_md5/[^']+)'", html)
        if not m:
            return None
        purl = f"https://dood.so{m.group(1)}"
        base = requests.get(purl, headers={"Referer": url, "User-Agent": h["User-Agent"]}, timeout=10).text
        # Mimic the player's random 10-char suffix.
        tok = ''.join(random.choice(string.ascii_letters + string.digits) for _ in range(10))
        return f"{base}{tok}?token={purl.split('/')[-1]}&expiry={int(time.time() * 1000)}"
    except Exception:
        # Narrowed from a bare except: best-effort extractor, None = "no stream".
        return None
Lines changed: 31 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,31 @@
1+
import re, requests
2+
from weeb_cli.services.logger import debug
3+
4+
def decode_packer(p, r, c, d):
    """Reverse Dean Edwards' p.a.c.k.e.r JavaScript obfuscation.

    Every base-*r* token in the packed source *p* is looked up in the
    word table *d*; unknown, out-of-range, or empty entries are left
    untouched.  (*c* is the token count from the packer header and is
    not needed for decoding.)
    """
    token_pattern = re.compile(r'\b[0-9a-zA-Z]+\b')

    def substitute(match):
        token = match.group(0)
        try:
            index = int(token, r)
            if index < len(d) and d[index]:
                return d[index]
        except Exception:
            pass
        return token

    return token_pattern.sub(substitute, p)
11+
12+
def extract_filemoon(url):
    """Resolve a Filemoon embed *url* to a direct stream URL, or None.

    Follows the outer iframe to the player page, unpacks the
    p.a.c.k.e.r-obfuscated script, and pulls the m3u8 (or plain source)
    URL out of the unpacked player config.
    """
    h = {"User-Agent": "Mozilla/5.0", "Referer": "https://aniworld.to/"}
    try:
        sess = requests.Session()
        page = sess.get(url, headers=h, timeout=10).text
        frame = re.search(r'<iframe[^>]*src="([^"]+)"', page)
        if not frame:
            return None
        frame_url = frame.group(1)
        # Protocol-relative iframe src needs an explicit scheme.
        if frame_url.startswith('//'):
            frame_url = 'https:' + frame_url
        inner = sess.get(frame_url, headers={"User-Agent": h["User-Agent"], "Referer": "https://filemoon.to/"}, timeout=10).text
        packed = re.search(r"eval\(function\(p,a,c,k,e,d\){[\s\S]+?}\('([\s\S]+?)',(\d+),(\d+),'([\s\S]+?)'\.split\('\|'\)\)\)", inner)
        if not packed:
            return None
        src, radix, count, words = packed.groups()
        unpacked = decode_packer(src, int(radix), int(count), words.split('|'))
        # Prefer the HLS playlist; fall back to the first plain source entry.
        hls = re.search(r'file:"([^"]+\.m3u8[^"]*)"', unpacked)
        if hls:
            return hls.group(1)
        fallback = re.search(r'sources:\[{file:"([^"]+)"', unpacked)
        return fallback.group(1) if fallback else None
    except Exception as e:
        debug(f"[Filemoon] Error: {e}")
        return None
Lines changed: 8 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,8 @@
1+
import re, requests
2+
def extract_streamtape(url):
    """Resolve a Streamtape embed *url* to a direct video URL, or None.

    Streamtape hides the link in a script that assembles it from two
    string halves assigned to the 'robotlink' element's innerHTML.
    """
    try:
        h = {"User-Agent": "Mozilla/5.0", "Referer": "https://aniworld.to/"}
        html = requests.get(url, headers=h, timeout=10).text
        m = re.search(r"document\.getElementById\('robotlink'\)\.innerHTML\s*=\s*'([^']+)'\s*\+\s*'([^']+)'", html)
        return f"https:{m.group(1)}{m.group(2)}" if m else None
    except Exception:
        # Narrowed from a bare except: best-effort extractor, None = "no stream".
        return None
Lines changed: 7 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,7 @@
1+
import re, requests
2+
def extract_vidoza(url):
    """Resolve a Vidoza embed *url* to its direct .mp4 URL, or None.

    Vidoza exposes the video as a plain <source src="....mp4"> tag.
    """
    try:
        html = requests.get(url, headers={"User-Agent": "Mozilla/5.0"}, timeout=10).text
        m = re.search(r'<source\s+src="([^"]+\.mp4)"', html)
        return m.group(1) if m else None
    except Exception:
        # Narrowed from a bare except: best-effort extractor, None = "no stream".
        return None
Lines changed: 38 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,38 @@
1+
import base64, json, re, requests
2+
from typing import Optional
3+
from weeb_cli.services.logger import debug
4+
5+
def rot13(s):
    """Return *s* with ASCII letters rotated 13 places (ROT13).

    Non-letter characters pass through unchanged.  Uses a single
    str.translate pass instead of per-character string concatenation,
    which is quadratic in the worst case.
    """
    lower = "abcdefghijklmnopqrstuvwxyz"
    upper = lower.upper()
    table = str.maketrans(lower + upper,
                          lower[13:] + lower[:13] + upper[13:] + upper[:13])
    return s.translate(table)
12+
13+
def caesar_shift(s, shift):
    """Shift the Unicode code point of every character in *s* by *shift*."""
    shifted = [chr(ord(ch) + shift) for ch in s]
    return "".join(shifted)
14+
15+
def deobfuscate(encoded):
    """Decode VOE's obfuscated player JSON blob.

    Reverses the obfuscation pipeline observed on VOE embed pages:
    ROT13 -> strip junk separator pairs -> base64-decode -> shift each
    char back by 3 and reverse the string -> base64-decode again ->
    parse as JSON.  Malformed input raises (e.g. binascii.Error,
    UnicodeDecodeError, json.JSONDecodeError); extract_voe catches
    all exceptions around this call.
    """
    res = rot13(encoded)
    # Two-character junk separators are sprinkled through the payload.
    for sep in ['@$', '^^', '~@', '%?', '*~', '!!', '#&']: res = res.replace(sep, '')
    res = base64.b64decode(res).decode('utf-8')
    # Undo the +3 caesar shift, then un-reverse the string.
    res = caesar_shift(res, -3)[::-1]
    return json.loads(base64.b64decode(res).decode('utf-8'))
21+
22+
def extract_voe(url):
    """Resolve a VOE embed *url* to a direct stream URL, or None on failure.

    Follows an optional JS redirect, locates the obfuscated
    application/json script blob, deobfuscates it, and picks the
    "file"/"source" entry (with "fallback_mp4" as a last resort).
    """
    headers = {"User-Agent": "Mozilla/5.0", "Referer": "https://aniworld.to/"}
    try:
        sess = requests.Session()
        page = sess.get(url, headers=headers, timeout=10, allow_redirects=True).text
        # Some mirrors bounce through a window.location JS redirect first.
        redirect = re.search(r"window\.location\.href\s*=\s*'([^']+)'", page)
        if redirect:
            page = sess.get(redirect.group(1), headers=headers, timeout=10).text
        blob = re.search(r'<script\s+type="application/json"[^>]*>([\s\S]*?)</script>', page)
        if blob is None:
            return None
        payload = deobfuscate(blob.group(1).strip())
        stream = payload.get("file") or payload.get("source")
        if not stream:
            fallback = payload.get("fallback_mp4")
            if fallback:
                stream = fallback[0] if isinstance(fallback, list) else fallback
        return stream
    except Exception as e:
        debug(f"[VOE] Error: {e}")
        return None

0 commit comments

Comments
 (0)