# --------------------------------------------------------------
# CORE LOGIC
# --------------------------------------------------------------
def _load_cache() -> List[str] | None:
    """Read the title cache from disk.

    Returns the cached list of titles when the cache file exists and is
    younger than ``CACHE_TTL_SECONDS``; returns ``None`` when the file is
    missing, stale, or unreadable (any read/parse failure is treated as a
    cache miss rather than an error).
    """
    if not CACHE_FILE.is_file():
        return None

    # Stale cache counts as a miss.
    age = time.time() - CACHE_FILE.stat().st_mtime
    if age > CACHE_TTL_SECONDS:
        return None

    # Best-effort read: a corrupt or unreadable cache is simply ignored.
    try:
        raw = CACHE_FILE.read_text(encoding="utf-8")
        return json.loads(raw)
    except Exception:
        return None
Returns ------- List[str] Alphabetically sorted, duplicate‑free series titles. """ if not force_refresh: cached = _load_cache() if cached is not None: return cached
Author: ChatGPT (2024‑06) License: MIT """
def _next_page_url(html: str) -> str | None:
    """Find the pagination link to the next results page.

    Searches *html* for either a ``rel="next"`` anchor or an
    ``li.next > a`` link. Relative hrefs are made absolute against
    ``BASE_URL``. Returns ``None`` when no next-page link is present,
    i.e. we are on the last page.
    """
    link = BeautifulSoup(html, "lxml").select_one("a[rel='next'], li.next > a")
    if link is None:
        return None

    href = link.get("href")
    if not href:
        return None

    # Some links are relative – turn them into absolute URLs.
    return requests.compat.urljoin(BASE_URL, href)
def _fetch_page(url: str, timeout: float = 15) -> str:
    """Download a page and return its body as text.

    Parameters
    ----------
    url : str
        Absolute URL to fetch.
    timeout : float, optional
        Seconds to wait for the server before giving up. Defaults to 15,
        preserving the previously hard-coded value.

    Returns
    -------
    str
        The decoded response body.

    Raises
    ------
    requests.HTTPError
        For 4xx/5xx responses (via ``raise_for_status``).
    requests.Timeout
        When the request exceeds *timeout* seconds.
    """
    resp = requests.get(url, headers=HEADERS, timeout=timeout)
    resp.raise_for_status()
    return resp.text
while page_url: html = _fetch_page(page_url) titles = _extract_titles(html) all_titles.update(titles) page_url = _next_page_url(html)