-"""Australia OAIC collector — scrapes OAIC privacy decisions."""
+"""Australia OAIC collector — scrapes privacy determinations from oaic.gov.au.
+
+Scrapes all pages of determinations, extracting case metadata and summary text
+directly from the OAIC listing. Each case is stored as a plain-text document
+containing the structured summary (title, date, status, legislative provision,
+determination, catchwords). The AustLII URL is preserved as source_page_url
+for linking.
+"""
 
 from __future__ import annotations
 
 import logging
+import time
 
 import requests
 from bs4 import BeautifulSoup
 from urllib.parse import urljoin
 
 from pipeline.collectors.base import BaseCollector
 from pipeline.config import HTTP_TIMEOUT
 from pipeline.models import DiscoveredDoc
 
 log = logging.getLogger(__name__)
 
-_BASE_URL = "https://www.oaic.gov.au/privacy/privacy-decisions"
+_BASE_URL = "https://www.oaic.gov.au/privacy/privacy-assessments-and-decisions/privacy-decisions/privacy-determinations"
 
 
 class OAICCollector(BaseCollector):
-    """Discover OAIC privacy decision documents."""
+    """Discover OAIC privacy determinations by scraping the listing pages."""
 
     def discover(self) -> list[DiscoveredDoc]:
         docs: list[DiscoveredDoc] = []
-
-        try:
-            resp = requests.get(_BASE_URL, headers=self.get_headers(), timeout=HTTP_TIMEOUT)
-            resp.raise_for_status()
-        except requests.RequestException as e:
-            log.error(f"OAIC fetch failed: {e}")
-            return []
-
-        soup = BeautifulSoup(resp.text, "lxml")
-
-        for link in soup.select("a[href*='/privacy-decisions/'], a[href*='/privacy/determinations/']"):
-            href = link.get("href", "")
-            title = link.get_text(strip=True)
-            if not href or not title or len(title) < 5:
-                continue
-
-            case_page_url = urljoin("https://www.oaic.gov.au", href)
-
-            # Check for PDF link vs HTML decision
-            if href.endswith(".pdf"):
-                doc_url = case_page_url
-                file_type = "pdf"
-            else:
-                doc_url = case_page_url
-                file_type = "html"
-
-            docs.append(DiscoveredDoc(
-                case_title=title,
-                source_page_url=case_page_url,
-                document_url=doc_url,
-                file_type=file_type,
-            ))
+        seen_urls: set[str] = set()
+        page = 1
+
+        # First page has no param; subsequent pages use result_26111_result_page=N
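+        # e.g. page 2 is fetched from _BASE_URL + "?result_26111_result_page=2"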
+        next_url: str | None = _BASE_URL
+
+        while next_url and page <= 20:  # safety limit
+            try:
+                resp = requests.get(next_url, headers=self.get_headers(), timeout=HTTP_TIMEOUT)
+                resp.raise_for_status()
+            except requests.RequestException as e:
+                log.warning(f"OAIC page {page} fetch failed: {e}")
+                break
+
+            soup = BeautifulSoup(resp.text, "lxml")
+            items = soup.select("article.custom-listing__item")
+
+            if not items:
+                break
+
+            for item in items:
+                cells = item.select(".custom-listing__cell")
+                if len(cells) < 6:
+                    continue
+
+                # Extract structured data from the labelled cells
+                title_text = cells[0].get_text(strip=True).removeprefix("Decision").strip()
+                date_text = cells[1].get_text(strip=True).removeprefix("Decision year").strip()
+                status_text = cells[2].get_text(strip=True).removeprefix("Status").strip()
+                provision_text = cells[3].get_text(strip=True).removeprefix("Legislative provision").strip()
+                determination_text = cells[4].get_text(strip=True).removeprefix("Determination").strip()
+                catchword_text = cells[5].get_text(strip=True).removeprefix("Catchword summary").strip()
+
+                # Get the AustLII link, dropping any fragment
+                austlii_link = item.select_one("a[href*='austlii']")
+                austlii_url = austlii_link["href"].split("#")[0] if austlii_link else ""
+
+                # Build a synthetic document containing all the summary text.
+                # This will be stored as the "document" for text extraction.
+                doc_content = f"""OAIC Privacy Determination
+Title: {title_text}
+Date: {date_text}
+Status: {status_text}
+Legislative Provision: {provision_text}
+Determination: {determination_text}
+Catchword Summary: {catchword_text}
+Source: {austlii_url}"""
+
+                # Use a stable URL as document_url for dedup: the AustLII URL,
+                # or the OAIC page URL plus a case identifier
+                doc_url = austlii_url or f"{_BASE_URL}#{title_text[:50]}"
+
+                if doc_url in seen_urls:
+                    continue
+                seen_urls.add(doc_url)
+
+                docs.append(DiscoveredDoc(
+                    case_title=title_text,
+                    source_page_url=austlii_url,
+                    document_url=doc_url,
+                    file_type="text",
+                    # Store the pre-built summary as metadata for the downloader
+                    _oaic_summary=doc_content,
+                ))
+
+            log.info(f"OAIC page {page}: found {len(items)} cases (total: {len(docs)})")
+
+            # Find the link to the next page number
+            next_link = None
+            pag_links = soup.select("a.search-results__pagination-navlinks")
+            for pl in pag_links:
+                href = pl.get("href", "")
+                text = pl.get_text(strip=True)
+                if text == str(page + 1) and href:
+                    next_link = href
+                    break
+
+            # Pagination hrefs may be relative; resolve against the fetched page
+            next_url = urljoin(resp.url, next_link) if next_link else None
+            page += 1
+            time.sleep(0.5)  # Be polite
 
         log.info(f"OAIC discovery complete: {len(docs)} documents")
         return docs
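
Note: this diff only stashes the pre-built summary on the DiscoveredDoc via the
_oaic_summary field; the downloader side that consumes it is not shown here. A
minimal sketch of what that consumption could look like, assuming DiscoveredDoc
carries the field through to the downloader (the fetch_text name and signature
are hypothetical, not part of this change):

    import requests

    from pipeline.config import HTTP_TIMEOUT
    from pipeline.models import DiscoveredDoc

    def fetch_text(doc: DiscoveredDoc) -> str:
        # Hypothetical downloader hook: if the collector pre-built the summary
        # (as OAICCollector does via _oaic_summary), skip the network fetch.
        summary = getattr(doc, "_oaic_summary", None)
        if summary:
            return summary
        # Otherwise fetch the document URL as usual.
        resp = requests.get(doc.document_url, timeout=HTTP_TIMEOUT)
        resp.raise_for_status()
        return resp.text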