#!/usr/bin/env python3
"""
Indian Railway Station Names from Wikipedia

Scrapes railway station names from Wikipedia's list of Indian railway stations
and fetches multilingual names from the titles of the corresponding articles on
regional-language Wikipedias (via interlanguage links).

This provides real, validated station names from public sources.

Usage:
    python scrape_wikipedia_stations.py
    python scrape_wikipedia_stations.py --limit 50 --output stations.tsv
"""

import argparse
import csv
import time
from pathlib import Path
from typing import Dict, List, Optional
from urllib.parse import unquote

import requests
from bs4 import BeautifulSoup

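# Wikimedia's API etiquette asks clients to send a descriptive User-Agent that
# identifies the tool and gives the operator's contact point, hence the project
# URL in the string below.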
HEADERS = {
    'User-Agent': 'Indicate-Research-Bot/1.0 (https://github.com/in-rolls/indicate; research purposes)'
}

# Language Wikipedia prefixes
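# Keys are Wikipedia language codes and must match the 'lang' values returned
# by the langlinks API; the values are human-readable labels and are not used
# by the code below.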
LANG_WIKIS = {
    'hi': 'Hindi',
    'bn': 'Bengali',
    'ta': 'Tamil',
    'te': 'Telugu',
    'kn': 'Kannada',
    'ml': 'Malayalam',
    'mr': 'Marathi',
    'gu': 'Gujarati',
    'pa': 'Punjabi',
    'or': 'Odia',
    'ur': 'Urdu'
}


def scrape_station_list() -> List[Dict]:
    """Scrape list of stations from English Wikipedia."""
    url = 'https://en.wikipedia.org/wiki/List_of_railway_stations_in_India'

    print("Fetching station list from Wikipedia...")
    response = requests.get(url, headers=HEADERS, timeout=30)

    if response.status_code != 200:
        print(f"Error: HTTP {response.status_code}")
        return []

    soup = BeautifulSoup(response.content, 'html.parser')
    tables = soup.find_all('table', class_='wikitable')

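    # Layout assumption: each wikitable row lists the station name in the first
    # column and the station code in the second; rows with fewer cells are
    # skipped.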
    stations = []

    for table in tables:
        rows = table.find_all('tr')[1:]  # Skip header

        for row in rows:
            cells = row.find_all('td')
            if len(cells) < 2:
                continue

            # Extract station name and code
            name_cell = cells[0]
            code_cell = cells[1]

            # Get the link to the station's article
            link = name_cell.find('a')
            if link and link.get('href'):
                station_name = name_cell.get_text(strip=True)
                station_code = code_cell.get_text(strip=True)
                wiki_path = link.get('href')

                stations.append({
                    'name_en': station_name,
                    'code': station_code,
                    'wiki_path': wiki_path
                })

    print(f"✓ Found {len(stations)} stations")
    return stations


def get_interwiki_names(wiki_path: str) -> Dict[str, str]:
    """Get multilingual names from Wikipedia interwiki links."""
    # Remove the /wiki/ prefix and undo any percent-encoding so the API sees
    # the actual page title
    if wiki_path.startswith('/wiki/'):
        page_title = unquote(wiki_path[6:])
    else:
        page_title = unquote(wiki_path)

    # Use the Wikipedia API to get interwiki (language) links
    api_url = 'https://en.wikipedia.org/w/api.php'
    params = {
        'action': 'query',
        'titles': page_title,
        'prop': 'langlinks',
        'lllimit': 'max',
        'format': 'json'
    }

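    # The langlinks response is roughly shaped like (illustrative, trimmed):
    # {"query": {"pages": {"<pageid>": {"title": "...",
    #     "langlinks": [{"lang": "hi", "*": "<Hindi article title>"}, ...]}}}}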
    try:
        response = requests.get(api_url, params=params, headers=HEADERS, timeout=10)
        data = response.json()

        pages = data.get('query', {}).get('pages', {})
        if not pages:
            return {}

        # Get first (and only) page
        page = list(pages.values())[0]
        langlinks = page.get('langlinks', [])

        # Extract names in target languages
        names = {}
        for link in langlinks:
            lang = link.get('lang')
            title = link.get('*')
            if lang in LANG_WIKIS and title:
                names[lang] = title

        return names
    except Exception:
        # Treat any network or parsing error as "no translations found"
        return {}


def extract_transliteration_pairs(stations: List[Dict], limit: Optional[int] = None) -> List[Dict]:
    """Extract transliteration pairs with multilingual names."""
    pairs = []
    count = 0

    for station in stations:
        if limit and count >= limit:
            break

        count += 1
        print(f"Processing {count}/{min(limit or len(stations), len(stations))}: {station['name_en']}...", end='\r')

        # Get multilingual names
        multilingual_names = get_interwiki_names(station['wiki_path'])

        # Rate limiting: throttle after every API call, even when a station
        # returns no language links
        time.sleep(0.2)

        if not multilingual_names:
            continue

        # Create pairs for each language
        for lang, native_name in multilingual_names.items():
            pairs.append({
                'native_script': native_name,
                'romanization': station['name_en'],
                'language': lang,
                'station_code': station['code'],
                'source': 'wikipedia'
            })

    print()  # New line after progress
    return pairs


def save_to_tsv(pairs: List[Dict], output_file: Path):
    """Save pairs to TSV file."""
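    # Each row is native_script<TAB>romanization<TAB>language<TAB>station_code<TAB>source,
    # e.g. (illustrative values, not guaranteed output):
    # "नई दिल्ली रेलवे स्टेशन", "New Delhi railway station", "hi", "NDLS", "wikipedia"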
    with open(output_file, 'w', encoding='utf-8', newline='') as f:
        writer = csv.DictWriter(
            f,
            fieldnames=['native_script', 'romanization', 'language', 'station_code', 'source'],
            delimiter='\t'
        )
        writer.writeheader()
        writer.writerows(pairs)


def print_statistics(pairs: List[Dict]):
    """Print statistics."""
    lang_counts = {}
    for pair in pairs:
        lang = pair['language']
        lang_counts[lang] = lang_counts.get(lang, 0) + 1

    print(f"\n{'='*60}")
    print(f"Total pairs: {len(pairs)}")
    print("\nBy language:")
    for lang, count in sorted(lang_counts.items()):
        print(f"  {lang}: {count}")
    print(f"{'='*60}")


def main():
    parser = argparse.ArgumentParser(description='Scrape railway station names from Wikipedia')
    parser.add_argument('--limit', type=int, default=100, help='Maximum number of stations to process (default: 100)')
    parser.add_argument('--output', type=str, default='railway_stations.tsv', help='Output TSV file')
    args = parser.parse_args()

    print("="*60)
    print("Wikipedia Railway Station Scraper")
    print("="*60)

    # Scrape station list
    stations = scrape_station_list()

    if not stations:
        print("Error: No stations found")
        return

    # Extract transliteration pairs
    print(f"\nFetching multilingual names (limit: {args.limit})...")
    pairs = extract_transliteration_pairs(stations, args.limit)

    if not pairs:
        print("Warning: No transliteration pairs found")
        return

    # Save
    output_file = Path(args.output)
    save_to_tsv(pairs, output_file)
    print(f"\n✓ Saved {len(pairs)} pairs to {output_file}")

    # Stats
    print_statistics(pairs)

    # Sample
    print("\nSample pairs:")
    for pair in pairs[:10]:
        print(f"  {pair['native_script']} → {pair['romanization']} ({pair['language']})")


if __name__ == '__main__':
    main()