-
Notifications
You must be signed in to change notification settings - Fork 0
Expand file tree
/
Copy pathutils.py
More file actions
448 lines (360 loc) · 17.8 KB
/
utils.py
File metadata and controls
448 lines (360 loc) · 17.8 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
import os
import json
import requests
import re
import subprocess
import sys
import ctypes
from ctypes import wintypes
import platform
import winreg
def make_safe_filename(name):
    """Sanitize *name* into a lowercase, filesystem-safe string.

    Spaces and a fixed set of problematic punctuation become underscores;
    any remaining character that is not alphanumeric, '_' or '-' is dropped.
    """
    # Single-pass replacement of every forbidden character with '_'.
    forbidden = str.maketrans({c: "_" for c in ':/\\*?"<>|.'})
    lowered = name.lower().replace(" ", "_").translate(forbidden)
    return "".join(ch for ch in lowered if ch.isalnum() or ch in "_-")
def load_config(config_file):
    """Read JSON config from *config_file*; fall back to a default structure.

    Any read/parse failure is reported to stdout and swallowed so callers
    always receive a usable config dict.
    """
    default = {"backup_dir": "", "games": {}}
    if not os.path.exists(config_file):
        return default
    try:
        with open(config_file, 'r') as handle:
            return json.load(handle)
    except Exception as exc:
        print(f"Error loading config: {exc}")
        return default
def save_config(config_file, config):
    """Write *config* to *config_file* as indented JSON.

    The file is small, so a direct overwrite (no temp-file dance) is
    considered atomic enough here.
    """
    with open(config_file, 'w') as handle:
        json.dump(config, handle, indent=4)
def generate_game_name_suggestions(paths):
    """Heuristically derive human-friendly candidate names from save paths.

    Only shallow directory levels are inspected (no deep recursion), and
    generic save-folder terms plus obvious save-file stems are filtered out.
    Returns a list of title-cased candidate names.
    """
    generic_terms = {'saves', 'saved games', 'savedata', 'save games',
                     'savegames', 'save files'}
    save_extensions = ('.sav', '.dat', '.bin', '.json', '.xml')

    def prettify(raw):
        # underscores to spaces, then Title Case
        return raw.replace('_', ' ').title()

    candidates = set()
    for path in paths:
        if os.path.isdir(path):
            candidates.add(prettify(os.path.basename(path)))
            try:
                entries = os.listdir(path)
            except (PermissionError, FileNotFoundError):
                entries = []
            for entry in entries:
                if os.path.isdir(os.path.join(path, entry)):
                    candidates.add(prettify(entry))
        else:
            parent = os.path.basename(os.path.dirname(path))
            if parent:
                candidates.add(prettify(parent))
            stem = os.path.splitext(os.path.basename(path))[0]
            if stem and len(stem) > 3:  # Avoid very short names
                candidates.add(prettify(stem))

    return [name for name in candidates
            if name.lower() not in generic_terms
            and not name.lower().endswith(save_extensions)]
def open_directory(path):
    """Open *path* in the platform file browser; return True on success.

    Errors are reported to stdout and swallowed so a failed launch never
    crashes the caller.
    """
    try:
        target = os.path.normpath(path)
        if sys.platform == 'win32':
            os.startfile(target)
        elif sys.platform == 'darwin':
            subprocess.run(['open', target], check=True)
        else:
            subprocess.run(['xdg-open', target], check=True)
    except Exception as exc:
        print(f"Failed to open directory: {exc}")
        return False
    return True
def clean_game_name_for_search(game_name):
    """Normalize a game name for external search APIs.

    Strips trademark/registered/copyright symbols, collapses runs of
    whitespace to single spaces, and trims the ends.
    """
    # `re` is already imported at module level; the old function-local
    # `import re` was redundant and has been removed.
    cleaned = re.sub(r'[™®©]', '', game_name)
    cleaned = re.sub(r'\s+', ' ', cleaned)
    return cleaned.strip()
def search_pcgamingwiki_titles(game_name):
    """Return list of possible PCGamingWiki page titles for a game name.

    Uses the MediaWiki search API; also resolves inline redirect snippets so
    we can attempt canonical target names when the Steam title differs.
    Returns [] on any failure.
    """
    try:
        cleaned_name = clean_game_name_for_search(game_name)
        print(f"Searching PCGamingWiki for: '{cleaned_name}' (original: '{game_name}')")
        # Let requests URL-encode the query via `params` instead of only
        # replacing spaces with %20, which mishandled names containing
        # '&', '+', '#', etc.
        response = requests.get(
            "https://www.pcgamingwiki.com/w/api.php",
            params={"action": "query", "list": "search",
                    "srsearch": cleaned_name, "format": "json"},
            timeout=10,
        )
        response.raise_for_status()
        search_data = response.json()
        if 'query' not in search_data or 'search' not in search_data['query']:
            return []
        search_results = search_data['query']['search']
        if not search_results:
            return []
        titles = []
        for result in search_results:
            title = result['title']
            if 'snippet' in result and '#REDIRECT' in result['snippet']:
                # Redirect pages embed their target as a [[Target]] wikilink
                # in the snippet; prefer the canonical target title.
                # (`re` is imported at module level; no local import needed.)
                redirect_match = re.search(r'\[\[([^\]]+)\]\]', result['snippet'])
                if redirect_match:
                    # Clean any HTML tags wrapped around the redirect target.
                    redirect_target = re.sub(r'<[^>]*>', '', redirect_match.group(1))
                    titles.append(redirect_target)
                    print(f"Found redirect: '{title}' -> '{redirect_target}'")
                else:
                    titles.append(title)
            else:
                titles.append(title)
        print(f"PCGamingWiki found {len(titles)} results: {titles}")
        return titles
    except requests.exceptions.RequestException as e:
        print(f"PCGamingWiki title search failed: {e}")
        return []
    except Exception as e:
        print(f"Unexpected error searching PCGamingWiki titles: {e}")
        return []
def fetch_pcgamingwiki_save_locations(game_name):
    """Scrape PCGamingWiki 'Save game data location' table.

    1. opensearch for canonical page
    2. parse sections metadata to find save section index
    3. fetch section HTML and regex rows (HTML stable here; lighter than parser)
    4. split multi-line cells (<br>) & expand environment placeholders

    Returns mapping store/platform variant -> absolute or pattern path,
    or {} on any failure.
    """
    save_locations = {}
    api_url = "https://www.pcgamingwiki.com/w/api.php"
    try:
        # Step 1: Use opensearch to get the exact wiki page title.
        # `params` handles URL-encoding properly, and timeout=10 matches the
        # other API helpers in this module (previously these calls had no
        # timeout and could hang the caller indefinitely).
        response = requests.get(api_url, params={
            "action": "opensearch", "format": "json",
            "search": game_name, "formatversion": "2",
        }, timeout=10)
        response.raise_for_status()
        search_data = response.json()
        if not search_data[1] or len(search_data[1]) == 0:
            return {}
        # opensearch result: [query, [titles...], [descs...], [urls...]]
        wiki_page_url = search_data[3][0]
        wiki_page_name = wiki_page_url.split('/')[-1]
        # Step 2: Get the sections to find the save location section index
        response = requests.get(api_url, params={
            "action": "parse", "format": "json",
            "page": wiki_page_name, "prop": "sections", "formatversion": "2",
        }, timeout=10)
        response.raise_for_status()
        sections_data = response.json()
        if 'error' in sections_data:
            return {}
        if 'parse' not in sections_data:
            return {}
        # Find the "Save game data location" section
        save_section_index = None
        for section in sections_data['parse']['sections']:
            if section['line'] == 'Save game data location':
                save_section_index = section['index']
                break
        if not save_section_index:
            return {}
        # Step 3: Get the content of the save location section.
        # BUG FIX: the old query string contained a literal '§ion='
        # (a mangled '&section=' — '&sect' collapsed into the section-sign
        # entity), so the section parameter was malformed; pass it explicitly.
        response = requests.get(api_url, params={
            "action": "parse", "format": "json",
            "page": wiki_page_name, "section": save_section_index,
            "formatversion": "2",
        }, timeout=10)
        response.raise_for_status()
        content_data = response.json()
        if 'error' in content_data:
            return {}
        if 'parse' not in content_data:
            return {}
        # extract paths from the HTML content
        html_content = content_data['parse']['text']
        # pattern to match rows in the table
        row_pattern = r'<th\s+scope="row"\s+class="table-gamedata-body-system">(.*?)</th>\s*?<td\s+class="table-gamedata-body-location"><span[^>]*>(.*?)</span></td>'
        store_rows = re.findall(row_pattern, html_content, re.DOTALL)
        split_rows = []
        for row in store_rows:
            store_type = row[0].strip()
            path_html = row[1]
            # First, split on common delimiters that indicate separate entries
            # Split on \n\n (double newline) or </tr><tr> patterns that leak through
            path_html_chunks = re.split(r'\n\n+|</tr>\s*<tr|</th>\s*<th', path_html, flags=re.DOTALL)
            processed_chunks = []
            for chunk in path_html_chunks:
                # Clean HTML from each chunk
                chunk = re.sub(r'<th\s+scope="row".*?>', '', chunk, flags=re.DOTALL)
                chunk = re.sub(r'</th>.*?<td.*?>', '', chunk, flags=re.DOTALL)
                chunk = re.sub(r'class="[^"]*"', '', chunk)
                # Now split by <br> tags
                parts = re.split(r'<br\s*/?>', chunk)
                for part in parts:
                    # Remove all remaining HTML tags
                    clean_part = re.sub(r'<[^>]*>', '', part)
                    clean_part = clean_part.strip()
                    # Only keep valid path-like entries
                    if clean_part and len(clean_part) > 5:
                        # Skip if it's obviously not a path
                        if any(skip in clean_part.lower() for skip in ['scope=', 'class=', '</td>', '</th>', '<tr', '<td']):
                            continue
                        # Skip if it starts with common table artifacts
                        if clean_part.startswith(('>', '<', 'scope', 'class')):
                            continue
                        processed_chunks.append(clean_part)
            # Create store type entries; number variants when a cell held
            # several alternative paths.
            if len(processed_chunks) > 1:
                for index, path in enumerate(processed_chunks):
                    split_rows.append((f"{store_type} [{str(index + 1)}]", path))
            elif len(processed_chunks) == 1:
                split_rows.append((store_type, processed_chunks[0]))
        print(split_rows)
        for row in split_rows:
            store_type = row[0].strip()
            path = row[1]
            # Filter to Windows paths only (skip Linux, macOS, etc.)
            # Keep: Windows, Steam, Microsoft Store, GOG, Epic, generic
            store_lower = store_type.lower()
            if any(skip in store_lower for skip in ['linux', 'macos', 'os x', 'playstation', 'xbox', 'switch', 'android', 'ios']):
                continue
            # Skip paths with obvious HTML table artifacts
            if any(artifact in path for artifact in ['</th>', '<td', '</tr>', '<tr', 'class="', 'scope="']):
                continue
            # normalize backslashes for Windows paths
            path = re.sub(r'\\+', '\\\\', path)
            # replace common environment variables
            try:
                if '%USERPROFILE%' in path:
                    if '%USERPROFILE%\\Documents' in path:
                        # Resolve the real Documents folder via the shell API
                        # (it may be relocated away from %USERPROFILE%\Documents).
                        CSIDL_PERSONAL = 5
                        SHGFP_TYPE_CURRENT = 0
                        buf = ctypes.create_unicode_buffer(wintypes.MAX_PATH)
                        ctypes.windll.shell32.SHGetFolderPathW(None, CSIDL_PERSONAL, None, SHGFP_TYPE_CURRENT, buf)
                        documents = buf.value
                        path = path.replace('%USERPROFILE%\\Documents', documents)
                    else:
                        user_profile = os.path.expandvars('%USERPROFILE%')
                        path = path.replace('%USERPROFILE%', user_profile)
                if '%APPDATA%' in path:
                    appdata = os.path.expandvars('%APPDATA%')
                    path = path.replace('%APPDATA%', appdata)
                if '%LOCALAPPDATA%' in path:
                    localappdata = os.path.expandvars('%LOCALAPPDATA%')
                    path = path.replace('%LOCALAPPDATA%', localappdata)
                if '%PUBLIC%' in path:
                    public = os.path.expandvars('%PUBLIC%')
                    path = path.replace('%PUBLIC%', public)
                if '%PROGRAMDATA%' in path:
                    programdata = os.path.expandvars('%PROGRAMDATA%')
                    path = path.replace('%PROGRAMDATA%', programdata)
            except Exception as e:
                print(f"Error expanding environment variables: {e}")
            save_locations[store_type] = path
        return save_locations
    except requests.exceptions.RequestException as e:
        print(f"Network request failed: {e}")
        return {}
    except Exception as e:
        print(f"Unexpected error fetching PCGamingWiki data: {e}")
        return {}
def extract_between_tags(text, start_tag, end_tag):
    """Collect every substring of *text* found between the two tag markers.

    Scans left to right, non-nested: after each match, scanning resumes
    just past the closing tag. An unterminated opening tag ends the scan.
    """
    pieces = []
    cursor = 0
    while (open_at := text.find(start_tag, cursor)) != -1:
        begin = open_at + len(start_tag)
        close_at = text.find(end_tag, begin)
        if close_at == -1:
            break
        pieces.append(text[begin:close_at])
        cursor = close_at + len(end_tag)
    return pieces
def get_igdb_api_source(config):
    """Pick which IGDB worker classes the caller should use.

    Legacy workers are chosen only when legacy mode is explicitly configured
    AND legacy auth is present; everything else falls back to the bundled
    Ambidex proxy workers, keeping caller logic simple.
    """
    wants_legacy = (config.get("igdb_api_source", "ambidex") == "legacy"
                    and bool(config.get("igdb_auth")))
    if wants_legacy:
        from workers import LegacyIGDBGameSearchWorker, LegacyIGDBImageDownloadWorker
        return {
            "search_worker": LegacyIGDBGameSearchWorker,
            "image_worker": LegacyIGDBImageDownloadWorker,
            "needs_auth": True
        }
    # Always default to Ambidex API if legacy auth is missing
    from workers import IGDBGameSearchWorker, IGDBImageDownloadWorker
    return {
        "search_worker": IGDBGameSearchWorker,
        "image_worker": IGDBImageDownloadWorker,
        "needs_auth": False
    }
def get_windows_accent_color():
    """Return the current Windows accent color as '#RRGGBB', or fallback blue.

    Reads HKCU\\Software\\Microsoft\\Windows\\DWM\\AccentColor, which DWM
    stores as an ABGR dword (0xAABBGGRR).
    """
    if platform.system() == "Windows":
        try:
            # BUG FIX: the previous raw string r"Software\\Microsoft\\..."
            # contained literal doubled backslashes, so OpenKey could never
            # resolve the key and the accent color always fell back to blue.
            key_path = r"Software\Microsoft\Windows\DWM"
            # Context manager closes the key even if QueryValueEx raises
            # (the old code leaked the handle on that path).
            with winreg.OpenKey(winreg.HKEY_CURRENT_USER, key_path) as key:
                accent_color_dword, _ = winreg.QueryValueEx(key, "AccentColor")
            # Extract R, G, B components (alpha lives in the top byte, unused)
            blue = (accent_color_dword >> 16) & 0xFF
            green = (accent_color_dword >> 8) & 0xFF
            red = accent_color_dword & 0xFF
            return f"#{red:02x}{green:02x}{blue:02x}"
        except Exception:
            # Fallback if registry access fails or key/value not found
            pass
    return "#0078D4"  # Default blue for non-Windows or if detection fails
def is_windows_11_or_later():
    """Best-effort Windows 11+ detection via the OS build number (>=22000)."""
    if platform.system() != "Windows":
        return False
    # platform.version() gives something like '10.0.22621' for Win11;
    # the official Windows 11 release build is 22000.
    parts = platform.version().split('.')
    try:
        if len(parts) >= 3:
            return int(parts[2]) >= 22000
    except (ValueError, IndexError):
        # Could not parse version string
        pass
    return False
def is_game_duplicate(game_name, existing_games, threshold=0.8):
    """Lightweight duplicate detection against *existing_games*.

    Returns (True, matching_name) on an exact case-insensitive match, a
    strong substring-containment match, or a word-overlap ratio at or above
    *threshold*; otherwise (False, None).
    """
    target = game_name.lower().strip()

    # Pass 1: exact case-insensitive match wins immediately.
    for candidate in existing_games:
        if candidate.lower().strip() == target:
            return True, candidate

    # Pass 2: fuzzy checks — containment, then shared-word ratio.
    target_words = set(target.split())
    for candidate in existing_games:
        other = candidate.lower().strip()
        # Containment counts only when the contained name is at least ~70%
        # of the container's length, to avoid trivial substring hits.
        contains = (target in other and len(target) > len(other) * 0.7) or \
                   (other in target and len(other) > len(target) * 0.7)
        if contains:
            return True, candidate
        other_words = set(other.split())
        if target_words and other_words:
            shared = target_words & other_words
            combined = target_words | other_words
            if len(shared) / len(combined) >= threshold:
                return True, candidate
    return False, None
def get_existing_game_names(config):
    """Return the lowercased set of configured game names (for dup checks)."""
    games = config.get("games", {})
    return set(map(str.lower, games))