diff --git a/.github/workflows/ruff-lint.yml b/.github/workflows/ruff-lint.yml
new file mode 100644
index 0000000..924b309
--- /dev/null
+++ b/.github/workflows/ruff-lint.yml
@@ -0,0 +1,27 @@
+name: Ruff Checks
+
+on:
+  push:
+    branches: [main]
+  pull_request:
+    branches: [main]
+
+jobs:
+  lint-and-test:
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v4
+
+      - name: Install UV
+        uses: astral-sh/setup-uv@v6
+
+      - name: Set up Python
+        run: uv python install
+
+      - name: Install dependencies
+        run: uv sync --locked --dev
+
+      - name: Run Ruff
+        run: uv run ruff check .
diff --git a/builder/build_cli.py b/builder/build_cli.py
index 35c7917..c63c762 100644
--- a/builder/build_cli.py
+++ b/builder/build_cli.py
@@ -10,7 +10,6 @@
from pathlib import Path
import argparse
import subprocess
-import sys
import requests
import json
import time
@@ -21,6 +20,7 @@
SPEC_CHECKSUM_URL = "https://rust-lang.github.io/fls/paragraph-ids.json"
SPEC_LOCKFILE = "spec.lock"
+
def build_docs(
root: Path,
builder: str,
@@ -68,15 +68,15 @@ def build_docs(
# Add configuration options as needed
if not spec_lock_consistency_check:
conf_opt_values.append("enable_spec_lock_consistency=0")
- if offline:
+ if offline:
conf_opt_values.append("offline=1")
- if debug:
+ if debug:
conf_opt_values.append("debug=1")
# Only add the --define argument if there are options to define
if conf_opt_values:
for opt in conf_opt_values:
- args.append("--define") # each option needs its own --define
+ args.append("--define") # each option needs its own --define
args.append(opt)
if serve:
@@ -89,7 +89,6 @@ def build_docs(
args += ["-W", "--keep-going"]
try:
-
# Tracking build time
timer_start = time.perf_counter()
subprocess.run(
@@ -111,24 +110,24 @@ def build_docs(
print(f"\nBuild finished in {timer_end - timer_start:.2f} seconds.")
return dest / builder
-def update_spec_lockfile(spec_checksum_location, lockfile_location):
+def update_spec_lockfile(spec_checksum_location, lockfile_location):
try:
response = requests.get(spec_checksum_location, stream=True)
response.raise_for_status()
- with open(lockfile_location, 'wb') as file:
+ with open(lockfile_location, "wb") as file:
for chunk in response.iter_content(chunk_size=8192):
if chunk:
file.write(chunk)
- with open(lockfile_location, 'r') as file:
+ with open(lockfile_location, "r") as file:
data = json.load(file)
print("-- read in --")
- with open(lockfile_location, 'w') as outfile:
+ with open(lockfile_location, "w") as outfile:
json.dump(data, outfile, indent=4, sort_keys=True)
print("-- wrote back out --")
@@ -139,6 +138,7 @@ def update_spec_lockfile(spec_checksum_location, lockfile_location):
print(f"Error downloading file: {e}")
return False
+
def main(root):
root = Path(root)
@@ -156,12 +156,10 @@ def main(root):
"--ignore-spec-lock-diff",
help="ignore spec.lock file differences with live release -- for WIP branches only",
default=False,
- action="store_true"
+ action="store_true",
)
parser.add_argument(
- "--update-spec-lock-file",
- help="update spec.lock file",
- action="store_true"
+ "--update-spec-lock-file", help="update spec.lock file", action="store_true"
)
group.add_argument(
"-s",
@@ -191,7 +189,12 @@ def main(root):
if args.update_spec_lock_file:
update_spec_lockfile(SPEC_CHECKSUM_URL, root / "src" / SPEC_LOCKFILE)
- rendered = build_docs(
- root, "xml" if args.xml else "html", args.clear, args.serve, args.debug, args.offline, not args.ignore_spec_lock_diff,
+ build_docs(
+ root,
+ "xml" if args.xml else "html",
+ args.clear,
+ args.serve,
+ args.debug,
+ args.offline,
+ not args.ignore_spec_lock_diff,
)
-
diff --git a/exts/coding_guidelines/__init__.py b/exts/coding_guidelines/__init__.py
index f5d74d1..27f8a87 100644
--- a/exts/coding_guidelines/__init__.py
+++ b/exts/coding_guidelines/__init__.py
@@ -1,15 +1,19 @@
# SPDX-License-Identifier: MIT OR Apache-2.0
# SPDX-FileCopyrightText: The Coding Guidelines Subcommittee Contributors
-
-from . import fls_checks
-from . import write_guidelines_ids
-from . import std_role
-from . import fls_linking
-from . import guidelines_checks
-from .common import logger, get_tqdm, bar_format, logging
from sphinx.domains import Domain
+from . import (
+ common,
+ fls_checks,
+ fls_linking,
+ guidelines_checks,
+ std_role,
+ write_guidelines_ids,
+)
+from .common import bar_format, get_tqdm, logger, logging
+
+
class CodingGuidelinesDomain(Domain):
name = "coding-guidelines"
label = "Rust Standard Library"
@@ -19,17 +23,17 @@ class CodingGuidelinesDomain(Domain):
directives = {}
object_types = {}
indices = {}
-
+
def get_objects(self):
return []
-
+
def merge_domaindata(self, docnames, other):
pass # No domain data to merge
def on_build_finished(app, exception):
print("\nFinalizing build:")
- for _ in get_tqdm(iterable=range(1), desc="Finalizing",bar_format=bar_format):
+ for _ in get_tqdm(iterable=range(1), desc="Finalizing", bar_format=bar_format):
pass
outdir = app.outdir
@@ -39,47 +43,44 @@ def on_build_finished(app, exception):
if not app.config.debug:
print(f" + Build complete -> {outdir}")
+
def setup(app):
-
app.add_domain(CodingGuidelinesDomain)
app.add_config_value(
- name = "offline",
- default=False,
- rebuild= "env"
- ) # register the offline option
+ name="offline", default=False, rebuild="env"
+ ) # register the offline option
app.add_config_value(
name="spec_std_docs_url",
default="https://doc.rust-lang.org/stable/std",
rebuild="env", # Rebuild the environment when this changes
types=[str],
)
- app.add_config_value(name='debug',
- default=False,
- rebuild='env'
+ app.add_config_value(name="debug", default=False, rebuild="env")
+ app.add_config_value(
+ name="fls_paragraph_ids_url",
+ default="https://rust-lang.github.io/fls/paragraph-ids.json",
+ rebuild="env",
+ )
+ app.add_config_value(
+ name="enable_spec_lock_consistency", default=True, rebuild="env"
)
- app.add_config_value(name='fls_paragraph_ids_url',
- default='https://rust-lang.github.io/fls/paragraph-ids.json',
- rebuild='env')
- app.add_config_value(name='enable_spec_lock_consistency',
- default=True,
- rebuild='env')
app.add_config_value(
- name='required_guideline_fields',
- default=['release', 'fls', 'decidability', 'scope'],
- rebuild='env',
+ name="required_guideline_fields",
+ default=["release", "fls", "decidability", "scope"],
+ rebuild="env",
types=[list],
)
if app.config.debug:
logger.setLevel(logging.INFO)
- common.disable_tqdm = True
-
- app.connect('env-check-consistency', guidelines_checks.validate_required_fields)
- app.connect('env-check-consistency', fls_checks.check_fls)
- app.connect('build-finished', write_guidelines_ids.build_finished)
- app.connect('build-finished', fls_linking.build_finished)
- app.connect('build-finished', on_build_finished)
-
+ common.disable_tqdm = True
+
+ app.connect("env-check-consistency", guidelines_checks.validate_required_fields)
+ app.connect("env-check-consistency", fls_checks.check_fls)
+ app.connect("build-finished", write_guidelines_ids.build_finished)
+ app.connect("build-finished", fls_linking.build_finished)
+ app.connect("build-finished", on_build_finished)
+
return {
- 'version': '0.1',
- 'parallel_read_safe': True,
+ "version": "0.1",
+ "parallel_read_safe": True,
}
diff --git a/exts/coding_guidelines/common.py b/exts/coding_guidelines/common.py
index 70382c8..0cc2290 100644
--- a/exts/coding_guidelines/common.py
+++ b/exts/coding_guidelines/common.py
@@ -1,16 +1,19 @@
+import logging
from tqdm import tqdm
-import logging
# This is a wrapper around tqdm that allows us to disable it with this global variable
-disable_tqdm = False
+disable_tqdm = False
+
+
def get_tqdm(**kwargs):
- kwargs['disable'] = disable_tqdm
+ kwargs["disable"] = disable_tqdm
return tqdm(**kwargs)
+
+
# Get the Sphinx logger
-logger = logging.getLogger('sphinx')
-logger.setLevel(logging.WARNING)
+logger = logging.getLogger("sphinx")
+logger.setLevel(logging.WARNING)
# This is what controls the progress bar format
bar_format = "{l_bar}{bar}| {n_fmt}/{total_fmt} {postfix}"
-
diff --git a/exts/coding_guidelines/fls_checks.py b/exts/coding_guidelines/fls_checks.py
index b8a0940..55e0766 100644
--- a/exts/coding_guidelines/fls_checks.py
+++ b/exts/coding_guidelines/fls_checks.py
@@ -2,37 +2,44 @@
# SPDX-FileCopyrightText: The Coding Guidelines Subcommittee Contributors
-from .common import logger, get_tqdm, bar_format, logging
-import time
-import requests
-import re
import json
+import re
+
+import requests
from sphinx.errors import SphinxError
from sphinx_needs.data import SphinxNeedsData
+from .common import bar_format, get_tqdm, logger
+
fls_paragraph_ids_url = "https://rust-lang.github.io/fls/paragraph-ids.json"
+
class FLSValidationError(SphinxError):
category = "FLS Validation Error"
+
def check_fls(app, env):
"""Main checking function for FLS validation"""
# First make sure all guidelines have correctly formatted FLS IDs
check_fls_exists_and_valid_format(app, env)
offline_mode = env.config.offline
-
+
# Gather all FLS paragraph IDs from the specification and get the raw JSON
fls_ids, raw_json_data = gather_fls_paragraph_ids(app, fls_paragraph_ids_url)
# Error out if we couldn't get the raw JSON data
if not raw_json_data:
error_message = f"Failed to retrieve or parse the FLS specification from {fls_paragraph_ids_url}"
logger.error(error_message)
- raise FLSValidationError(error_message)
- if not offline_mode: # in offline mode, ignore checking against the lock file
+ raise FLSValidationError(error_message)
+ if not offline_mode: # in offline mode, ignore checking against the lock file
# Check for differences against lock file
- has_differences, differences = check_fls_lock_consistency(app, env, raw_json_data)
+ has_differences, differences = check_fls_lock_consistency(
+ app, env, raw_json_data
+ )
if has_differences:
- error_message = "The FLS specification has changed since the lock file was created:\n"
+ error_message = (
+ "The FLS specification has changed since the lock file was created:\n"
+ )
for diff in differences:
error_message += f" - {diff}\n"
error_message += "\nPlease manually inspect FLS spec items whose checksums have changed as corresponding guidelines may need to account for these changes."
@@ -42,37 +49,37 @@ def check_fls(app, env):
raise FLSValidationError(error_message)
# Check if all referenced FLS IDs exist
check_fls_ids_correct(app, env, fls_ids)
-
+
# Read the ignore list
fls_id_ignore_list = read_fls_ignore_list(app)
-
+
# Insert coverage information into fls_ids
insert_fls_coverage(app, env, fls_ids)
-
+
# Calculate and report coverage
coverage_data = calculate_fls_coverage(fls_ids, fls_id_ignore_list)
-
+
# Log coverage report
log_coverage_report(coverage_data)
def read_fls_ignore_list(app):
"""Read the list of FLS IDs to ignore from a file"""
- ignore_file_path = app.confdir / 'spec_ignore_list.txt'
+ ignore_file_path = app.confdir / "spec_ignore_list.txt"
ignore_list = []
-
+
if ignore_file_path.exists():
logger.info(f"Reading FLS ignore list from {ignore_file_path}")
- with open(ignore_file_path, 'r') as f:
+ with open(ignore_file_path, "r") as f:
for line in f:
# Remove comments and whitespace
- line = line.split('#')[0].strip()
+ line = line.split("#")[0].strip()
if line:
ignore_list.append(line)
logger.info(f"Loaded {len(ignore_list)} FLS IDs to ignore")
else:
logger.warning(f"No FLS ignore list found at {ignore_file_path}")
-
+
return ignore_list
@@ -86,35 +93,35 @@ def check_fls_exists_and_valid_format(app, env):
# Regular expression for FLS ID validation
# Format: fls_<12 alphanumeric chars including upper and lowercase>
- fls_pattern = re.compile(r'^fls_[a-zA-Z0-9]{9,12}$')
+ fls_pattern = re.compile(r"^fls_[a-zA-Z0-9]{9,12}$")
for need_id, need in needs.items():
logger.debug(f"ID: {need_id}, Need: {need}")
- if need.get('type') == 'guideline':
+ if need.get("type") == "guideline":
fls_value = need.get("fls")
-
+
# Check if fls field exists and is not empty
if fls_value is None:
msg = f"Need {need_id} has no fls field"
logger.error(msg)
raise FLSValidationError(msg)
-
+
if fls_value == "":
msg = f"Need {need_id} has empty fls field"
logger.error(msg)
raise FLSValidationError(msg)
-
+
# Validate FLS ID format
if not fls_pattern.match(fls_value):
msg = f"Need {need_id} has invalid fls format: '{fls_value}'. Expected format: fls_ followed by 12 alphanumeric characters"
logger.error(msg)
raise FLSValidationError(msg)
-
+
def check_fls_ids_correct(app, env, fls_ids):
"""
Check that all FLS IDs referenced in guidelines actually exist in the specification.
-
+
Args:
app: The Sphinx application
env: The Sphinx environment
@@ -123,30 +130,37 @@ def check_fls_ids_correct(app, env, fls_ids):
logger.debug("check_fls_ids_correct")
data = SphinxNeedsData(env)
needs = data.get_needs_view()
-
+
# Track any errors found
invalid_ids = []
-
+
# prefiltering: this is mainly done for tqdm progress
- guidelines = {k: v for k, v in needs.items() if v.get('type') == 'guideline'}
-
- pbar = get_tqdm(iterable=guidelines.items(), desc="Validating FLS IDs",bar_format=bar_format, unit="need")
+ guidelines = {k: v for k, v in needs.items() if v.get("type") == "guideline"}
+
+ pbar = get_tqdm(
+ iterable=guidelines.items(),
+ desc="Validating FLS IDs",
+ bar_format=bar_format,
+ unit="need",
+ )
# Check each guideline's FLS reference
for need_id, need in pbar:
- if need.get('type') == 'guideline':
+ if need.get("type") == "guideline":
pbar.set_postfix(fls_id=need_id)
fls_value = need.get("fls")
-
+
# Skip needs we already validated format for
if fls_value is None or fls_value == "":
continue
-
+
# Check if the FLS ID exists in the gathered IDs
if fls_value not in fls_ids:
invalid_ids.append((need_id, fls_value))
- logger.warning(f"Need {need_id} references non-existent FLS ID: '{fls_value}'")
-
+ logger.warning(
+ f"Need {need_id} references non-existent FLS ID: '{fls_value}'"
+ )
+
# Raise error if any invalid IDs were found
if invalid_ids:
error_message = "The following needs reference non-existent FLS IDs:\n"
@@ -154,7 +168,7 @@ def check_fls_ids_correct(app, env, fls_ids):
error_message += f" - Need {need_id} references '{fls_id}'\n"
logger.error(error_message)
raise FLSValidationError(error_message)
-
+
logger.info("All FLS references in guidelines are valid")
pbar.close() # Ensure cleanup
@@ -162,30 +176,30 @@ def check_fls_ids_correct(app, env, fls_ids):
def gather_fls_paragraph_ids(app, json_url):
"""
- Gather all Ferrocene Language Specification paragraph IDs from the paragraph-ids.json file
+ Gather all Ferrocene Language Specification paragraph IDs from the paragraph-ids.json file
or from the lock file in offline mode, including both container section IDs and individual paragraph IDs.
-
+
Args:
app: The Sphinx application
json_url: The URL or path to the paragraph-ids.json file
-
+
Returns:
Dictionary mapping paragraph IDs to metadata AND the complete raw JSON data
"""
offline = app.config.offline
- lock_path = app.confdir / 'spec.lock'
-
+ lock_path = app.confdir / "spec.lock"
+
# Dictionary to store all FLS IDs and their metadata
all_fls_ids = {}
raw_json_data = None
-
+
try:
# Load the JSON file
if not offline:
logger.info("Gathering FLS paragraph IDs from %s", json_url)
response = requests.get(json_url)
response.raise_for_status() # Raise exception for HTTP errors
- # Parse the JSON data
+ # Parse the JSON data
try:
raw_json_data = response.json()
data = raw_json_data # Keep reference to the original data
@@ -195,43 +209,42 @@ def gather_fls_paragraph_ids(app, json_url):
logger.debug(f"Response content preview: {response.text[:500]}...")
raise
- else : # if online mode is on read from the lock file
-
- if not lock_path.exists():
- logger.warning(f"No FLS lock file found at {lock_path}") # TODO: returns an error
+        else:  # in offline mode, read from the lock file
+ if not lock_path.exists():
+ logger.warning(
+ f"No FLS lock file found at {lock_path}"
+ ) # TODO: returns an error
return False, []
logger.info("Gathering FLS paragraph IDs from lock file: %s", lock_path)
- with open(lock_path, 'r', encoding='utf-8') as f:
- raw_json_data=f.read()
+ with open(lock_path, "r", encoding="utf-8") as f:
+ raw_json_data = f.read()
data = json.loads(raw_json_data)
-
# Check if we have the expected document structure
- if 'documents' not in data:
+ if "documents" not in data:
logger.error("JSON does not have 'documents' key")
logger.debug(f"JSON keys: {list(data.keys())}")
return {}, None
-
+
# Base URL for constructing direct links
base_url = "https://rust-lang.github.io/fls/"
-
+
# Process each document in the JSON structure
- for document in data['documents']:
- doc_title = document.get('title', 'Unknown')
- doc_link = document.get('link', '')
-
+ for document in data["documents"]:
+ doc_title = document.get("title", "Unknown")
+
# Process each section in the document
- for section in document.get('sections', []):
- section_title = section.get('title', 'Unknown')
- section_id = section.get('id', '')
- section_number = section.get('number', '')
- section_link = section.get('link', '')
- is_informational = section.get('informational', False)
-
+ for section in document.get("sections", []):
+ section_title = section.get("title", "Unknown")
+ section_id = section.get("id", "")
+ section_number = section.get("number", "")
+ section_link = section.get("link", "")
+ is_informational = section.get("informational", False)
+
# Add the section container ID if it starts with 'fls_'
- if section_id and section_id.startswith('fls_'):
+ if section_id and section_id.startswith("fls_"):
direct_url = f"{base_url}{section_link}"
-
+
# Store section metadata
all_fls_ids[section_id] = {
"url": direct_url,
@@ -240,24 +253,24 @@ def gather_fls_paragraph_ids(app, json_url):
"section_title": section_title,
"section_number": section_number,
"is_container": True, # Mark as a container/section
- "informational": is_informational
+ "informational": is_informational,
# Note: No checksum for container IDs
}
-
+
# Process each paragraph in the section
- for paragraph in section.get('paragraphs', []):
- para_id = paragraph.get('id', '')
- para_number = paragraph.get('number', '')
- para_link = paragraph.get('link', '')
- para_checksum = paragraph.get('checksum', '')
-
+ for paragraph in section.get("paragraphs", []):
+ para_id = paragraph.get("id", "")
+ para_number = paragraph.get("number", "")
+ para_link = paragraph.get("link", "")
+ para_checksum = paragraph.get("checksum", "")
+
# Skip entries without proper IDs
- if not para_id or not para_id.startswith('fls_'):
+ if not para_id or not para_id.startswith("fls_"):
continue
-
+
# Create the full URL
direct_url = f"{base_url}{para_link}"
-
+
# Store metadata
all_fls_ids[para_id] = {
"url": direct_url,
@@ -267,22 +280,27 @@ def gather_fls_paragraph_ids(app, json_url):
"section_number": section_number,
"checksum": para_checksum,
"is_container": False, # Mark as individual paragraph
- "parent_section_id": section_id if section_id else None
+ "parent_section_id": section_id if section_id else None,
}
-
+
logger.info(f"Found {len(all_fls_ids)} total FLS IDs (sections and paragraphs)")
# Count sections vs paragraphs
- sections_count = sum(1 for metadata in all_fls_ids.values() if metadata.get('is_container', False))
+ sections_count = sum(
+ 1
+ for metadata in all_fls_ids.values()
+ if metadata.get("is_container", False)
+ )
paragraphs_count = len(all_fls_ids) - sections_count
logger.info(f" - {sections_count} section/container IDs")
logger.info(f" - {paragraphs_count} paragraph IDs")
-
+
return all_fls_ids, raw_json_data
-
+
except requests.exceptions.RequestException as e:
logger.error(f"Error fetching paragraph IDs from {json_url}: {e}")
return {}, None
+
def check_fls_lock_consistency(app, env, fls_raw_data):
"""
Compare live FLS JSON data with the lock file to detect changes
@@ -303,11 +321,9 @@ def check_fls_lock_consistency(app, env, fls_raw_data):
import json
import tempfile
- import os
- from pathlib import Path
logger.info("Checking FLS lock file consistency")
- lock_path = app.confdir / 'spec.lock'
+ lock_path = app.confdir / "spec.lock"
# Get the needs data to find affected guidelines
data = SphinxNeedsData(env)
@@ -317,24 +333,30 @@ def check_fls_lock_consistency(app, env, fls_raw_data):
fls_to_guidelines = {}
# prefiltering: this is mainly done for tqdm progress
- guidelines = {k: v for k, v in needs.items() if v.get('type') == 'guideline'}
- pbar = get_tqdm(iterable=guidelines.items(), desc="Checking fls lock consistency", bar_format=bar_format, unit="need")
+ guidelines = {k: v for k, v in needs.items() if v.get("type") == "guideline"}
+ pbar = get_tqdm(
+ iterable=guidelines.items(),
+ desc="Checking fls lock consistency",
+ bar_format=bar_format,
+ unit="need",
+ )
for need_id, need in pbar:
- if need.get('type') == 'guideline':
+ if need.get("type") == "guideline":
pbar.set_postfix(fls_id=need_id)
fls_value = need.get("fls")
if fls_value:
if fls_value not in fls_to_guidelines:
fls_to_guidelines[fls_value] = []
- fls_to_guidelines[fls_value].append({
- 'id': need_id,
- 'title': need.get('title', 'Untitled')
- })
+ fls_to_guidelines[fls_value].append(
+ {"id": need_id, "title": need.get("title", "Untitled")}
+ )
# If no lock file exists, skip checking
if not lock_path.exists():
- logger.warning(f"No FLS lock file found at {lock_path}, skipping consistency check")
+ logger.warning(
+ f"No FLS lock file found at {lock_path}, skipping consistency check"
+ )
return False, []
# Initialize result variables
@@ -344,7 +366,7 @@ def check_fls_lock_consistency(app, env, fls_raw_data):
try:
# Load lock file
- with open(lock_path, 'r', encoding='utf-8') as f:
+ with open(lock_path, "r", encoding="utf-8") as f:
locked_data = json.load(f)
# Create maps of paragraph IDs to checksums for both live and locked data
@@ -352,31 +374,31 @@ def check_fls_lock_consistency(app, env, fls_raw_data):
locked_checksums = {}
# Extract from live data
- for document in fls_raw_data.get('documents', []):
- for section in document.get('sections', []):
- for paragraph in section.get('paragraphs', []):
- para_id = paragraph.get('id', '')
- para_checksum = paragraph.get('checksum', '')
- para_number = paragraph.get('number', '')
-
- if para_id and para_id.startswith('fls_'):
+ for document in fls_raw_data.get("documents", []):
+ for section in document.get("sections", []):
+ for paragraph in section.get("paragraphs", []):
+ para_id = paragraph.get("id", "")
+ para_checksum = paragraph.get("checksum", "")
+ para_number = paragraph.get("number", "")
+
+ if para_id and para_id.startswith("fls_"):
live_checksums[para_id] = {
- 'checksum': para_checksum,
- 'section_id': para_number
+ "checksum": para_checksum,
+ "section_id": para_number,
}
# Extract from locked data
- for document in locked_data.get('documents', []):
- for section in document.get('sections', []):
- for paragraph in section.get('paragraphs', []):
- para_id = paragraph.get('id', '')
- para_checksum = paragraph.get('checksum', '')
- para_number = paragraph.get('number', '')
-
- if para_id and para_id.startswith('fls_'):
+ for document in locked_data.get("documents", []):
+ for section in document.get("sections", []):
+ for paragraph in section.get("paragraphs", []):
+ para_id = paragraph.get("id", "")
+ para_checksum = paragraph.get("checksum", "")
+ para_number = paragraph.get("number", "")
+
+ if para_id and para_id.startswith("fls_"):
locked_checksums[para_id] = {
- 'checksum': para_checksum,
- 'section_id': para_number
+ "checksum": para_checksum,
+ "section_id": para_number,
}
logger.info(f"Found {len(live_checksums)} paragraphs in live data")
@@ -385,18 +407,22 @@ def check_fls_lock_consistency(app, env, fls_raw_data):
# Helper function to track affected guidelines
def track_affected_guidelines(fls_id, change_type):
for guideline in fls_to_guidelines.get(fls_id, []):
- guideline_id = guideline['id']
+ guideline_id = guideline["id"]
if guideline_id not in affected_guidelines:
affected_guidelines[guideline_id] = {
- 'title': guideline['title'],
- 'changes': []
+ "title": guideline["title"],
+ "changes": [],
}
- section_id = live_checksums.get(fls_id, {}).get('section_id') or locked_checksums.get(fls_id, {}).get('section_id')
- affected_guidelines[guideline_id]['changes'].append({
- 'fls_id': fls_id,
- 'change_type': change_type,
- 'section_id': section_id
- })
+ section_id = live_checksums.get(fls_id, {}).get(
+ "section_id"
+ ) or locked_checksums.get(fls_id, {}).get("section_id")
+ affected_guidelines[guideline_id]["changes"].append(
+ {
+ "fls_id": fls_id,
+ "change_type": change_type,
+ "section_id": section_id,
+ }
+ )
# Format affected guidelines information (for detailed log)
def format_affected_guidelines(fls_id):
@@ -415,7 +441,9 @@ def format_affected_guidelines(fls_id):
for fls_id in sorted(new_ids):
diff_msg = f"New FLS ID added: {fls_id} ({live_checksums[fls_id]['section_id']})"
affected_msg = format_affected_guidelines(fls_id)
- detailed_differences.append(f"{diff_msg}\n Affected guidelines:\n{affected_msg}")
+ detailed_differences.append(
+ f"{diff_msg}\n Affected guidelines:\n{affected_msg}"
+ )
track_affected_guidelines(fls_id, "added")
has_differences = True
@@ -425,29 +453,31 @@ def format_affected_guidelines(fls_id):
for fls_id in sorted(removed_ids):
diff_msg = f"FLS ID removed: {fls_id} ({locked_checksums[fls_id]['section_id']})"
affected_msg = format_affected_guidelines(fls_id)
- detailed_differences.append(f"{diff_msg}\n Affected guidelines:\n{affected_msg}")
+ detailed_differences.append(
+ f"{diff_msg}\n Affected guidelines:\n{affected_msg}"
+ )
track_affected_guidelines(fls_id, "removed")
has_differences = True
# Check for checksum changes on existing IDs
common_ids = set(live_checksums.keys()) & set(locked_checksums.keys())
for fls_id in sorted(common_ids):
- live_checksum = live_checksums[fls_id]['checksum']
- locked_checksum = locked_checksums[fls_id]['checksum']
+ live_checksum = live_checksums[fls_id]["checksum"]
+ locked_checksum = locked_checksums[fls_id]["checksum"]
changes = []
change_type = None
if live_checksum != locked_checksum:
changes.append(
- f"Content changed for FLS ID {fls_id} ({live_checksums[fls_id]['section_id']}): " +
- f"checksum was {locked_checksum[:8]}... now {live_checksum[:8]}..."
+ f"Content changed for FLS ID {fls_id} ({live_checksums[fls_id]['section_id']}): "
+ + f"checksum was {locked_checksum[:8]}... now {live_checksum[:8]}..."
)
change_type = "content_changed"
# Also check if section IDs have changed
- live_section = live_checksums[fls_id]['section_id']
- locked_section = locked_checksums[fls_id]['section_id']
+ live_section = live_checksums[fls_id]["section_id"]
+ locked_section = locked_checksums[fls_id]["section_id"]
if live_section != locked_section:
changes.append(
@@ -458,7 +488,9 @@ def format_affected_guidelines(fls_id):
if changes:
affected_msg = format_affected_guidelines(fls_id)
- detailed_differences.append(f"{changes[0]}\n Affected guidelines:\n{affected_msg}")
+ detailed_differences.append(
+ f"{changes[0]}\n Affected guidelines:\n{affected_msg}"
+ )
# Add any additional changes separately
for i in range(1, len(changes)):
@@ -474,14 +506,20 @@ def format_affected_guidelines(fls_id):
detailed_differences.append("\n\nDETAILED AFFECTED GUIDELINES:")
for guideline_id, info in sorted(affected_guidelines.items()):
# For each guideline, list the changed FLS IDs with their section IDs
- changed_fls = [f"{c['fls_id']} ({c['section_id']})" for c in info['changes']]
+ changed_fls = [
+ f"{c['fls_id']} ({c['section_id']})" for c in info["changes"]
+ ]
detailed_differences.append(f"{guideline_id}: {info['title']}")
- detailed_differences.append(f" Changed FLS paragraphs: {', '.join(changed_fls)}")
+ detailed_differences.append(
+ f" Changed FLS paragraphs: {', '.join(changed_fls)}"
+ )
if has_differences:
temp_file = None
try:
- with tempfile.NamedTemporaryFile(mode='w', delete=False, prefix='fls_diff_', suffix='.txt') as temp_file:
+ with tempfile.NamedTemporaryFile(
+ mode="w", delete=False, prefix="fls_diff_", suffix=".txt"
+ ) as temp_file:
temp_file.write("\n".join(detailed_differences))
temp_path = temp_file.name
logger.warning(f"Detailed FLS differences written to: {temp_path}")
@@ -491,10 +529,12 @@ def format_affected_guidelines(fls_id):
# Create concise summary for return
summary = []
if has_differences:
- summary.append(f"Found differences between live FLS data and lock file affecting {len(affected_guidelines)} guidelines")
+ summary.append(
+ f"Found differences between live FLS data and lock file affecting {len(affected_guidelines)} guidelines"
+ )
for guideline_id, info in sorted(affected_guidelines.items()):
# Get unique FLS IDs
- fls_ids = sorted(set(c['fls_id'] for c in info['changes']))
+ fls_ids = sorted(set(c["fls_id"] for c in info["changes"]))
summary.append(f"{guideline_id}: {', '.join(fls_ids)}")
return has_differences, summary
@@ -503,10 +543,11 @@ def format_affected_guidelines(fls_id):
logger.error(f"Error reading or parsing lock file {lock_path}: {e}")
return False, [f"Failed to read lock file: {e}"]
+
def insert_fls_coverage(app, env, fls_ids):
"""
Enrich the fls_ids with whether each FLS ID is covered by coding guidelines
-
+
Args:
app: The Sphinx application
env: The Sphinx environment
@@ -515,49 +556,49 @@ def insert_fls_coverage(app, env, fls_ids):
logger.debug("Inserting FLS coverage data")
data = SphinxNeedsData(env)
needs = data.get_needs_view()
-
+
# Initialize coverage for all FLS IDs
for fls_id in fls_ids:
- fls_ids[fls_id]['covered'] = False
- fls_ids[fls_id]['covering_needs'] = [] # List to store all covering guidelines
-
+ fls_ids[fls_id]["covered"] = False
+ fls_ids[fls_id]["covering_needs"] = [] # List to store all covering guidelines
+
# Extract chapter information from section_id (e.g., "22.1:4" -> chapter 22)
- section_id = fls_ids[fls_id]['section_id']
+ section_id = fls_ids[fls_id]["section_id"]
logger.debug(f"Processing section_id: {section_id} for {fls_id}")
-
+
# Handle formats like "22.1:4", "4.3.1:9", or "A.1:2"
- if ':' in section_id:
+ if ":" in section_id:
# Split at colon to get the section number without paragraph number
- section_parts = section_id.split(':')[0].split('.')
+ section_parts = section_id.split(":")[0].split(".")
else:
# Fallback if no colon present
- section_parts = section_id.split('.')
-
+ section_parts = section_id.split(".")
+
if section_parts and section_parts[0].isdigit():
chapter = int(section_parts[0])
- fls_ids[fls_id]['chapter'] = chapter
+ fls_ids[fls_id]["chapter"] = chapter
else:
# Handle appendices or other non-standard formats
first_char = section_id[0] if section_id else None
if first_char and first_char.isalpha():
# For appendices like "A.1.1", use the letter as chapter
- fls_ids[fls_id]['chapter'] = first_char
+ fls_ids[fls_id]["chapter"] = first_char
else:
- fls_ids[fls_id]['chapter'] = 'unknown'
-
+ fls_ids[fls_id]["chapter"] = "unknown"
+
# Mark covered FLS IDs
unique_covered_ids = set()
total_references = 0
-
+
for need_id, need in needs.items():
- if need.get('type') == 'guideline':
+ if need.get("type") == "guideline":
fls_value = need.get("fls")
if fls_value and fls_value in fls_ids:
- fls_ids[fls_value]['covered'] = True
- fls_ids[fls_value]['covering_needs'].append(need_id)
+ fls_ids[fls_value]["covered"] = True
+ fls_ids[fls_value]["covering_needs"].append(need_id)
unique_covered_ids.add(fls_value)
total_references += 1
-
+
logger.info(f"Found {total_references} references to FLS IDs in guidelines")
logger.info(f"Found {len(unique_covered_ids)} unique FLS IDs covered by guidelines")
return fls_ids
@@ -566,71 +607,68 @@ def insert_fls_coverage(app, env, fls_ids):
def calculate_fls_coverage(fls_ids, fls_id_ignore_list):
"""
Calculate coverage statistics for FLS IDs
-
+
Args:
fls_ids: Dictionary of FLS paragraph IDs with metadata, including coverage status
fls_id_ignore_list: List of FLS IDs to ignore in coverage calculations
-
+
Returns:
Dictionary containing coverage statistics
"""
logger.debug("Calculating FLS coverage statistics")
-
+
# Track statistics
total_ids = 0
covered_ids = 0
ignored_ids = 0
-
+
# Organize by chapter
chapters = {}
-
+
# Process each FLS ID
for fls_id, metadata in fls_ids.items():
- chapter = metadata.get('chapter', 'unknown')
-
+ chapter = metadata.get("chapter", "unknown")
+
# Initialize chapter data if needed
if chapter not in chapters:
- chapters[chapter] = {
- 'total': 0,
- 'covered': 0,
- 'ignored': 0,
- 'ids': []
- }
-
+ chapters[chapter] = {"total": 0, "covered": 0, "ignored": 0, "ids": []}
+
# Add to chapter's ID list
- chapters[chapter]['ids'].append(fls_id)
- chapters[chapter]['total'] += 1
+ chapters[chapter]["ids"].append(fls_id)
+ chapters[chapter]["total"] += 1
total_ids += 1
-
+
# Check if ID should be ignored
if fls_id in fls_id_ignore_list:
ignored_ids += 1
- chapters[chapter]['ignored'] += 1
+ chapters[chapter]["ignored"] += 1
# Mark as ignored in the original data structure too
- fls_ids[fls_id]['ignored'] = True
+ fls_ids[fls_id]["ignored"] = True
else:
- fls_ids[fls_id]['ignored'] = False
-
+ fls_ids[fls_id]["ignored"] = False
+
# Count coverage if not ignored
- if metadata.get('covered', False):
+ if metadata.get("covered", False):
covered_ids += 1
- chapters[chapter]['covered'] += 1
-
+ chapters[chapter]["covered"] += 1
+
# Calculate coverage percentages
effective_total = total_ids - ignored_ids
- overall_coverage = (covered_ids / effective_total * 100) if effective_total > 0 else 0
-
+ overall_coverage = (
+ (covered_ids / effective_total * 100) if effective_total > 0 else 0
+ )
+
# Calculate chapter coverage
chapter_coverage = {}
for chapter, data in chapters.items():
- effective_chapter_total = data['total'] - data['ignored']
-
+ effective_chapter_total = data["total"] - data["ignored"]
+
if effective_chapter_total == 0:
# All IDs in this chapter are ignored
chapter_coverage[chapter] = "IGNORED"
else:
- chapter_coverage[chapter] = (data['covered'] / effective_chapter_total * 100)
-
+ chapter_coverage[chapter] = data["covered"] / effective_chapter_total * 100
+
# Sort chapters by custom logic to handle mixed types
def chapter_sort_key(chapter):
if isinstance(chapter, int):
@@ -639,23 +677,24 @@ def chapter_sort_key(chapter):
return (1, chapter) # Sort letters second, alphabetically
else:
return (2, str(chapter)) # Sort anything else last
-
+
sorted_chapters = sorted(chapters.keys(), key=chapter_sort_key)
-
+
# Prepare result
coverage_data = {
- 'total_ids': total_ids,
- 'covered_ids': covered_ids,
- 'ignored_ids': ignored_ids,
- 'effective_total': effective_total,
- 'overall_coverage': overall_coverage,
- 'chapters': sorted_chapters,
- 'chapter_data': chapters,
- 'chapter_coverage': chapter_coverage
+ "total_ids": total_ids,
+ "covered_ids": covered_ids,
+ "ignored_ids": ignored_ids,
+ "effective_total": effective_total,
+ "overall_coverage": overall_coverage,
+ "chapters": sorted_chapters,
+ "chapter_data": chapters,
+ "chapter_coverage": chapter_coverage,
}
-
+
return coverage_data
+
def log_coverage_report(coverage_data):
"""Log a report of FLS coverage statistics"""
logger.info("=== FLS Coverage Report ===")
@@ -663,13 +702,11 @@ def log_coverage_report(coverage_data):
logger.info(f"Covered FLS IDs: {coverage_data['covered_ids']}")
logger.info(f"Ignored FLS IDs: {coverage_data['ignored_ids']}")
logger.info(f"Overall coverage: {coverage_data['overall_coverage']:.2f}%")
-
+
logger.info("\nCoverage by chapter:")
- for chapter in coverage_data['chapters']:
- coverage = coverage_data['chapter_coverage'][chapter]
+ for chapter in coverage_data["chapters"]:
+ coverage = coverage_data["chapter_coverage"][chapter]
if coverage == "IGNORED":
logger.info(f" Chapter {chapter}: IGNORED (all IDs are on ignore list)")
else:
logger.info(f" Chapter {chapter}: {coverage:.2f}%")
-
-
diff --git a/exts/coding_guidelines/fls_linking.py b/exts/coding_guidelines/fls_linking.py
index a59b420..86365aa 100644
--- a/exts/coding_guidelines/fls_linking.py
+++ b/exts/coding_guidelines/fls_linking.py
@@ -1,6 +1,8 @@
-import re
import os
+import re
+
import sphinx
+
from .common import logger
@@ -9,9 +11,9 @@ def build_finished(app, exception):
# The build finished hook also runs when an exception is raised.
if exception is not None:
return
-
+
logger.info("Adding FLS links...")
-
+
try:
with sphinx.util.display.progress_message("dumping paragraph ids"):
post_process_html(app)
@@ -24,31 +26,34 @@ def load_fls_ids(app):
"""Load FLS IDs and their URLs."""
try:
from . import fls_checks
- fls_ids, _ = fls_checks.gather_fls_paragraph_ids(app, app.config.fls_paragraph_ids_url )
- return {fls_id: data['url'] for fls_id, data in fls_ids.items()}
+
+ fls_ids, _ = fls_checks.gather_fls_paragraph_ids(
+ app, app.config.fls_paragraph_ids_url
+ )
+ return {fls_id: data["url"] for fls_id, data in fls_ids.items()}
except Exception as e:
logger.error(f"Failed to load FLS IDs: {e}")
return {}
+
def post_process_html(app):
-
# Load FLS IDs if not already loaded
- if not hasattr(app, 'fls_urls'):
+ if not hasattr(app, "fls_urls"):
app.fls_urls = load_fls_ids(app)
-
+
# Pattern to match the proper HTML structure
pattern = r'fls: (fls_[a-zA-Z0-9]{9,12})'
-
+
# Function to replace FLS IDs with links
def replace_fls(match):
"""Replace FLS ID with a link if it exists."""
fls_id = match.group(1)
-
+
        if fls_id in app.fls_urls:
            # assumed markup: wrap known IDs in a link to the FLS paragraph
            return f'<a href="{app.fls_urls[fls_id]}" class="fls-link">fls: {fls_id}</a>'
        else:
            # assumed markup: flag unknown IDs for the error styling below
            return f'<span class="fls-link-error">fls: {fls_id}</span>'
-
+
# CSS for styling
css = """
/* Styling for FLS ID links */
@@ -70,37 +75,45 @@ def replace_fls(match):
color: #cc0000;
}
"""
-
+
# Write CSS file
- css_path = os.path.join(app.outdir, '_static', 'fls_links.css')
+ css_path = os.path.join(app.outdir, "_static", "fls_links.css")
os.makedirs(os.path.dirname(css_path), exist_ok=True)
- with open(css_path, 'w') as f:
+ with open(css_path, "w") as f:
f.write(css)
-
+
# Process all HTML files
for root, _, files in os.walk(app.outdir):
for filename in files:
- if filename.endswith('.html'):
+ if filename.endswith(".html"):
filepath = os.path.join(root, filename)
-
+
# Read HTML content
- with open(filepath, 'r', encoding='utf-8') as f:
+ with open(filepath, "r", encoding="utf-8") as f:
content = f.read()
-
+
# Replace FLS IDs with links
modified = re.sub(pattern, replace_fls, content)
-
+
                # Add CSS link if needed
-                if modified != content and '</head>' in modified:
-                    rel_path = os.path.relpath(app.outdir, os.path.dirname(filepath))
-                    css_path = os.path.join(rel_path, '_static', 'fls_links.css').replace('\\', '/')
-                    modified = modified.replace('</head>',
-                        f'<link rel="stylesheet" href="{css_path}">\n</head>')
-
+                if modified != content and "</head>" in modified:
+                    # path from this HTML file back to the output root (assumed)
+                    rel_path = os.path.relpath(app.outdir, os.path.dirname(filepath))
+                    css_path = os.path.join(
+                        rel_path, "_static", "fls_links.css"
+                    ).replace("\\", "/")
+
+                    modified = modified.replace(
+                        "</head>",
+                        f'<link rel="stylesheet" href="{css_path}">\n</head>',
+                    )
+
# Write modified content back
if modified != content:
- with open(filepath, 'w', encoding='utf-8') as f:
+ with open(filepath, "w", encoding="utf-8") as f:
f.write(modified)
logger.debug(f"Updated FLS links in {filepath}")
diff --git a/exts/coding_guidelines/guidelines_checks.py b/exts/coding_guidelines/guidelines_checks.py
index d5eac9f..73e6ea6 100644
--- a/exts/coding_guidelines/guidelines_checks.py
+++ b/exts/coding_guidelines/guidelines_checks.py
@@ -3,35 +3,43 @@
from sphinx.errors import SphinxError
from sphinx_needs.data import SphinxNeedsData
-from .common import logger, get_tqdm, bar_format
+
+from .common import bar_format, get_tqdm, logger
class IntegrityCheckError(SphinxError):
category = "Integrity Check Error"
+
def validate_required_fields(app, env):
"""
- Validate the required fields defined in conf.py
+ Validate the required fields defined in conf.py
"""
logger.debug("Validating required fields")
data = SphinxNeedsData(env)
needs = data.get_needs_view()
- required_fields = app.config.required_guideline_fields # Access the configured values
+ required_fields = (
+ app.config.required_guideline_fields
+ ) # Access the configured values
# prefiltering: this is mainly done for tqdm progress
- guidelines = {k: v for k, v in needs.items() if v.get('type') == 'guideline'}
- pbar = get_tqdm(iterable=guidelines.items(), desc="Checking for required fields", bar_format=bar_format, unit="need")
+ guidelines = {k: v for k, v in needs.items() if v.get("type") == "guideline"}
+ pbar = get_tqdm(
+ iterable=guidelines.items(),
+ desc="Checking for required fields",
+ bar_format=bar_format,
+ unit="need",
+ )
for key, value in pbar:
- if value.get('type') == 'guideline':
+ if value.get("type") == "guideline":
missing_fields = []
for field in required_fields:
pbar.set_postfix(field=field if field is not None else "Missing")
- if value.get(field) in (None, '', []):
+ if value.get(field) in (None, "", []):
missing_fields.append(field)
-
if missing_fields:
error_message = (
f"Guideline '{value.get('title')}' (ID: {value.get('id')}) "
@@ -39,5 +47,5 @@ def validate_required_fields(app, env):
f"{', '.join(missing_fields)}"
)
logger.error(error_message)
- app.builder.statuscode = 1 # mark the build as failed (0 means success)
- raise IntegrityCheckError(error_message)
+ app.builder.statuscode = 1 # mark the build as failed (0 means success)
+ raise IntegrityCheckError(error_message)
diff --git a/exts/coding_guidelines/std_role.py b/exts/coding_guidelines/std_role.py
index ec1c105..4a1fc35 100644
--- a/exts/coding_guidelines/std_role.py
+++ b/exts/coding_guidelines/std_role.py
@@ -5,6 +5,7 @@
from sphinx.roles import SphinxRole
from urllib.parse import quote
+
class StdRefRole(SphinxRole):
def run(self):
text, target = parse_target_from_text(self.text)
diff --git a/exts/coding_guidelines/write_guidelines_ids.py b/exts/coding_guidelines/write_guidelines_ids.py
index 04a1d65..787557f 100644
--- a/exts/coding_guidelines/write_guidelines_ids.py
+++ b/exts/coding_guidelines/write_guidelines_ids.py
@@ -1,6 +1,7 @@
"""
Module to generate checksums for guidelines and write paragraph-ids.json file.
"""
+
import hashlib
import json
import os
@@ -15,95 +16,102 @@ def calculate_checksum(content, options):
# Combine content and sorted options
options_str = json.dumps(options, sort_keys=True)
combined = content + options_str
-
+
# Calculate SHA-256 hash
- hash_obj = hashlib.sha256(combined.encode('UTF-8'))
+ hash_obj = hashlib.sha256(combined.encode("UTF-8"))
return hash_obj.hexdigest()
+
def write_guidelines_ids(app):
"""
Write guideline IDs and checksums to JSON file with guidelines as the primary structure.
-
- This collects all guidelines and their directly associated rationale, good example,
+
+ This collects all guidelines and their directly associated rationale, good example,
and bad example, computes checksums for their content, and writes a structured JSON file.
-
+
Fails the build if any guideline is missing a rationale, good example, or bad example.
"""
env = app.env
data = SphinxNeedsData(env)
all_needs = data.get_needs_view()
-
-# Organize by document
- documents_data = defaultdict(lambda: {
- "title": "",
- "link": "",
- "guidelines": []
- })
-
+
+ # Organize by document
+ documents_data = defaultdict(lambda: {"title": "", "link": "", "guidelines": []})
+
# Collect document titles and links
for docname, title in env.titles.items():
doc_uri = app.builder.get_target_uri(docname)
documents_data[docname]["title"] = title.astext()
documents_data[docname]["link"] = doc_uri
-
+
# List to track guidelines with missing elements
incomplete_guidelines = []
-
+
# Process all guidelines
for need_id, need in all_needs.items():
- if need['type'] == 'guideline':
- docname = need['docname']
+ if need["type"] == "guideline":
+ docname = need["docname"]
doc_uri = app.builder.get_target_uri(docname)
-
+
# Compute checksum for the guideline
- content = need.get('content', '')
- options = {k: v for k, v in need.items() if k not in ('content', 'docname', 'lineno', 'refid', 'content_node')}
+ content = need.get("content", "")
+ options = {
+ k: v
+ for k, v in need.items()
+ if k not in ("content", "docname", "lineno", "refid", "content_node")
+ }
checksum = calculate_checksum(content, options)
-
+
# Create guideline structure
guideline_data = {
- "id": need['id'],
- "title": need.get('title', 'Untitled Guideline'),
+ "id": need["id"],
+ "title": need.get("title", "Untitled Guideline"),
"link": f"{doc_uri}#{need['id']}",
"checksum": checksum,
"rationale": None,
"non_compliant_example": None,
- "compliant_example": None
+ "compliant_example": None,
}
-
+
# Look for associated elements using parent_needs_back
missing_elements = []
-
+
# Get all needs that have this guideline as their parent
- parent_needs_back = need.get('parent_needs_back', [])
-
+ parent_needs_back = need.get("parent_needs_back", [])
+
for related_id in parent_needs_back:
if related_id in all_needs:
related_need = all_needs[related_id]
- related_type = related_need.get('type')
-
+ related_type = related_need.get("type")
+
# Compute checksum for the related need
- related_content = related_need.get('content', '')
- related_options = {k: v for k, v in related_need.items()
- if k not in ('content', 'docname', 'lineno', 'refid', 'content_node')}
- related_checksum = calculate_checksum(related_content, related_options)
-
+ related_content = related_need.get("content", "")
+ related_options = {
+ k: v
+ for k, v in related_need.items()
+ if k
+ not in ("content", "docname", "lineno", "refid", "content_node")
+ }
+ related_checksum = calculate_checksum(
+ related_content, related_options
+ )
+
# Create the related need data
related_data = {
"id": related_id,
"link": f"{app.builder.get_target_uri(related_need['docname'])}#{related_id}",
- "checksum": related_checksum
+ "checksum": related_checksum,
}
-
+
# Add to the appropriate field based on type
- if related_type == 'rationale':
+ if related_type == "rationale":
guideline_data["rationale"] = related_data
- elif related_type == 'non_compliant_example':
+ elif related_type == "non_compliant_example":
guideline_data["non_compliant_example"] = related_data
- elif related_type == 'compliant_example':
+ elif related_type == "compliant_example":
guideline_data["compliant_example"] = related_data
-
+
# Check for missing elements
if guideline_data["rationale"] is None:
missing_elements.append("rationale")
@@ -111,58 +119,60 @@ def write_guidelines_ids(app):
missing_elements.append("non_compliant_example")
if guideline_data["compliant_example"] is None:
missing_elements.append("compliant_example")
-
+
# Track incomplete guidelines
if missing_elements:
- incomplete_guidelines.append({
- "id": need_id,
- "title": need.get('title', 'Untitled Guideline'),
- "missing": missing_elements,
- "docname": docname
- })
-
+ incomplete_guidelines.append(
+ {
+ "id": need_id,
+ "title": need.get("title", "Untitled Guideline"),
+ "missing": missing_elements,
+ "docname": docname,
+ }
+ )
+
# Add this guideline to the document
documents_data[docname]["guidelines"].append(guideline_data)
-
+
# Prepare the final structure for JSON
documents = []
for docname, doc_data in documents_data.items():
if doc_data["guidelines"]: # Only include documents with guidelines
documents.append(doc_data)
-
+
# Write the JSON file with the new name
output_file = os.path.join(app.outdir, "guidelines-ids.json")
- with open(output_file, 'w') as f:
+ with open(output_file, "w") as f:
json.dump({"documents": documents}, f, indent=2)
f.write("\n")
-
+
logger.info(f"Guidelines IDs written to {output_file}")
-
+
# Fail the build if we have incomplete guidelines
if incomplete_guidelines:
error_message = "The following guidelines are missing required elements:\n\n"
-
+
for incomplete in incomplete_guidelines:
error_message += f"Guideline: {incomplete['id']} ({incomplete['title']})\n"
error_message += f"Location: {incomplete['docname']}\n"
error_message += f"Missing: {', '.join(incomplete['missing'])}\n\n"
-
+
error_message += "Each guideline must have an associated rationale, good example, and bad example."
logger.error(error_message)
raise Exception(error_message)
+
def build_finished(app, exception):
"""Hook to run at the end of the build process."""
# The build finished hook also runs when an exception is raised.
if exception is not None:
return
-
+
logger.info("Generating guidelines IDs and checksums...")
-
+
try:
with sphinx.util.display.progress_message("dumping paragraph ids"):
write_guidelines_ids(app)
except Exception as e:
logger.error(str(e))
raise
-
diff --git a/generate-guideline-templates.py b/generate-guideline-templates.py
index 9bd77b0..aa0efe6 100755
--- a/generate-guideline-templates.py
+++ b/generate-guideline-templates.py
@@ -3,18 +3,20 @@
# SPDX-FileCopyrightText: The Coding Guidelines Subcommittee Contributors
import argparse
-import string
import random
+import string
# Configuration
CHARS = string.ascii_letters + string.digits
ID_LENGTH = 12
+
def generate_id(prefix):
"""Generate a random ID with the given prefix."""
random_part = "".join(random.choice(CHARS) for _ in range(ID_LENGTH))
return f"{prefix}_{random_part}"
+
def generate_guideline_template():
"""Generate a complete guideline template with all required sections."""
# Generate IDs for all sections
@@ -22,7 +24,7 @@ def generate_guideline_template():
rationale_id = generate_id("rat")
non_compliant_example_id = generate_id("non_compl_ex")
compliant_example_id = generate_id("compl_ex")
-
+
template = f""".. guideline:: Title Here
:id: {guideline_id}
:category:
@@ -67,34 +69,37 @@ def generate_guideline_template():
"""
return template
+
def parse_args():
"""Parse command-line arguments."""
parser = argparse.ArgumentParser(
description="Generate guideline templates with randomly generated IDs"
)
parser.add_argument(
- "-n",
- "--number-of-templates",
- type=int,
+ "-n",
+ "--number-of-templates",
+ type=int,
default=1,
- help="Number of templates to generate (default: 1)"
+ help="Number of templates to generate (default: 1)",
)
return parser.parse_args()
+
def main():
"""Generate the specified number of guideline templates."""
args = parse_args()
num_templates = args.number_of_templates
-
+
for i in range(num_templates):
if num_templates > 1:
- print(f"=== Template {i+1} ===\n")
-
+ print(f"=== Template {i + 1} ===\n")
+
template = generate_guideline_template()
print(template)
-
+
if num_templates > 1 and i < num_templates - 1:
print("\n" + "=" * 80 + "\n")
+
if __name__ == "__main__":
main()
diff --git a/pyproject.toml b/pyproject.toml
index 9e11cd0..e534bc1 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -18,3 +18,29 @@ members = ["builder"]
[tool.uv.sources]
builder = { workspace = true }
+
+[tool.ruff]
+# line-length = 88
+lint.select = [
+ # "E",
+ # "F",
+ # "W",
+ # "C",
+ # "I",
+ # "Q",
+ # "B",
+]
+# Remove or reduce ignores to catch more issues
+# ignore = []
+# extend-ignore = []
+
+[tool.ruff.lint.flake8-comprehensions]
+allow-dict-calls-with-keyword-arguments = true
+
+[dependency-groups]
+dev = [
+ "ruff>=0.12.3",
+]
+
+# [tool.ruff.mccabe]
+# max-complexity = 10
diff --git a/src/conf.py b/src/conf.py
index fda72ec..ca302e1 100644
--- a/src/conf.py
+++ b/src/conf.py
@@ -10,20 +10,20 @@
# -- Project information -----------------------------------------------------
-project = 'Safety-Critical Rust Coding Guidelines'
-copyright = '2025, Contributors to Coding Guidelines Subcommittee'
-author = 'Contributors to Coding Guidelines Subcommittee'
-release = '0.1'
+project = "Safety-Critical Rust Coding Guidelines"
+copyright = "2025, Contributors to Coding Guidelines Subcommittee"
+author = "Contributors to Coding Guidelines Subcommittee"
+release = "0.1"
# -- General configuration ---------------------------------------------------
# Add sphinx-needs to extensions
extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.viewcode',
- 'sphinx.ext.autosectionlabel',
- 'sphinx_needs',
- 'coding_guidelines',
+ "sphinx.ext.autodoc",
+ "sphinx.ext.viewcode",
+ "sphinx.ext.autosectionlabel",
+ "sphinx_needs",
+ "coding_guidelines",
]
# Basic needs configuration
@@ -39,29 +39,29 @@
"title": "Guideline",
"prefix": "gui_",
"color": "#BFD8D2",
- "style": "node"
+ "style": "node",
},
{
"directive": "rationale",
"title": "Rationale",
"prefix": "rat_",
"color": "#DF744A",
- "style": "node"
+ "style": "node",
},
{
"directive": "compliant_example",
"title": "Compliant Example",
"prefix": "compl_ex_",
"color": "#729FCF",
- "style": "node"
+ "style": "node",
},
{
"directive": "non_compliant_example",
"title": "Non-Compliant Example",
"prefix": "non_compl_ex_",
"color": "#729FCF",
- "style": "node"
- }
+ "style": "node",
+ },
]
# Define custom sections for needs
@@ -71,7 +71,7 @@
"content",
"rationale",
"non_compliant_example",
- "compliant_example"
+ "compliant_example",
]
}
}
@@ -80,7 +80,11 @@
needs_render_contexts = {
"guideline": {
"content": ["content"],
- "extra_content": ["rationale", "non_compliant_example", "non_compliant_example"]
+ "extra_content": [
+ "rationale",
+ "non_compliant_example",
+ "non_compliant_example",
+ ],
}
}
@@ -89,7 +93,9 @@
needs_statuses = [
dict(name="draft", description="This guideline is in draft stage", color="#999999"),
- dict(name="approved", description="This guideline has been approved", color="#00FF00"),
+ dict(
+ name="approved", description="This guideline has been approved", color="#00FF00"
+ ),
dict(name="retired", description="This guideline is retired", color="#FF0000"),
]
@@ -104,35 +110,85 @@
needs_categories = [
dict(name="mandatory", description="This guideline is mandatory", color="#999999"),
dict(name="required", description="This guideline is required", color="#FFCC00"),
- dict(name="advisory", description="This guideline is advisory, should be followed when able", color="#FFCC00"),
- dict(name="disapplied", description="This guideline is advisory, should be followed when able", color="#FFCC00"),
+ dict(
+ name="advisory",
+ description="This guideline is advisory, should be followed when able",
+ color="#FFCC00",
+ ),
+ dict(
+ name="disapplied",
+ description="This guideline is advisory, should be followed when able",
+ color="#FFCC00",
+ ),
]
needs_decidabilities = [
- dict(name="decidable", description="This guideline can be automatically checked with tooling", color="#999999"),
- dict(name="undecidable", description="This guideline cannot be automatically checked with tooling", color="#999999"),
+ dict(
+ name="decidable",
+ description="This guideline can be automatically checked with tooling",
+ color="#999999",
+ ),
+ dict(
+ name="undecidable",
+ description="This guideline cannot be automatically checked with tooling",
+ color="#999999",
+ ),
]
needs_scopes = [
- dict(name="module", description="This guideline can be checked at the module level", color="#999999"),
- dict(name="crate", description="This guideline can be checked at the crate level", color="#FFCC00"),
- dict(name="system", description="This guideline must be checked alongside the entire source", color="#FFCC00"),
+ dict(
+ name="module",
+ description="This guideline can be checked at the module level",
+ color="#999999",
+ ),
+ dict(
+ name="crate",
+ description="This guideline can be checked at the crate level",
+ color="#FFCC00",
+ ),
+ dict(
+ name="system",
+ description="This guideline must be checked alongside the entire source",
+ color="#FFCC00",
+ ),
]
needs_releases = [
- dict(name="1.85.0", description="This guideline can be checked at the module level", color="#999999"),
- dict(name="1.85.1", description="This guideline can be checked at the module level", color="#999999"),
+ dict(
+ name="1.85.0",
+ description="This guideline can be checked at the module level",
+ color="#999999",
+ ),
+ dict(
+ name="1.85.1",
+ description="This guideline can be checked at the module level",
+ color="#999999",
+ ),
]
# Enable needs export
-needs_extra_options = ["category", "recommendation", "fls", "decidability", "scope", "release"]
+needs_extra_options = [
+ "category",
+ "recommendation",
+ "fls",
+ "decidability",
+ "scope",
+ "release",
+]
# Required guideline fields
-required_guideline_fields = ['category', 'release', 'fls', 'decidability', 'scope', 'tags'] # Id is automatically generated
+required_guideline_fields = [
+ "category",
+ "release",
+ "fls",
+ "decidability",
+ "scope",
+ "tags",
+] # Id is automatically generated
# -- Options for HTML output -------------------------------------------------
# Configure the theme
-html_theme = 'sphinx_rtd_theme'
-html_static_path = ['_static']
+html_theme = "sphinx_rtd_theme"
+html_static_path = ["_static"]
diff --git a/uv.lock b/uv.lock
index 98f7c01..e54e145 100644
--- a/uv.lock
+++ b/uv.lock
@@ -347,6 +347,31 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/68/15/6d22d07e063ce5e9bfbd96db9ec2fbb4693591b4503e3a76996639474d02/rpds_py-0.23.1-cp313-cp313t-win_amd64.whl", hash = "sha256:d6f6512a90bd5cd9030a6237f5346f046c6f0e40af98657568fa45695d4de59d", size = 235415 },
]
+[[package]]
+name = "ruff"
+version = "0.12.3"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/c3/2a/43955b530c49684d3c38fcda18c43caf91e99204c2a065552528e0552d4f/ruff-0.12.3.tar.gz", hash = "sha256:f1b5a4b6668fd7b7ea3697d8d98857390b40c1320a63a178eee6be0899ea2d77", size = 4459341 }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/e2/fd/b44c5115539de0d598d75232a1cc7201430b6891808df111b8b0506aae43/ruff-0.12.3-py3-none-linux_armv6l.whl", hash = "sha256:47552138f7206454eaf0c4fe827e546e9ddac62c2a3d2585ca54d29a890137a2", size = 10430499 },
+ { url = "https://files.pythonhosted.org/packages/43/c5/9eba4f337970d7f639a37077be067e4ec80a2ad359e4cc6c5b56805cbc66/ruff-0.12.3-py3-none-macosx_10_12_x86_64.whl", hash = "sha256:0a9153b000c6fe169bb307f5bd1b691221c4286c133407b8827c406a55282041", size = 11213413 },
+ { url = "https://files.pythonhosted.org/packages/e2/2c/fac3016236cf1fe0bdc8e5de4f24c76ce53c6dd9b5f350d902549b7719b2/ruff-0.12.3-py3-none-macosx_11_0_arm64.whl", hash = "sha256:fa6b24600cf3b750e48ddb6057e901dd5b9aa426e316addb2a1af185a7509882", size = 10586941 },
+ { url = "https://files.pythonhosted.org/packages/c5/0f/41fec224e9dfa49a139f0b402ad6f5d53696ba1800e0f77b279d55210ca9/ruff-0.12.3-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2506961bf6ead54887ba3562604d69cb430f59b42133d36976421bc8bd45901", size = 10783001 },
+ { url = "https://files.pythonhosted.org/packages/0d/ca/dd64a9ce56d9ed6cad109606ac014860b1c217c883e93bf61536400ba107/ruff-0.12.3-py3-none-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:c4faaff1f90cea9d3033cbbcdf1acf5d7fb11d8180758feb31337391691f3df0", size = 10269641 },
+ { url = "https://files.pythonhosted.org/packages/63/5c/2be545034c6bd5ce5bb740ced3e7014d7916f4c445974be11d2a406d5088/ruff-0.12.3-py3-none-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:40dced4a79d7c264389de1c59467d5d5cefd79e7e06d1dfa2c75497b5269a5a6", size = 11875059 },
+ { url = "https://files.pythonhosted.org/packages/8e/d4/a74ef1e801ceb5855e9527dae105eaff136afcb9cc4d2056d44feb0e4792/ruff-0.12.3-py3-none-manylinux_2_17_ppc64.manylinux2014_ppc64.whl", hash = "sha256:0262d50ba2767ed0fe212aa7e62112a1dcbfd46b858c5bf7bbd11f326998bafc", size = 12658890 },
+ { url = "https://files.pythonhosted.org/packages/13/c8/1057916416de02e6d7c9bcd550868a49b72df94e3cca0aeb77457dcd9644/ruff-0.12.3-py3-none-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:12371aec33e1a3758597c5c631bae9a5286f3c963bdfb4d17acdd2d395406687", size = 12232008 },
+ { url = "https://files.pythonhosted.org/packages/f5/59/4f7c130cc25220392051fadfe15f63ed70001487eca21d1796db46cbcc04/ruff-0.12.3-py3-none-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:560f13b6baa49785665276c963edc363f8ad4b4fc910a883e2625bdb14a83a9e", size = 11499096 },
+ { url = "https://files.pythonhosted.org/packages/d4/01/a0ad24a5d2ed6be03a312e30d32d4e3904bfdbc1cdbe63c47be9d0e82c79/ruff-0.12.3-py3-none-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:023040a3499f6f974ae9091bcdd0385dd9e9eb4942f231c23c57708147b06311", size = 11688307 },
+ { url = "https://files.pythonhosted.org/packages/93/72/08f9e826085b1f57c9a0226e48acb27643ff19b61516a34c6cab9d6ff3fa/ruff-0.12.3-py3-none-musllinux_1_2_aarch64.whl", hash = "sha256:883d844967bffff5ab28bba1a4d246c1a1b2933f48cb9840f3fdc5111c603b07", size = 10661020 },
+ { url = "https://files.pythonhosted.org/packages/80/a0/68da1250d12893466c78e54b4a0ff381370a33d848804bb51279367fc688/ruff-0.12.3-py3-none-musllinux_1_2_armv7l.whl", hash = "sha256:2120d3aa855ff385e0e562fdee14d564c9675edbe41625c87eeab744a7830d12", size = 10246300 },
+ { url = "https://files.pythonhosted.org/packages/6a/22/5f0093d556403e04b6fd0984fc0fb32fbb6f6ce116828fd54306a946f444/ruff-0.12.3-py3-none-musllinux_1_2_i686.whl", hash = "sha256:6b16647cbb470eaf4750d27dddc6ebf7758b918887b56d39e9c22cce2049082b", size = 11263119 },
+ { url = "https://files.pythonhosted.org/packages/92/c9/f4c0b69bdaffb9968ba40dd5fa7df354ae0c73d01f988601d8fac0c639b1/ruff-0.12.3-py3-none-musllinux_1_2_x86_64.whl", hash = "sha256:e1417051edb436230023575b149e8ff843a324557fe0a265863b7602df86722f", size = 11746990 },
+ { url = "https://files.pythonhosted.org/packages/fe/84/7cc7bd73924ee6be4724be0db5414a4a2ed82d06b30827342315a1be9e9c/ruff-0.12.3-py3-none-win32.whl", hash = "sha256:dfd45e6e926deb6409d0616078a666ebce93e55e07f0fb0228d4b2608b2c248d", size = 10589263 },
+ { url = "https://files.pythonhosted.org/packages/07/87/c070f5f027bd81f3efee7d14cb4d84067ecf67a3a8efb43aadfc72aa79a6/ruff-0.12.3-py3-none-win_amd64.whl", hash = "sha256:a946cf1e7ba3209bdef039eb97647f1c77f6f540e5845ec9c114d3af8df873e7", size = 11695072 },
+ { url = "https://files.pythonhosted.org/packages/e0/30/f3eaf6563c637b6e66238ed6535f6775480db973c836336e4122161986fc/ruff-0.12.3-py3-none-win_arm64.whl", hash = "sha256:5f9c7c9c8f84c2d7f27e93674d27136fbf489720251544c4da7fb3d742e011b1", size = 10805855 },
+]
+
[[package]]
name = "safety-critical-rust-coding-guidelines"
version = "0.1.0"
@@ -360,6 +385,11 @@ dependencies = [
{ name = "tqdm" },
]
+[package.dev-dependencies]
+dev = [
+ { name = "ruff" },
+]
+
[package.metadata]
requires-dist = [
{ name = "builder", virtual = "builder" },
@@ -370,6 +400,9 @@ requires-dist = [
{ name = "tqdm" },
]
+[package.metadata.requires-dev]
+dev = [{ name = "ruff", specifier = ">=0.12.3" }]
+
[[package]]
name = "sniffio"
version = "1.3.1"