# Auto Tag and Release
#
# Runs when a PR labeled `version-bump` is merged into main. Reads the new
# version from version.txt, creates the matching `v<version>` git tag (if it
# does not exist yet), and dispatches the release workflow for that tag.
name: Auto Tag and Release

on:
  pull_request:
    types: [closed]
    branches:
      - main

jobs:
  auto-tag-and-release:
    # Only run if the PR was merged and has the version-bump label
    if: github.event.pull_request.merged == true && contains(github.event.pull_request.labels.*.name, 'version-bump')
    runs-on: ubuntu-latest
    permissions:
      contents: write
      actions: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          ref: main
          # Full history so every existing tag is available for the
          # "already exists" check below.
          fetch-depth: 0

      - name: Get version from version.txt
        id: get-version
        run: |
          VERSION=$(tr -d '[:space:]' < version.txt)
          echo "version=${VERSION}" >> $GITHUB_OUTPUT
          echo "tag=v${VERSION}" >> $GITHUB_OUTPUT
          echo "Version from version.txt: ${VERSION}"
          echo "Will create tag: v${VERSION}"

      - name: Check if tag already exists
        id: check-tag
        run: |
          TAG="v${{ steps.get-version.outputs.version }}"
          # Verify against refs/tags explicitly: a bare `git rev-parse "$TAG"`
          # resolves ANY ref with that name, so a branch coincidentally named
          # v1.2.3 would be a false positive and silently skip the release.
          if git rev-parse -q --verify "refs/tags/$TAG" >/dev/null 2>&1; then
            echo "exists=true" >> $GITHUB_OUTPUT
            echo "⚠️ Tag $TAG already exists"
          else
            echo "exists=false" >> $GITHUB_OUTPUT
            echo "✓ Tag $TAG does not exist yet"
          fi

      - name: Create and push tag
        if: steps.check-tag.outputs.exists == 'false'
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="v${{ steps.get-version.outputs.version }}"

          # Configure git identity for the annotated tag object
          git config --local user.name "github-actions[bot]"
          git config --local user.email "41898282+github-actions[bot]@users.noreply.github.com"

          # Create annotated tag
          git tag -a "$TAG" -m "Release $TAG"

          # Push tag
          git push origin "$TAG"

          echo "✅ Created and pushed tag: $TAG"

      - name: Trigger release workflow
        if: steps.check-tag.outputs.exists == 'false'
        env:
          # Uses the bot token (not github.token) — presumably so the
          # dispatched run can itself trigger follow-on workflows; confirm.
          GH_TOKEN: ${{ secrets.FLASHINFER_BOT_TOKEN }}
        run: |
          TAG="v${{ steps.get-version.outputs.version }}"

          # Trigger the release workflow
          gh workflow run release.yml \
            -f tag="$TAG"

          echo "✅ Triggered release workflow for tag: $TAG"

      - name: Summary
        run: |
          echo "## Auto Tag and Release Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          if [ "${{ steps.check-tag.outputs.exists }}" = "true" ]; then
            echo "⚠️ Tag v${{ steps.get-version.outputs.version }} already exists, skipped" >> $GITHUB_STEP_SUMMARY
          else
            echo "✅ Successfully created tag and triggered release workflow" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "- **Version**: ${{ steps.get-version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
            echo "- **Tag**: v${{ steps.get-version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
            echo "- **PR**: #${{ github.event.pull_request.number }}" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "🔗 [View Release Workflow Runs](../../actions/workflows/release.yml)" >> $GITHUB_STEP_SUMMARY
          fi

      - name: Tag already exists
        if: steps.check-tag.outputs.exists == 'true'
        run: |
          echo "Tag v${{ steps.get-version.outputs.version }} already exists, skipping tag creation and release"
          exit 0
# Bump Version
#
# Weekly (or manual) job that asks an AI provider to classify the commits
# since the last release as major/minor/patch, updates version.txt, and opens
# a labeled PR. Merging that PR triggers auto-tag-and-release.yml.
name: Bump Version

on:
  workflow_dispatch:
    inputs:
      force_bump_type:
        description: 'Force a specific bump type (leave empty for AI auto-detection)'
        required: false
        type: choice
        options:
          - ''
          - major
          - minor
          - patch
      ai_provider:
        description: 'AI provider to use for analysis (default: gemini)'
        required: false
        type: choice
        default: 'gemini'
        options:
          - gemini
          - openai
          - anthropic
  schedule:
    # Run weekly on Monday at 9:00 AM UTC
    - cron: '0 9 * * 1'

jobs:
  analyze-and-bump:
    runs-on: ubuntu-latest
    permissions:
      contents: write
      pull-requests: write
    steps:
      - name: Checkout code
        uses: actions/checkout@v4
        with:
          fetch-depth: 0 # Need full history to analyze commits
          submodules: false

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.10'

      - name: Install dependencies
        run: |
          python -m pip install --upgrade pip
          pip install google-generativeai openai anthropic

      - name: Get current version
        id: current-version
        run: |
          CURRENT_VERSION=$(tr -d '[:space:]' < version.txt)
          echo "version=${CURRENT_VERSION}" >> $GITHUB_OUTPUT
          echo "Current version: ${CURRENT_VERSION}"

      - name: Check for existing bump PR
        id: check-existing-pr
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          # Check if there's already an open PR for version bump.
          # `.[0].number` evaluates to null on an empty result list; append
          # `// empty` so nothing is printed instead of the literal string
          # "null" — otherwise the -n test below reports a phantom PR and
          # the whole job is permanently skipped.
          EXISTING_PR=$(gh pr list --state open --label "version-bump" --json number --jq '.[0].number // empty' || echo "")
          if [ -n "$EXISTING_PR" ]; then
            echo "has_existing_pr=true" >> $GITHUB_OUTPUT
            echo "existing_pr_number=${EXISTING_PR}" >> $GITHUB_OUTPUT
            echo "⚠️ Existing version bump PR found: #${EXISTING_PR}"
          else
            echo "has_existing_pr=false" >> $GITHUB_OUTPUT
            echo "✓ No existing version bump PR found"
          fi

      - name: Analyze commits with AI
        id: analyze
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false'
        env:
          GEMINI_API_KEY: ${{ secrets.GEMINI_API_KEY }}
          OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
          ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
        run: |
          # Determine which AI provider to use
          # Priority: manual input > default (gemini)
          if [ -n "${{ inputs.ai_provider }}" ]; then
            AI_PROVIDER="${{ inputs.ai_provider }}"
          else
            AI_PROVIDER="gemini"
          fi

          echo "Using AI provider: ${AI_PROVIDER}"
          echo "Analyzing commits to determine version bump..."

          # Run AI analysis with specified provider.
          # Only stdout (the JSON result) goes to the output file; redirecting
          # stderr here too (2>&1) would interleave the --verbose progress log
          # with the JSON and break every jq extraction below.
          python scripts/ai_determine_version_bump.py \
            --provider ${AI_PROVIDER} \
            --output-format json \
            --verbose > /tmp/analysis.json

          # Extract results
          BUMP_TYPE=$(jq -r '.bump_type' /tmp/analysis.json)
          NEW_VERSION=$(jq -r '.new_version' /tmp/analysis.json)
          REASONING=$(jq -r '.reasoning' /tmp/analysis.json)

          echo "bump_type=${BUMP_TYPE}" >> $GITHUB_OUTPUT
          echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT
          echo "ai_provider=${AI_PROVIDER}" >> $GITHUB_OUTPUT

          # Save reasoning and key changes for PR body
          jq -r '.reasoning' /tmp/analysis.json > /tmp/reasoning.txt
          jq -r '.key_changes[]' /tmp/analysis.json > /tmp/key_changes.txt || echo "" > /tmp/key_changes.txt

          echo "AI Analysis Result (${AI_PROVIDER}):"
          echo "  Bump type: ${BUMP_TYPE}"
          echo "  New version: ${NEW_VERSION}"
          echo "  Reasoning: ${REASONING}"

      - name: Override with manual input
        id: final-decision
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false'
        run: |
          # Use manual input if provided, otherwise use AI result
          if [ -n "${{ inputs.force_bump_type }}" ]; then
            BUMP_TYPE="${{ inputs.force_bump_type }}"
            echo "Using manual bump type: ${BUMP_TYPE}"

            # Calculate new version (sed strips any pre-release suffix such
            # as "rc1" from the patch component before incrementing)
            CURRENT_VERSION="${{ steps.current-version.outputs.version }}"
            if [ "$BUMP_TYPE" = "major" ]; then
              MAJOR=$(echo $CURRENT_VERSION | cut -d'.' -f1)
              NEW_VERSION="$((MAJOR + 1)).0.0"
            elif [ "$BUMP_TYPE" = "minor" ]; then
              MAJOR=$(echo $CURRENT_VERSION | cut -d'.' -f1)
              MINOR=$(echo $CURRENT_VERSION | cut -d'.' -f2)
              NEW_VERSION="${MAJOR}.$((MINOR + 1)).0"
            elif [ "$BUMP_TYPE" = "patch" ]; then
              MAJOR=$(echo $CURRENT_VERSION | cut -d'.' -f1)
              MINOR=$(echo $CURRENT_VERSION | cut -d'.' -f2)
              PATCH=$(echo $CURRENT_VERSION | cut -d'.' -f3 | sed 's/[^0-9].*//')
              NEW_VERSION="${MAJOR}.${MINOR}.$((PATCH + 1))"
            fi

            echo "Manual override - reasoning" > /tmp/reasoning.txt
          else
            BUMP_TYPE="${{ steps.analyze.outputs.bump_type }}"
            NEW_VERSION="${{ steps.analyze.outputs.new_version }}"
            echo "Using AI-determined bump type: ${BUMP_TYPE}"
          fi

          echo "bump_type=${BUMP_TYPE}" >> $GITHUB_OUTPUT
          echo "new_version=${NEW_VERSION}" >> $GITHUB_OUTPUT

          echo "Final decision:"
          echo "  Bump type: ${BUMP_TYPE}"
          echo "  New version: ${NEW_VERSION}"

      - name: Check if bump needed
        id: check-bump
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false'
        run: |
          BUMP_TYPE="${{ steps.final-decision.outputs.bump_type }}"

          if [ "$BUMP_TYPE" = "none" ]; then
            echo "needs_bump=false" >> $GITHUB_OUTPUT
            echo "ℹ️ No version bump needed"
          else
            echo "needs_bump=true" >> $GITHUB_OUTPUT
            echo "✓ Version bump needed: ${BUMP_TYPE}"
          fi

      - name: Update version.txt
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false' && steps.check-bump.outputs.needs_bump == 'true'
        run: |
          NEW_VERSION="${{ steps.final-decision.outputs.new_version }}"
          echo "${NEW_VERSION}" > version.txt
          echo "✓ Updated version.txt to ${NEW_VERSION}"

      - name: Generate PR body
        id: pr-body
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false' && steps.check-bump.outputs.needs_bump == 'true'
        run: |
          python scripts/generate_bump_pr_body.py \
            --current-version "${{ steps.current-version.outputs.version }}" \
            --new-version "${{ steps.final-decision.outputs.new_version }}" \
            --bump-type "${{ steps.final-decision.outputs.bump_type }}" \
            --ai-provider "${{ steps.analyze.outputs.ai_provider }}" \
            --reasoning-file /tmp/reasoning.txt \
            --key-changes-file /tmp/key_changes.txt \
            --run-id "${{ github.run_id }}" \
            --output /tmp/pr_body.md

          echo "Generated PR body:"
          cat /tmp/pr_body.md

      - name: Create Pull Request
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false' && steps.check-bump.outputs.needs_bump == 'true'
        uses: peter-evans/create-pull-request@v6
        with:
          token: ${{ secrets.FLASHINFER_BOT_TOKEN }}
          commit-message: "release: bump version to ${{ steps.final-decision.outputs.new_version }}"
          title: "Release: Bump version to v${{ steps.final-decision.outputs.new_version }}"
          body-path: /tmp/pr_body.md
          branch: bump-version-${{ steps.final-decision.outputs.new_version }}
          delete-branch: true
          labels: |
            version-bump
            automated
            release
          # NOTE(review): create-pull-request documents committer/author as
          # "Display Name <email>" — confirm a bare login is accepted.
          committer: flashinfer-bot
          author: flashinfer-bot

      - name: Summary
        if: steps.check-existing-pr.outputs.has_existing_pr == 'false'
        run: |
          echo "## Version Bump Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY

          if [ "${{ steps.check-bump.outputs.needs_bump }}" = "true" ]; then
            echo "✅ Version bump PR created" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "- **Current version**: ${{ steps.current-version.outputs.version }}" >> $GITHUB_STEP_SUMMARY
            echo "- **New version**: ${{ steps.final-decision.outputs.new_version }}" >> $GITHUB_STEP_SUMMARY
            echo "- **Bump type**: ${{ steps.final-decision.outputs.bump_type }}" >> $GITHUB_STEP_SUMMARY

            # Add AI provider info if available
            if [ -n "${{ steps.analyze.outputs.ai_provider }}" ]; then
              echo "- **AI provider**: ${{ steps.analyze.outputs.ai_provider }}" >> $GITHUB_STEP_SUMMARY
            fi
          else
            echo "ℹ️ No version bump needed" >> $GITHUB_STEP_SUMMARY
            echo "" >> $GITHUB_STEP_SUMMARY
            echo "No significant changes detected since the last release." >> $GITHUB_STEP_SUMMARY
          fi

      - name: Already has open PR
        if: steps.check-existing-pr.outputs.has_existing_pr == 'true'
        run: |
          echo "## Version Bump Summary" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "⚠️ A version bump PR already exists: #${{ steps.check-existing-pr.outputs.existing_pr_number }}" >> $GITHUB_STEP_SUMMARY
          echo "" >> $GITHUB_STEP_SUMMARY
          echo "Please review and merge the existing PR before creating a new one." >> $GITHUB_STEP_SUMMARY

          exit 0
#!/usr/bin/env python3
"""
Use AI (Gemini, Claude, or OpenAI) to analyze git commits and determine semantic version bump type.

According to CONTRIBUTING.md:
- major increment: incompatible API changes
- minor increment: added functionality that is backwards-compatible
- patch increment: backwards-compatible bug fixes

Requires one of the following environment variables to be set:
- GEMINI_API_KEY for Google Gemini
- ANTHROPIC_API_KEY for Claude
- OPENAI_API_KEY for OpenAI

Optional environment variables:
- OPENAI_MODEL (default: gpt-4o) - specify which OpenAI model to use
- CLAUDE_MODEL (default: claude-3-5-sonnet-20241022) - specify which Claude model to use
- GEMINI_MODEL (default: gemini-2.0-flash-exp) - specify which Gemini model to use

By default the script tries providers in order: OpenAI -> Claude -> Gemini -> Fallback.
Pass --provider to move a specific provider to the front of that order.

Install: pip install openai anthropic google-generativeai
"""

import argparse
import json
import os
import re
import subprocess
import sys
from typing import Tuple


def get_latest_tag() -> str:
    """Return the highest version-sorted git tag, or "" if there are none."""
    try:
        result = subprocess.run(
            ["git", "tag", "--sort=-v:refname"],
            capture_output=True,
            text=True,
            check=True,
        )
        tags = [
            line.strip() for line in result.stdout.strip().split("\n") if line.strip()
        ]
        if tags:
            return tags[0]
        return ""
    except subprocess.CalledProcessError:
        return ""


def get_commits_since_tag(tag: str) -> list[dict]:
    """Get commit messages and file-change stats since the given tag.

    Returns a list of dicts with keys: hash (short), subject, body,
    files_changed. An empty tag means "all commits".
    """
    try:
        # Use NUL-separated records (-z): the %b body field is multi-line,
        # so splitting the log output on "\n" (the previous approach) would
        # truncate any commit whose body spans more than one line and treat
        # the remaining body lines as malformed records.
        pretty = "--pretty=format:%H|||%s|||%b"
        if tag:
            cmd = ["git", "log", "-z", f"{tag}..HEAD", pretty]
        else:
            cmd = ["git", "log", "-z", pretty]

        result = subprocess.run(cmd, capture_output=True, text=True, check=True)

        commits = []
        for record in result.stdout.split("\0"):
            record = record.strip()
            if not record:
                continue
            parts = record.split("|||")
            if len(parts) >= 2:
                commit_hash = parts[0]
                subject = parts[1]
                body = parts[2] if len(parts) > 2 else ""

                # Get file changes for this commit (best-effort: fails
                # harmlessly for a root commit, which has no parent).
                diff_cmd = ["git", "diff", f"{commit_hash}^..{commit_hash}", "--stat"]
                diff_result = subprocess.run(diff_cmd, capture_output=True, text=True)
                files_changed = diff_result.stdout.strip()

                commits.append(
                    {
                        "hash": commit_hash[:7],
                        "subject": subject,
                        "body": body,
                        "files_changed": files_changed,
                    }
                )

        return commits
    except subprocess.CalledProcessError:
        return []


def build_analysis_prompt(commits_summary: str, current_version: str) -> str:
    """Build the AI analysis prompt (shared by all AI providers)."""
    return f"""You are analyzing git commits for a CUDA kernel library called FlashInfer to determine the appropriate semantic version bump.

Current version: {current_version}

Versioning rules for this project (from CONTRIBUTING.md):
FlashInfer follows a "right-shifted" versioning scheme (major.minor.patch[.post1]):
- MAJOR increment: architectural milestones and/or incompatible API changes (breaking changes to public APIs), similar to PyTorch 2.0
- MINOR increment: significant backwards-compatible new features (major functionality additions)
- PATCH increment: small backwards-compatible features (e.g. new kernels, new SM support, etc.) and backwards-compatible bug fixes
- POST (e.g. .post1): optional suffix for quick follow-up release with just backwards-compatible bug fixes (not used in this analysis)

Here are the commits since the last release:

{commits_summary}

Please analyze these commits and determine:
1. Whether there are any breaking API changes or architectural milestones (MAJOR bump needed)
2. Whether there are significant new features (MINOR bump needed)
3. Whether there are small features or bug fixes (PATCH bump needed)
4. If no significant changes, return "none"

Respond in JSON format:
{{
    "bump_type": "major|minor|patch|none",
    "reasoning": "Detailed explanation of your decision",
    "key_changes": ["list of most important changes that influenced the decision"]
}}

Important considerations:
- New kernel implementations, new SM support, performance improvements are PATCH-level changes
- MINOR bumps are for significant/major feature additions only, not incremental improvements
- Internal refactoring, test updates, documentation changes alone don't warrant a version bump
- API signature changes or removed functionality are MAJOR bumps
- Focus on changes that affect users of the library, not internal changes
"""


def extract_json_from_response(text: str) -> str:
    """Extract JSON from response that might be wrapped in markdown code blocks."""
    json_match = re.search(r"```(?:json)?\s*(\{.*?\})\s*```", text, re.DOTALL)
    if json_match:
        return json_match.group(1)
    return text


def analyze_with_openai(commits_summary: str, current_version: str) -> dict:
    """Use OpenAI to analyze commits.

    Raises ValueError when OPENAI_API_KEY is unset; may raise ImportError
    when the openai package is missing (handled by try_ai_provider).
    """
    api_key = os.getenv("OPENAI_API_KEY")
    if not api_key:
        raise ValueError("OPENAI_API_KEY not set")

    model = os.getenv("OPENAI_MODEL", "gpt-4o")

    from openai import OpenAI

    client = OpenAI(api_key=api_key)
    prompt = build_analysis_prompt(commits_summary, current_version)

    response = client.chat.completions.create(
        model=model,
        messages=[
            {
                "role": "system",
                "content": "You are a helpful assistant that analyzes git commits to determine semantic version bumps. Always respond with valid JSON.",
            },
            {"role": "user", "content": prompt},
        ],
        temperature=0.3,
        response_format={"type": "json_object"},
    )

    result_text = response.choices[0].message.content.strip()
    return json.loads(result_text)


def analyze_with_claude(commits_summary: str, current_version: str) -> dict:
    """Use Anthropic Claude to analyze commits (see analyze_with_openai for errors)."""
    api_key = os.getenv("ANTHROPIC_API_KEY")
    if not api_key:
        raise ValueError("ANTHROPIC_API_KEY not set")

    model = os.getenv("CLAUDE_MODEL", "claude-3-5-sonnet-20241022")

    from anthropic import Anthropic

    client = Anthropic(api_key=api_key)
    prompt = build_analysis_prompt(commits_summary, current_version)

    response = client.messages.create(
        model=model,
        max_tokens=2048,
        temperature=0.3,
        messages=[{"role": "user", "content": prompt}],
    )

    result_text = response.content[0].text.strip()
    result_text = extract_json_from_response(result_text)

    return json.loads(result_text)


def analyze_with_gemini(commits_summary: str, current_version: str) -> dict:
    """Use Google Gemini to analyze commits (see analyze_with_openai for errors)."""
    api_key = os.getenv("GEMINI_API_KEY")
    if not api_key:
        raise ValueError("GEMINI_API_KEY not set")

    model_name = os.getenv("GEMINI_MODEL", "gemini-2.0-flash-exp")

    import google.generativeai as genai

    genai.configure(api_key=api_key)

    prompt = build_analysis_prompt(commits_summary, current_version)

    model = genai.GenerativeModel(model_name)

    response = model.generate_content(
        prompt,
        generation_config=genai.types.GenerationConfig(
            temperature=0.3,
        ),
    )

    result_text = response.text.strip()
    result_text = extract_json_from_response(result_text)

    return json.loads(result_text)


def try_ai_provider(
    provider_name: str,
    analyzer_func,
    commits_summary: str,
    current_version: str,
    model_env_var: str,
    default_model: str,
    install_package: str,
):
    """
    Try to use an AI provider with standardized error handling.

    Returns: (success: bool, result: dict or None)
    """
    try:
        print(f"Trying {provider_name}...", file=sys.stderr)
        result = analyzer_func(commits_summary, current_version)
        model = os.getenv(model_env_var, default_model)
        print(f"Successfully used {provider_name} (model: {model})", file=sys.stderr)
        return True, result
    except ImportError:
        print(
            f"{provider_name} package not installed. Install with: pip install {install_package}",
            file=sys.stderr,
        )
    except ValueError as e:
        print(f"{provider_name} not available: {e}", file=sys.stderr)
    except Exception as e:
        print(f"Error calling {provider_name} API: {e}", file=sys.stderr)

    return False, None


def analyze_with_ai(
    commits: list[dict],
    current_version: str,
    preferred_provider: str = None,
) -> dict:
    """
    Use AI to analyze commits and determine version bump.

    Tries providers in order: OpenAI -> Claude -> Gemini -> Fallback.
    When preferred_provider ("openai", "anthropic", or "gemini") is given it
    is moved to the front of that order; the others remain as fallbacks.
    """
    # Prepare the commits summary once
    commits_summary = "\n\n".join(
        [
            f"Commit {c['hash']}:\n"
            f"Subject: {c['subject']}\n"
            f"Body: {c['body']}\n"
            f"Files changed:\n{c['files_changed'][:500]}"  # Limit file changes to avoid token limit
            for c in commits
        ]
    )

    # Define AI providers to try in order
    providers = [
        ("OpenAI", analyze_with_openai, "OPENAI_MODEL", "gpt-4o", "openai"),
        (
            "Anthropic Claude",
            analyze_with_claude,
            "CLAUDE_MODEL",
            "claude-3-5-sonnet-20241022",
            "anthropic",
        ),
        (
            "Google Gemini",
            analyze_with_gemini,
            "GEMINI_MODEL",
            "gemini-2.0-flash-exp",
            "google-generativeai",
        ),
    ]

    # Move the preferred provider (CLI --provider / workflow input) to the
    # front; stable sort keeps the relative order of the remaining providers.
    if preferred_provider:
        aliases = {
            "openai": "OpenAI",
            "anthropic": "Anthropic Claude",
            "gemini": "Google Gemini",
        }
        wanted = aliases.get(preferred_provider.lower())
        if wanted:
            providers.sort(key=lambda entry: entry[0] != wanted)

    # Try each provider in order
    for name, analyzer, model_env, default_model, package in providers:
        success, result = try_ai_provider(
            name,
            analyzer,
            commits_summary,
            current_version,
            model_env,
            default_model,
            package,
        )
        if success:
            return result

    # Fallback to basic analysis
    print(
        "Warning: No AI providers available, falling back to basic analysis",
        file=sys.stderr,
    )
    return fallback_analysis(commits)


def fallback_analysis(commits: list[dict]) -> dict:
    """
    Fallback analysis using simple keyword matching.
    """
    has_major = False
    has_minor = False
    has_patch = False
    key_changes = []

    for commit in commits:
        text = (commit["subject"] + " " + commit["body"]).lower()

        # Skip version bump commits
        if re.search(r"bump version|release.*v?\d+\.\d+\.\d+", text):
            continue

        # Check for breaking changes
        if any(
            keyword in text
            for keyword in [
                "breaking change",
                "break:",
                "breaking:",
                "!:",
                "incompatible",
            ]
        ):
            has_major = True
            key_changes.append(f"Breaking change: {commit['subject']}")

        # Check for features
        elif any(
            keyword in text
            for keyword in ["feat:", "feature:", "add ", "implement", "support "]
        ):
            has_minor = True
            key_changes.append(f"New feature: {commit['subject']}")

        # Check for fixes
        elif any(keyword in text for keyword in ["fix:", "bugfix:", "fix ", "fixes "]):
            has_patch = True
            key_changes.append(f"Bug fix: {commit['subject']}")

    if has_major:
        bump_type = "major"
        reasoning = "Detected breaking changes in commits"
    elif has_minor:
        bump_type = "minor"
        reasoning = "Detected new features without breaking changes"
    elif has_patch:
        bump_type = "patch"
        reasoning = "Detected bug fixes only"
    else:
        bump_type = "none"
        reasoning = "No significant changes detected (chore, docs, tests only)"

    return {
        "bump_type": bump_type,
        "reasoning": reasoning,
        "key_changes": key_changes[:10],  # Limit to top 10
    }


def parse_version(version_str: str) -> Tuple[int, int, int, str]:
    """Parse version string like '0.4.1' or '0.4.0rc1'.

    Returns (major, minor, patch, suffix); a leading 'v' is stripped.
    Raises ValueError for strings that are not major.minor.patch[suffix].
    """
    version_str = version_str.lstrip("v")
    match = re.match(r"^(\d+)\.(\d+)\.(\d+)(.*)$", version_str)
    if not match:
        raise ValueError(f"Invalid version format: {version_str}")

    major, minor, patch, suffix = match.groups()
    return int(major), int(minor), int(patch), suffix


def bump_version(current_version: str, bump_type: str) -> str:
    """Bump the version according to semantic versioning.

    Any pre-release suffix on the current version is dropped in the result.
    Raises ValueError for an unknown bump_type.
    """
    major, minor, patch, suffix = parse_version(current_version)

    if bump_type == "major":
        return f"{major + 1}.0.0"
    elif bump_type == "minor":
        return f"{major}.{minor + 1}.0"
    elif bump_type == "patch":
        return f"{major}.{minor}.{patch + 1}"
    else:
        raise ValueError(f"Invalid bump type: {bump_type}")


def main():
    parser = argparse.ArgumentParser(
        description="Use AI to determine semantic version bump type based on git commits"
    )
    parser.add_argument(
        "--current-version",
        help="Current version (if not provided, will read from version.txt)",
    )
    parser.add_argument(
        "--since-tag",
        help="Analyze commits since this tag (if not provided, uses latest tag)",
    )
    # The bump-version workflow passes --provider; without this flag argparse
    # exits with "unrecognized arguments" and the whole CI step fails.
    parser.add_argument(
        "--provider",
        choices=["gemini", "openai", "anthropic"],
        help="Preferred AI provider to try first (others remain as fallbacks)",
    )
    parser.add_argument(
        "--output-format",
        choices=["simple", "json"],
        default="simple",
        help="Output format",
    )
    parser.add_argument(
        "--verbose",
        action="store_true",
        help="Print detailed analysis",
    )

    args = parser.parse_args()

    # Get current version
    if args.current_version:
        current_version = args.current_version
    else:
        try:
            with open("version.txt", "r") as f:
                current_version = f.read().strip()
        except FileNotFoundError:
            print(
                "Error: version.txt not found and --current-version not provided",
                file=sys.stderr,
            )
            sys.exit(1)

    # Get commits to analyze
    if args.since_tag:
        tag = args.since_tag
    else:
        tag = get_latest_tag()

    if args.verbose:
        print(f"Current version: {current_version}", file=sys.stderr)
        print(
            f"Analyzing commits since: {tag if tag else 'beginning'}", file=sys.stderr
        )

    commits = get_commits_since_tag(tag)

    if not commits:
        if args.verbose:
            print("No commits to analyze", file=sys.stderr)

        if args.output_format == "json":
            # Include reasoning/key_changes so downstream jq extractions of
            # those fields see real values rather than the literal "null".
            print(
                json.dumps(
                    {
                        "bump_type": "none",
                        "current_version": current_version,
                        "new_version": current_version,
                        "reasoning": "No commits to analyze",
                        "key_changes": [],
                    }
                )
            )
        else:
            print("none")
        sys.exit(0)

    if args.verbose:
        print(f"\nAnalyzing {len(commits)} commits with AI...", file=sys.stderr)

    # Use AI to analyze
    result = analyze_with_ai(commits, current_version, preferred_provider=args.provider)

    bump_type = result.get("bump_type", "none")
    reasoning = result.get("reasoning", "")
    key_changes = result.get("key_changes", [])

    # Calculate new version
    if bump_type == "none":
        new_version = current_version
    else:
        new_version = bump_version(current_version, bump_type)

    if args.verbose:
        print("\n=== AI Analysis Result ===", file=sys.stderr)
        print(f"Bump type: {bump_type}", file=sys.stderr)
        print(f"New version: {new_version}", file=sys.stderr)
        print(f"\nReasoning: {reasoning}", file=sys.stderr)
        if key_changes:
            print("\nKey changes:", file=sys.stderr)
            for change in key_changes:
                print(f"  - {change}", file=sys.stderr)

    # Output result
    if args.output_format == "json":
        output = {
            "bump_type": bump_type,
            "current_version": current_version,
            "new_version": new_version,
            "reasoning": reasoning,
            "key_changes": key_changes,
        }
        print(json.dumps(output, indent=2))
    else:
        # Simple format: "bump_type new_version"
        print(f"{bump_type} {new_version}")


if __name__ == "__main__":
    main()
+ """Generate the PR body markdown text. + + Args: + current_version: Current version string (e.g., "2.1.4") + new_version: New version string (e.g., "2.2.0") + bump_type: Type of version bump ("major", "minor", "patch") + ai_provider: Optional AI provider name used for analysis + reasoning_file: Optional path to file containing AI reasoning + key_changes_file: Optional path to file containing key changes (one per line) + run_id: Optional GitHub Actions run ID + + Returns: + Formatted PR body as markdown string + """ + bump_type_upper = bump_type.upper() + + # Build AI provider text + ai_provider_text = f" (using {ai_provider})" if ai_provider else "" + + # Read reasoning + reasoning = "Manual version bump" + if reasoning_file: + reasoning = read_file_safe(reasoning_file) or reasoning + + # Start building PR body + lines = [ + f"## Version Bump: v{current_version} -> v{new_version}", + "", + f"This PR bumps the version from **v{current_version}** to **v{new_version}** ({bump_type_upper} bump).", + "", + f"### AI Analysis{ai_provider_text}", + "", + reasoning, + "", + ] + + # Add key changes if available + if key_changes_file: + key_changes_text = read_file_safe(key_changes_file) + if key_changes_text: + lines.extend( + [ + "### Key Changes", + "", + ] + ) + for change in key_changes_text.split("\n"): + change = change.strip() + if change: + lines.append(f"- {change}") + lines.append("") + + # Add semantic versioning rules + lines.extend( + [ + "### Semantic Versioning Rules", + "", + "According to our [CONTRIBUTING.md](../blob/main/CONTRIBUTING.md):", + "", + "- **Major**: Incompatible API changes", + "- **Minor**: Added functionality that is backwards-compatible", + "- **Patch**: Backwards-compatible bug fixes", + "", + "### Next Steps", + "", + "After merging this PR:", + "- The [auto-tag-and-release workflow](../actions/workflows/auto-tag-and-release.yml) will automatically:", + f" 1. Create git tag `v{new_version}` on the main branch", + " 2. 
Trigger the [release workflow](../actions/workflows/release.yml) to build and publish the release", + "", + "---", + "", + ] + ) + + # Add auto-generated footer + if run_id: + lines.append( + f"🤖 Auto-generated by [bump-version workflow](../actions/runs/{run_id})" + ) + else: + lines.append("🤖 Auto-generated by bump-version workflow") + + return "\n".join(lines) + + +def main(): + parser = argparse.ArgumentParser( + description="Generate PR body for version bump pull requests" + ) + parser.add_argument( + "--current-version", required=True, help="Current version (e.g., 2.1.4)" + ) + parser.add_argument( + "--new-version", required=True, help="New version (e.g., 2.2.0)" + ) + parser.add_argument( + "--bump-type", + required=True, + choices=["major", "minor", "patch"], + help="Type of version bump", + ) + parser.add_argument( + "--ai-provider", help="AI provider used for analysis (optional)" + ) + parser.add_argument( + "--reasoning-file", type=Path, help="Path to file containing AI reasoning" + ) + parser.add_argument( + "--key-changes-file", + type=Path, + help="Path to file containing key changes (one per line)", + ) + parser.add_argument("--run-id", help="GitHub Actions run ID") + parser.add_argument( + "--output", type=Path, help="Output file path (default: stdout)" + ) + + args = parser.parse_args() + + # Generate PR body + pr_body = generate_pr_body( + current_version=args.current_version, + new_version=args.new_version, + bump_type=args.bump_type, + ai_provider=args.ai_provider, + reasoning_file=args.reasoning_file, + key_changes_file=args.key_changes_file, + run_id=args.run_id, + ) + + # Write output + if args.output: + args.output.write_text(pr_body) + print(f"PR body written to {args.output}", file=sys.stderr) + else: + print(pr_body) + + return 0 + + +if __name__ == "__main__": + sys.exit(main())