const fs = require('node:fs');
const path = require('node:path');

// Configuration
const DOCS_DIR = path.join(__dirname, '../docs');
const OUTPUT_FILE = path.join(__dirname, '../static/llm-context.md');

/**
 * Recursively get all markdown files from a directory
 * @param {string} dir - Directory to search
 * @param {string} baseDir - Base directory for relative paths
 * @returns {Array<{path: string, relativePath: string}>}
 */
function getAllMarkdownFiles(dir, baseDir = dir) {
  let results = [];
  const items = fs.readdirSync(dir, { withFileTypes: true });

  for (const item of items) {
    const fullPath = path.join(dir, item.name);

    if (item.isDirectory()) {
      // Recursively search subdirectories
      results = results.concat(getAllMarkdownFiles(fullPath, baseDir));
    } else if (item.isFile() && item.name.endsWith('.md')) {
      const relativePath = path.relative(baseDir, fullPath);
      results.push({ path: fullPath, relativePath });
    }
  }

  return results;
}

/**
 * Sort files by relative path in a natural, case-insensitive order
 * (numeric-aware, so e.g. "2-setup.md" sorts before "10-deploy.md")
 * @param {Array<{path: string, relativePath: string}>} files - Files to sort
 * @returns {Array<{path: string, relativePath: string}>} The same array, sorted in place
 */
function sortFiles(files) {
  return files.sort((a, b) => {
    // Compare lowercased relative paths so the output follows the docs hierarchy
    const pathA = a.relativePath.toLowerCase();
    const pathB = b.relativePath.toLowerCase();

    return pathA.localeCompare(pathB, undefined, { numeric: true });
  });
}

/**
 * Generate the LLM context file
 */
function generateLLMContext() {
  console.log('🔍 Scanning documentation files...');

  // Get all markdown files
  const markdownFiles = getAllMarkdownFiles(DOCS_DIR);
  const sortedFiles = sortFiles(markdownFiles);

  console.log(`📄 Found ${sortedFiles.length} markdown files`);

  // Create output content
  let output = '';

  // Add header
  output += '# Adaptivestone Framework - Complete Documentation\n\n';
  output += '> This file is auto-generated from the Docusaurus documentation.\n';
  output += `> Generated on: ${new Date().toISOString()}\n`;
  output += `> Total documents: ${sortedFiles.length}\n\n`;
  output += '---\n\n';

  // Add table of contents
  output += '## Table of Contents\n\n';
  sortedFiles.forEach((file, index) => {
    const title = file.relativePath.replace(/\.md$/, '').replace(/\//g, ' / ');
    output += `${index + 1}. ${title}\n`;
  });
  output += '\n---\n\n';

  // Concatenate all files
  sortedFiles.forEach((file, index) => {
    console.log(`  ├─ Processing: ${file.relativePath}`);

    // Add section header
    const sectionTitle = file.relativePath.replace(/\.md$/, '').replace(/\//g, ' > ');
    output += `\n\n# Document ${index + 1}: ${sectionTitle}\n\n`;
    output += `<!-- Source: ${file.relativePath} -->\n\n`;

    // Read and add file content
    const content = fs.readFileSync(file.path, 'utf8');
    output += content;

    // Add separator between documents
    output += '\n\n---\n';
  });

  // Add footer
  output += '\n\n<!-- End of Documentation -->\n';

  // Write output file (create the output directory first in case it does not exist yet)
  fs.mkdirSync(path.dirname(OUTPUT_FILE), { recursive: true });
  fs.writeFileSync(OUTPUT_FILE, output, 'utf8');

  // Calculate statistics (ignore empty strings produced by leading/trailing whitespace)
  const stats = fs.statSync(OUTPUT_FILE);
  const words = output.split(/\s+/).filter(Boolean).length;
  const estimatedTokens = Math.round(words * 1.3); // Rough estimate: 1 word ≈ 1.3 tokens

  console.log('\n✅ LLM context file generated successfully!');
  console.log('\n📊 Statistics:');
  console.log(`  - Output file: ${path.relative(process.cwd(), OUTPUT_FILE)}`);
  console.log(`  - File size: ${(stats.size / 1024).toFixed(2)} KB`);
  console.log(`  - Word count: ${words.toLocaleString()}`);
  console.log(`  - Estimated tokens: ${estimatedTokens.toLocaleString()}`);
  console.log('\n💡 You can now use this file with AI assistants like ChatGPT, Claude, etc.');
}

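// Usage note: run with Node.js, e.g. `node scripts/generate-llm-context.js`
// (the scripts/ path is an assumption; adjust to wherever this file actually lives).
// Running it before `docusaurus build` keeps static/llm-context.md up to date.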
// Run the script
try {
  generateLLMContext();
} catch (error) {
  console.error('❌ Error generating LLM context:', error);
  process.exit(1);
}